drivers/net/mlx5/mlx5_flow_dv.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                      uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);

/**
 * Initialize flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() rejects patterns with multiple L3/L4 layers except in
 * tunnel mode. For tunnel mode, the items to be modified are the outermost
 * ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, it means this dev_flow is the
         * suffix flow and the layer flags were set by the prefix flow. Use
         * the layer flags from the prefix flow, as the suffix flow may not
         * contain the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
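
/*
 * Illustrative sketch (not part of the driver): how flow_dv_attr_init()
 * classifies a simple ETH / IPV4 / UDP pattern when no prefix-flow layer
 * flags are set. The pattern array and the dev_flow context below are
 * hypothetical, for demonstration only.
 *
 * @code
 *      const struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      union flow_dv_attr attr = { .attr = 0 };
 *
 *      flow_dv_attr_init(pattern, &attr, dev_flow, false);
 *      // Now attr.ipv4 == 1, attr.udp == 1 and attr.valid == 1, so
 *      // modify-header actions will target the IPv4/UDP headers.
 * @endcode
 */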

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};
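
/*
 * Reading the tables above: each entry is {size, offset, id}, with size
 * and offset in bytes unless noted otherwise (modify_vlan_out_first_vid
 * uses bits). For example, in modify_ipv4 the entry
 * {4, 12, MLX5_MODI_OUT_SIPV4} describes the 4-byte IPv4 source address
 * at byte offset 12 of the IPv4 header, and {1, 8, MLX5_MODI_OUT_IPV4_TTL}
 * the 1-byte TTL at offset 8. A zero-sized entry terminates each table.
 */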

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared dv context. The lock is taken only if the context is
 * actually shared, i.e. we have a multiport IB device and representors
 * are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

/**
 * Update the VLAN VID/PCP based on the input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
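
/*
 * Illustrative sketch (not part of the driver): the VLAN TCI layout is
 * PCP (3 bits) | DEI (1 bit) | VID (12 bits). Given a TCI of 0x2064
 * (PCP = 1, VID = 0x064), applying OF_SET_VLAN_PCP with vlan_pcp = 5
 * through mlx5_update_vlan_vid_pcp() works out as follows; the action
 * and header variables are hypothetical.
 *
 * @code
 *      struct rte_vlan_hdr vlan = { .vlan_tci = 0x2064 };
 *      const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 5 };
 *      const struct rte_flow_action action = {
 *              .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
 *              .conf = &pcp,
 *      };
 *
 *      mlx5_update_vlan_vid_pcp(&action, &vlan);
 *      // vlan.vlan_tci == 0xA064: the PCP bits were replaced by 5
 *      // (5 << 13 == 0xA000) and the VID bits were left untouched.
 * @endcode
 */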

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
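
/*
 * Illustrative sketch (not part of the driver): fetching a 3-byte
 * big-endian field. The input bytes below are hypothetical.
 *
 * @code
 *      const uint8_t data[] = { 0x12, 0x34, 0x56 };
 *
 *      // The first two bytes are read as a big-endian 16-bit value
 *      // (0x1234), shifted left by 8 and OR-ed with the third byte,
 *      // yielding 0x123456 in host-endian format.
 *      uint32_t v = flow_dv_fetch_field(data, 3);      // v == 0x123456
 * @endcode
 */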

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
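
/*
 * Illustrative sketch (not part of the driver): how the field width and
 * offset are deduced from a mask inside flow_dv_convert_modify_action().
 * For a hypothetical 32-bit mask of 0x00FFFF00 (a 16-bit field):
 *
 * @code
 *      uint32_t mask = 0x00FFFF00;
 *      unsigned int off_b = rte_bsf32(mask);           // 8, lowest set bit
 *      unsigned int size_b = sizeof(uint32_t) * CHAR_BIT -
 *                            off_b - __builtin_clz(mask); // 32 - 8 - 8 = 16
 *
 *      // The hardware command thus modifies 16 bits starting at bit
 *      // offset 8. A full 32-bit mask would give size_b == 32, which
 *      // is encoded as 0 in the command's length field.
 * @endcode
 */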

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
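
/*
 * Illustrative sketch (not part of the driver): a SET_IPV4_SRC action as
 * it would be fed to flow_dv_convert_action_modify_ipv4(). The resource
 * and error variables are hypothetical.
 *
 * @code
 *      const struct rte_flow_action_set_ipv4 conf = {
 *              .ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *      };
 *      const struct rte_flow_action action = {
 *              .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *              .conf = &conf,
 *      };
 *
 *      // Appends one MLX5_MODIFICATION_TYPE_SET command targeting
 *      // MLX5_MODI_OUT_SIPV4 to the modify-header resource.
 *      int ret = flow_dv_convert_action_modify_ipv4(resource, &action,
 *                                                   error);
 * @endcode
 */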

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}
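
/*
 * Note on the data1 handling above: conf->vlan_vid is already a
 * big-endian 16-bit value, so, unlike in the other convert helpers, no
 * rte_cpu_to_be_32() is applied to data1; the shift by 16 only moves
 * the VID into the byte positions the device expects for this command.
 */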

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only an increment
                 * operation. To simulate decrementing Y by X with the
                 * increment operation, we add UINT32_MAX to Y, X times.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
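
/*
 * Illustrative sketch (not part of the driver): why adding UINT32_MAX
 * decrements a 32-bit value. All arithmetic is modulo 2^32:
 *
 * @code
 *      uint32_t y = 1000;
 *
 *      y += UINT32_MAX;        // y == 999: UINT32_MAX == 2^32 - 1, and
 *                              // (1000 + 2^32 - 1) mod 2^32 == 999.
 *      // Hence DEC_TCP_SEQ by X is encoded as an ADD of
 *      // (uint32_t)(X * UINT32_MAX), i.e. the two's complement of X.
 * @endcode
 */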

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only an increment
                 * operation. To simulate decrementing Y by X with the
                 * increment operation, we add UINT32_MAX to Y, X times.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}
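
/*
 * Illustrative sketch (not part of the driver): the command emitted by
 * flow_dv_convert_action_set_reg() for writing 0x1234 into REG_C_2. The
 * conf variable is hypothetical.
 *
 * @code
 *      const struct mlx5_rte_flow_action_set_tag conf = {
 *              .id = REG_C_2,
 *              .data = 0x1234,
 *      };
 *
 *      // The resulting mlx5_modification_cmd has:
 *      //   action_type = MLX5_MODIFICATION_TYPE_SET
 *      //   field       = MLX5_MODI_META_REG_C_2 (via reg_to_field[])
 *      //   data1       = rte_cpu_to_be_32(0x1234)
 *      // (offset/length stay 0, i.e. a full 32-bit write).
 * @endcode
 */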

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness here because
                         * there is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
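
/*
 * Illustrative sketch (not part of the driver): the reg_c[0] adjustment
 * above, for a hypothetical reported mask of 0xffff0000 on a
 * little-endian host when copying into REG_C_0:
 *
 * @code
 *      uint32_t reg_c0 = 0xffff0000;           // priv->sh->dv_regc0_mask
 *      uint32_t offset = rte_bsf32(reg_c0);    // 16
 *      uint32_t mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
 *                                 rte_fls_u32(reg_c0));  // shift by 0
 *
 *      // mask == 0xffff0000: only the upper half of reg_c[0] is
 *      // writable, and the copy lands at bit offset 16, so the lower
 *      // half (owned by another feature) is preserved.
 * @endcode
 */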

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata mode only and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * There are no endianness conversions in the datapath code, for
         * performance reasons; all pattern conversions are done in
         * rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
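
/*
 * Illustrative sketch (not part of the driver): the REG_C_0 shift in
 * flow_dv_convert_action_set_meta() on a little-endian host, assuming a
 * hypothetical reg_c[0] mask of 0x0000ffff (lower half available for
 * metadata):
 *
 * @code
 *      uint32_t msk_c0 = 0x0000ffff;
 *      uint32_t shl_c0 = sizeof(msk_c0) * CHAR_BIT -
 *                        rte_fls_u32(msk_c0);   // 32 - 16 = 16
 *
 *      // data and mask are shifted left by 16 so that, after the
 *      // big-endian store, they line up with the 16 bits of reg_c[0]
 *      // that the reported mask makes available.
 * @endcode
 */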

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
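
/*
 * Note on the mask above: RTE_IPV4_HDR_DSCP_MASK covers the DSCP bits in
 * their in-header position (the upper 6 bits of the ToS byte). Shifting
 * right by 2 yields 0x3f, i.e. the 6 DSCP bits right-aligned, matching
 * how conf->dscp and the MLX5_MODI_OUT_IP_DSCP field are encoded.
 */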

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset of IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte-aligned, starting
         * from bits 0 to 5, to be compatible with IPv4. So there is no
         * need to shift the bits in the IPv6 case; rdma-core requires
         * the byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
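
/*
 * Note on the mask above: RTE_IPV6_HDR_DSCP_MASK places the 6 DSCP bits
 * at bits 27..22 of the 32-bit vtc_flow word, so shifting right by 22
 * right-aligns them to 0x3f, matching the byte-aligned encoding that
 * rdma-core expects (see the comment in the function above).
 */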
1358
1359 /**
1360  * Validate MARK item.
1361  *
1362  * @param[in] dev
1363  *   Pointer to the rte_eth_dev structure.
1364  * @param[in] item
1365  *   Item specification.
1366  * @param[in] attr
1367  *   Attributes of flow that includes this item.
1368  * @param[out] error
1369  *   Pointer to error structure.
1370  *
1371  * @return
1372  *   0 on success, a negative errno value otherwise and rte_errno is set.
1373  */
1374 static int
1375 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1376                            const struct rte_flow_item *item,
1377                            const struct rte_flow_attr *attr __rte_unused,
1378                            struct rte_flow_error *error)
1379 {
1380         struct mlx5_priv *priv = dev->data->dev_private;
1381         struct mlx5_dev_config *config = &priv->config;
1382         const struct rte_flow_item_mark *spec = item->spec;
1383         const struct rte_flow_item_mark *mask = item->mask;
1384         const struct rte_flow_item_mark nic_mask = {
1385                 .id = priv->sh->dv_mark_mask,
1386         };
1387         int ret;
1388
1389         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1390                 return rte_flow_error_set(error, ENOTSUP,
1391                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1392                                           "extended metadata feature"
1393                                           " isn't enabled");
1394         if (!mlx5_flow_ext_mreg_supported(dev))
1395                 return rte_flow_error_set(error, ENOTSUP,
1396                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1397                                           "extended metadata register"
1398                                           " isn't supported");
1399         if (!nic_mask.id)
1400                 return rte_flow_error_set(error, ENOTSUP,
1401                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1402                                           "extended metadata register"
1403                                           " isn't available");
1404         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1405         if (ret < 0)
1406                 return ret;
1407         if (!spec)
1408                 return rte_flow_error_set(error, EINVAL,
1409                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1410                                           item->spec,
1411                                           "data cannot be empty");
1412         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1413                 return rte_flow_error_set(error, EINVAL,
1414                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1415                                           &spec->id,
1416                                           "mark id exceeds the limit");
1417         if (!mask)
1418                 mask = &nic_mask;
1419         if (!mask->id)
1420                 return rte_flow_error_set(error, EINVAL,
1421                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1422                                         "mask cannot be zero");
1423
1424         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1425                                         (const uint8_t *)&nic_mask,
1426                                         sizeof(struct rte_flow_item_mark),
1427                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1428         if (ret < 0)
1429                 return ret;
1430         return 0;
1431 }
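/*
 * An illustrative pattern this validator would accept, assuming
 * extended metadata is enabled and priv->sh->dv_mark_mask exposes the
 * full 24-bit mark range:
 *
 *     struct rte_flow_item_mark spec = { .id = 0x2a };
 *     struct rte_flow_item_mark mask = { .id = 0xffffff };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_MARK,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */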
1432
1433 /**
1434  * Validate META item.
1435  *
1436  * @param[in] dev
1437  *   Pointer to the rte_eth_dev structure.
1438  * @param[in] item
1439  *   Item specification.
1440  * @param[in] attr
1441  *   Attributes of flow that includes this item.
1442  * @param[out] error
1443  *   Pointer to error structure.
1444  *
1445  * @return
1446  *   0 on success, a negative errno value otherwise and rte_errno is set.
1447  */
1448 static int
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1450                            const struct rte_flow_item *item,
1451                            const struct rte_flow_attr *attr,
1452                            struct rte_flow_error *error)
1453 {
1454         struct mlx5_priv *priv = dev->data->dev_private;
1455         struct mlx5_dev_config *config = &priv->config;
1456         const struct rte_flow_item_meta *spec = item->spec;
1457         const struct rte_flow_item_meta *mask = item->mask;
1458         struct rte_flow_item_meta nic_mask = {
1459                 .data = UINT32_MAX
1460         };
1461         int reg;
1462         int ret;
1463
1464         if (!spec)
1465                 return rte_flow_error_set(error, EINVAL,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1467                                           item->spec,
1468                                           "data cannot be empty");
1469         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470                 if (!mlx5_flow_ext_mreg_supported(dev))
1471                         return rte_flow_error_set(error, ENOTSUP,
1472                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1473                                           "extended metadata register"
1474                                           " isn't supported");
1475                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1476                 if (reg < 0)
1477                         return reg;
1478                 if (reg == REG_B)
1479                         return rte_flow_error_set(error, ENOTSUP,
1480                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1481                                           "match on reg_b "
1482                                           "isn't supported");
1483                 if (reg != REG_A)
1484                         nic_mask.data = priv->sh->dv_meta_mask;
1485         } else if (attr->transfer) {
1486                 return rte_flow_error_set(error, ENOTSUP,
1487                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1488                                         "extended metadata feature "
1489                                         "should be enabled when "
1490                                         "meta item is requested "
1491                                         "with e-switch mode");
1492         }
1493         if (!mask)
1494                 mask = &rte_flow_item_meta_mask;
1495         if (!mask->data)
1496                 return rte_flow_error_set(error, EINVAL,
1497                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498                                         "mask cannot be zero");
1499
1500         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501                                         (const uint8_t *)&nic_mask,
1502                                         sizeof(struct rte_flow_item_meta),
1503                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1504         return ret;
1505 }
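/*
 * An illustrative pattern this validator would accept, assuming the
 * value and mask fit the metadata register selected for the domain:
 *
 *     struct rte_flow_item_meta spec = { .data = 0x1234 };
 *     struct rte_flow_item_meta mask = { .data = 0xffff };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_META,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */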
1506
1507 /**
1508  * Validate TAG item.
1509  *
1510  * @param[in] dev
1511  *   Pointer to the rte_eth_dev structure.
1512  * @param[in] item
1513  *   Item specification.
1514  * @param[in] attr
1515  *   Attributes of flow that includes this item.
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
1522 static int
1523 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1524                           const struct rte_flow_item *item,
1525                           const struct rte_flow_attr *attr __rte_unused,
1526                           struct rte_flow_error *error)
1527 {
1528         const struct rte_flow_item_tag *spec = item->spec;
1529         const struct rte_flow_item_tag *mask = item->mask;
1530         const struct rte_flow_item_tag nic_mask = {
1531                 .data = RTE_BE32(UINT32_MAX),
1532                 .index = 0xff,
1533         };
1534         int ret;
1535
1536         if (!mlx5_flow_ext_mreg_supported(dev))
1537                 return rte_flow_error_set(error, ENOTSUP,
1538                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1539                                           "extensive metadata register"
1540                                           " isn't supported");
1541         if (!spec)
1542                 return rte_flow_error_set(error, EINVAL,
1543                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1544                                           item->spec,
1545                                           "data cannot be empty");
1546         if (!mask)
1547                 mask = &rte_flow_item_tag_mask;
1548         if (!mask->data)
1549                 return rte_flow_error_set(error, EINVAL,
1550                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1551                                         "mask cannot be zero");
1552
1553         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1554                                         (const uint8_t *)&nic_mask,
1555                                         sizeof(struct rte_flow_item_tag),
1556                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1557         if (ret < 0)
1558                 return ret;
1559         if (mask->index != 0xff)
1560                 return rte_flow_error_set(error, EINVAL,
1561                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1562                                           "partial mask for tag index"
1563                                           " is not supported");
1564         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1565         if (ret < 0)
1566                 return ret;
1567         MLX5_ASSERT(ret != REG_NON);
1568         return 0;
1569 }
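/*
 * An illustrative TAG pattern: the register index must be fully
 * masked (0xff) while the 32-bit data may be partially masked:
 *
 *     struct rte_flow_item_tag spec = { .data = 0xbeef, .index = 3 };
 *     struct rte_flow_item_tag mask = { .data = 0xffff, .index = 0xff };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_TAG,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */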
1570
1571 /**
1572  * Validate port_id item.
1573  *
1574  * @param[in] dev
1575  *   Pointer to the rte_eth_dev structure.
1576  * @param[in] item
1577  *   Item specification.
1578  * @param[in] attr
1579  *   Attributes of flow that includes this item.
1580  * @param[in] item_flags
1581  *   Bit-fields that holds the items detected until now.
1582  * @param[out] error
1583  *   Pointer to error structure.
1584  *
1585  * @return
1586  *   0 on success, a negative errno value otherwise and rte_errno is set.
1587  */
1588 static int
1589 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1590                               const struct rte_flow_item *item,
1591                               const struct rte_flow_attr *attr,
1592                               uint64_t item_flags,
1593                               struct rte_flow_error *error)
1594 {
1595         const struct rte_flow_item_port_id *spec = item->spec;
1596         const struct rte_flow_item_port_id *mask = item->mask;
1597         const struct rte_flow_item_port_id switch_mask = {
1598                         .id = 0xffffffff,
1599         };
1600         struct mlx5_priv *esw_priv;
1601         struct mlx5_priv *dev_priv;
1602         int ret;
1603
1604         if (!attr->transfer)
1605                 return rte_flow_error_set(error, EINVAL,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM,
1607                                           NULL,
1608                                           "match on port id is valid only"
1609                                           " when transfer flag is enabled");
1610         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1611                 return rte_flow_error_set(error, ENOTSUP,
1612                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1613                                           "multiple source ports are not"
1614                                           " supported");
1615         if (!mask)
1616                 mask = &switch_mask;
1617         if (mask->id != 0xffffffff)
1618                 return rte_flow_error_set(error, ENOTSUP,
1619                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1620                                            mask,
1621                                            "no support for partial mask on"
1622                                            " \"id\" field");
1623         ret = mlx5_flow_item_acceptable
1624                                 (item, (const uint8_t *)mask,
1625                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1626                                  sizeof(struct rte_flow_item_port_id),
1627                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1628         if (ret)
1629                 return ret;
1630         if (!spec)
1631                 return 0;
1632         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1633         if (!esw_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636                                           "failed to obtain E-Switch info for"
1637                                           " port");
1638         dev_priv = mlx5_dev_to_eswitch_info(dev);
1639         if (!dev_priv)
1640                 return rte_flow_error_set(error, rte_errno,
1641                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1642                                           NULL,
1643                                           "failed to obtain E-Switch info");
1644         if (esw_priv->domain_id != dev_priv->domain_id)
1645                 return rte_flow_error_set(error, EINVAL,
1646                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1647                                           "cannot match on a port from a"
1648                                           " different E-Switch");
1649         return 0;
1650 }
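/*
 * An illustrative pattern: matching on a source port is valid only on
 * transfer rules and with a full mask on the id field, assuming DPDK
 * port 1 belongs to the same E-Switch domain:
 *
 *     struct rte_flow_item_port_id spec = { .id = 1 };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *             .spec = &spec,
 *             .mask = &rte_flow_item_port_id_mask,
 *     };
 */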
1651
1652 /**
1653  * Validate VLAN item.
1654  *
1655  * @param[in] item
1656  *   Item specification.
1657  * @param[in] item_flags
1658  *   Bit-fields that holds the items detected until now.
1659  * @param[in] dev
1660  *   Ethernet device flow is being created on.
1661  * @param[out] error
1662  *   Pointer to error structure.
1663  *
1664  * @return
1665  *   0 on success, a negative errno value otherwise and rte_errno is set.
1666  */
1667 static int
1668 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1669                            uint64_t item_flags,
1670                            struct rte_eth_dev *dev,
1671                            struct rte_flow_error *error)
1672 {
1673         const struct rte_flow_item_vlan *mask = item->mask;
1674         const struct rte_flow_item_vlan nic_mask = {
1675                 .tci = RTE_BE16(UINT16_MAX),
1676                 .inner_type = RTE_BE16(UINT16_MAX),
1677                 .has_more_vlan = 1,
1678         };
1679         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1680         int ret;
1681         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1682                                         MLX5_FLOW_LAYER_INNER_L4) :
1683                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1684                                         MLX5_FLOW_LAYER_OUTER_L4);
1685         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1686                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1687
1688         if (item_flags & vlanm)
1689                 return rte_flow_error_set(error, EINVAL,
1690                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1691                                           "multiple VLAN layers not supported");
1692         else if ((item_flags & l34m) != 0)
1693                 return rte_flow_error_set(error, EINVAL,
1694                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1695                                           "VLAN cannot follow L3/L4 layer");
1696         if (!mask)
1697                 mask = &rte_flow_item_vlan_mask;
1698         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1699                                         (const uint8_t *)&nic_mask,
1700                                         sizeof(struct rte_flow_item_vlan),
1701                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1702         if (ret)
1703                 return ret;
1704         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1705                 struct mlx5_priv *priv = dev->data->dev_private;
1706
1707                 if (priv->vmwa_context) {
1708                         /*
1709                          * Non-NULL context means we have a virtual machine
1710                          * and SR-IOV enabled, we have to create VLAN interface
1711                          * to make hypervisor to setup E-Switch vport
1712                          * context correctly. We avoid creating the multiple
1713                          * VLAN interfaces, so we cannot support VLAN tag mask.
1714                          */
1715                         return rte_flow_error_set(error, EINVAL,
1716                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1717                                                   item,
1718                                                   "VLAN tag mask is not"
1719                                                   " supported in virtual"
1720                                                   " environment");
1721                 }
1722         }
1723         return 0;
1724 }
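/*
 * An illustrative pattern matching outer VLAN ID 100; note that in an
 * SR-IOV/VM environment (vmwa_context set) the TCI mask must be
 * exactly the 12-bit VID mask 0x0fff:
 *
 *     struct rte_flow_item_vlan spec = { .tci = RTE_BE16(100) };
 *     struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */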
1725
1726 /*
1727  * GTP flags are contained in 1 byte of the format:
1728  * -------------------------------------------
1729  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1730  * |-----------------------------------------|
1731  * | value | Version | PT | Res | E | S | PN |
1732  * -------------------------------------------
1733  *
1734  * Matching is supported only for GTP flags E, S, PN.
1735  */
1736 #define MLX5_GTP_FLAGS_MASK     0x07
1737
1738 /**
1739  * Validate GTP item.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in] item
1744  *   Item specification.
1745  * @param[in] item_flags
1746  *   Bit-fields that holds the items detected until now.
1747  * @param[out] error
1748  *   Pointer to error structure.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755                           const struct rte_flow_item *item,
1756                           uint64_t item_flags,
1757                           struct rte_flow_error *error)
1758 {
1759         struct mlx5_priv *priv = dev->data->dev_private;
1760         const struct rte_flow_item_gtp *spec = item->spec;
1761         const struct rte_flow_item_gtp *mask = item->mask;
1762         const struct rte_flow_item_gtp nic_mask = {
1763                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1764                 .msg_type = 0xff,
1765                 .teid = RTE_BE32(0xffffffff),
1766         };
1767
1768         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1771                                           "GTP support is not enabled");
1772         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1775                                           "multiple tunnel layers not"
1776                                           " supported");
1777         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1780                                           "no outer UDP layer found");
1781         if (!mask)
1782                 mask = &rte_flow_item_gtp_mask;
1783         if (spec && (spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK))
1784                 return rte_flow_error_set(error, ENOTSUP,
1785                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1786                                           "Match is supported for GTP"
1787                                           " flags only");
1788         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789                                          (const uint8_t *)&nic_mask,
1790                                          sizeof(struct rte_flow_item_gtp),
1791                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1792 }
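/*
 * An illustrative pattern matching on the GTP TEID with the S flag
 * (bit 0x02 of v_pt_rsv_flags) set; only the E, S and PN bits covered
 * by MLX5_GTP_FLAGS_MASK may be matched:
 *
 *     struct rte_flow_item_gtp spec = {
 *             .v_pt_rsv_flags = 0x02,
 *             .teid = RTE_BE32(0x1234),
 *     };
 *     struct rte_flow_item_gtp mask = {
 *             .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
 *             .teid = RTE_BE32(0xffffffff),
 *     };
 */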
1793
1794 /**
1795  * Validate IPV4 item.
1796  * Use the existing validation function mlx5_flow_validate_item_ipv4()
1797  * and add specific validation of the fragment_offset field.
1798  *
1799  * @param[in] item
1800  *   Item specification.
1801  * @param[in] item_flags
1802  *   Bit-fields that holds the items detected until now.
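 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).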
1803  * @param[out] error
1804  *   Pointer to error structure.
1805  *
1806  * @return
1807  *   0 on success, a negative errno value otherwise and rte_errno is set.
1808  */
1809 static int
1810 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1811                            uint64_t item_flags,
1812                            uint64_t last_item,
1813                            uint16_t ether_type,
1814                            struct rte_flow_error *error)
1815 {
1816         int ret;
1817         const struct rte_flow_item_ipv4 *spec = item->spec;
1818         const struct rte_flow_item_ipv4 *last = item->last;
1819         const struct rte_flow_item_ipv4 *mask = item->mask;
1820         rte_be16_t fragment_offset_spec = 0;
1821         rte_be16_t fragment_offset_last = 0;
1822         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1823                 .hdr = {
1824                         .src_addr = RTE_BE32(0xffffffff),
1825                         .dst_addr = RTE_BE32(0xffffffff),
1826                         .type_of_service = 0xff,
1827                         .fragment_offset = RTE_BE16(0xffff),
1828                         .next_proto_id = 0xff,
1829                         .time_to_live = 0xff,
1830                 },
1831         };
1832
1833         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1834                                            ether_type, &nic_ipv4_mask,
1835                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1836         if (ret < 0)
1837                 return ret;
1838         if (spec && mask)
1839                 fragment_offset_spec = spec->hdr.fragment_offset &
1840                                        mask->hdr.fragment_offset;
1841         if (!fragment_offset_spec)
1842                 return 0;
1843         /*
1844          * spec and mask are valid, enforce using full mask to make sure the
1845          * complete value is used correctly.
1846          */
1847         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1848                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1849                 return rte_flow_error_set(error, EINVAL,
1850                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1851                                           item, "must use full mask for"
1852                                           " fragment_offset");
1853         /*
1854          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1855          * indicating this is the 1st fragment of a fragmented packet.
1856          * This is not yet supported in MLX5, return appropriate error message.
1857          */
1858         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1859                 return rte_flow_error_set(error, ENOTSUP,
1860                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1861                                           "match on first fragment not "
1862                                           "supported");
1863         if (fragment_offset_spec && !last)
1864                 return rte_flow_error_set(error, ENOTSUP,
1865                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1866                                           "specified value not supported");
1867         /* spec and last are valid, validate the specified range. */
1868         fragment_offset_last = last->hdr.fragment_offset &
1869                                mask->hdr.fragment_offset;
1870         /*
1871          * Match on fragment_offset spec 0x2001 and last 0x3fff
1872          * means MF is 1 and frag-offset is > 0.
1873          * Such a packet is the 2nd or a later fragment, excluding the last.
1874          * This is not yet supported in MLX5, return appropriate
1875          * error message.
1876          */
1877         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1878             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1879                 return rte_flow_error_set(error, ENOTSUP,
1880                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1881                                           last, "match on following "
1882                                           "fragments not supported");
1883         /*
1884          * Match on fragment_offset spec 0x0001 and last 0x1fff
1885          * means MF is 0 and frag-offset is > 0.
1886          * Such a packet is the last fragment of a fragmented packet.
1887          * This is not yet supported in MLX5, return appropriate
1888          * error message.
1889          */
1890         if (fragment_offset_spec == RTE_BE16(1) &&
1891             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1892                 return rte_flow_error_set(error, ENOTSUP,
1893                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1894                                           last, "match on last "
1895                                           "fragment not supported");
1896         /*
1897          * Match on fragment_offset spec 0x0001 and last 0x3fff
1898          * means MF and/or frag-offset is not 0.
1899          * This is a fragmented packet.
1900          * Other range values are invalid and rejected.
1901          */
1902         if (!(fragment_offset_spec == RTE_BE16(1) &&
1903               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1904                 return rte_flow_error_set(error, ENOTSUP,
1905                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1906                                           "specified range not supported");
1907         return 0;
1908 }
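/*
 * An illustrative range that passes the checks above: spec 0x0001 with
 * last 0x3fff under the full 0x3fff mask matches any fragmented packet
 * (MF set and/or a non-zero fragment offset):
 *
 *     struct rte_flow_item_ipv4 spec = {
 *             .hdr.fragment_offset = RTE_BE16(1),
 *     };
 *     struct rte_flow_item_ipv4 last = {
 *             .hdr.fragment_offset = RTE_BE16(0x3fff),
 *     };
 *     struct rte_flow_item_ipv4 mask = {
 *             .hdr.fragment_offset = RTE_BE16(0x3fff),
 *     };
 */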
1909
1910 /**
1911  * Validate IPV6 fragment extension item.
1912  *
1913  * @param[in] item
1914  *   Item specification.
1915  * @param[in] item_flags
1916  *   Bit-fields that holds the items detected until now.
1917  * @param[out] error
1918  *   Pointer to error structure.
1919  *
1920  * @return
1921  *   0 on success, a negative errno value otherwise and rte_errno is set.
1922  */
1923 static int
1924 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1925                                     uint64_t item_flags,
1926                                     struct rte_flow_error *error)
1927 {
1928         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1929         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1930         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1931         rte_be16_t frag_data_spec = 0;
1932         rte_be16_t frag_data_last = 0;
1933         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1934         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1935                                       MLX5_FLOW_LAYER_OUTER_L4;
1936         int ret = 0;
1937         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1938                 .hdr = {
1939                         .next_header = 0xff,
1940                         .frag_data = RTE_BE16(0xffff),
1941                 },
1942         };
1943
1944         if (item_flags & l4m)
1945                 return rte_flow_error_set(error, EINVAL,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1947                                           "ipv6 fragment extension item cannot "
1948                                           "follow L4 item.");
1949         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1950             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1951                 return rte_flow_error_set(error, EINVAL,
1952                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1953                                           "ipv6 fragment extension item must "
1954                                           "follow ipv6 item");
1955         if (spec && mask)
1956                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1957         if (!frag_data_spec)
1958                 return 0;
1959         /*
1960          * spec and mask are valid, enforce using full mask to make sure the
1961          * complete value is used correctly.
1962          */
1963         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1964                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1965                 return rte_flow_error_set(error, EINVAL,
1966                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1967                                           item, "must use full mask for"
1968                                           " frag_data");
1969         /*
1970          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
1971          * This is the 1st fragment of a fragmented packet.
1972          */
1973         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1974                 return rte_flow_error_set(error, ENOTSUP,
1975                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1976                                           "match on first fragment not "
1977                                           "supported");
1978         if (frag_data_spec && !last)
1979                 return rte_flow_error_set(error, EINVAL,
1980                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                           "specified value not supported");
1982         ret = mlx5_flow_item_acceptable
1983                                 (item, (const uint8_t *)mask,
1984                                  (const uint8_t *)&nic_mask,
1985                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
1986                                  MLX5_ITEM_RANGE_ACCEPTED, error);
1987         if (ret)
1988                 return ret;
1989         /* spec and last are valid, validate the specified range. */
1990         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1991         /*
1992          * Match on frag_data spec 0x0009 and last 0xfff9
1993          * means M is 1 and frag-offset is > 0.
1994          * Such a packet is the 2nd or a later fragment, excluding the last.
1995          * This is not yet supported in MLX5, return appropriate
1996          * error message.
1997          */
1998         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1999                                        RTE_IPV6_EHDR_MF_MASK) &&
2000             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2001                 return rte_flow_error_set(error, ENOTSUP,
2002                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2003                                           last, "match on following "
2004                                           "fragments not supported");
2005         /*
2006          * Match on frag_data spec 0x0008 and last 0xfff8
2007          * means M is 0 and frag-offset is > 0.
2008          * Such a packet is the last fragment of a fragmented packet.
2009          * This is not yet supported in MLX5, return appropriate
2010          * error message.
2011          */
2012         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2013             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2014                 return rte_flow_error_set(error, ENOTSUP,
2015                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2016                                           last, "match on last "
2017                                           "fragment not supported");
2018         /* Other range values are invalid and rejected. */
2019         return rte_flow_error_set(error, EINVAL,
2020                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2021                                   "specified range not supported");
2022 }
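/*
 * Since every non-zero frag_data spec/last combination is rejected
 * above at this revision, an illustrative pattern this validator
 * accepts matches only the presence of the extension header (or its
 * next_header field) with a zero frag_data mask:
 *
 *     struct rte_flow_item_ipv6_frag_ext spec = {
 *             .hdr.next_header = IPPROTO_UDP,
 *     };
 *     struct rte_flow_item_ipv6_frag_ext mask = {
 *             .hdr.next_header = 0xff,
 *     };
 */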
2023
2024 /**
2025  * Validate the pop VLAN action.
2026  *
2027  * @param[in] dev
2028  *   Pointer to the rte_eth_dev structure.
2029  * @param[in] action_flags
2030  *   Holds the actions detected until now.
2031  * @param[in] action
2032  *   Pointer to the pop vlan action.
2033  * @param[in] item_flags
2034  *   The items found in this flow rule.
2035  * @param[in] attr
2036  *   Pointer to flow attributes.
2037  * @param[out] error
2038  *   Pointer to error structure.
2039  *
2040  * @return
2041  *   0 on success, a negative errno value otherwise and rte_errno is set.
2042  */
2043 static int
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045                                  uint64_t action_flags,
2046                                  const struct rte_flow_action *action,
2047                                  uint64_t item_flags,
2048                                  const struct rte_flow_attr *attr,
2049                                  struct rte_flow_error *error)
2050 {
2051         const struct mlx5_priv *priv = dev->data->dev_private;
2052
2055         if (!priv->sh->pop_vlan_action)
2056                 return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                           NULL,
2059                                           "pop vlan action is not supported");
2060         if (attr->egress)
2061                 return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2063                                           NULL,
2064                                           "pop vlan action not supported for "
2065                                           "egress");
2066         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067                 return rte_flow_error_set(error, ENOTSUP,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "no support for multiple VLAN "
2070                                           "actions");
2071         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074                 return rte_flow_error_set(error, ENOTSUP,
2075                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2076                                           NULL,
2077                                           "cannot pop vlan after decap without "
2078                                           "match on inner vlan in the flow");
2079         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082                 return rte_flow_error_set(error, ENOTSUP,
2083                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2084                                           NULL,
2085                                           "cannot pop vlan without a "
2086                                           "match on (outer) vlan in the flow");
2087         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088                 return rte_flow_error_set(error, EINVAL,
2089                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2090                                           "wrong action order, port_id should "
2091                                           "be after pop VLAN action");
2092         if (!attr->transfer && priv->representor)
2093                 return rte_flow_error_set(error, ENOTSUP,
2094                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095                                           "pop vlan action for VF representor "
2096                                           "not supported on NIC table");
2097         return 0;
2098 }
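/*
 * An illustrative action list satisfying the rules above for an
 * ingress NIC rule whose pattern includes an outer VLAN item; "queue"
 * stands for a struct rte_flow_action_queue defined elsewhere:
 *
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */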
2099
2100 /**
2101  * Get default VLAN info from the VLAN match info in the pattern.
2102  *
2103  * @param[in] items
2104  *   The list of item specifications.
2105  * @param[out] vlan
2106  *   Pointer to the VLAN header structure to fill.
2110  */
2111 static void
2112 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2113                                   struct rte_vlan_hdr *vlan)
2114 {
2115         const struct rte_flow_item_vlan nic_mask = {
2116                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2117                                 MLX5DV_FLOW_VLAN_VID_MASK),
2118                 .inner_type = RTE_BE16(0xffff),
2119         };
2120
2121         if (items == NULL)
2122                 return;
2123         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2124                 int type = items->type;
2125
2126                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2127                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2128                         break;
2129         }
2130         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2131                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2132                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2133
2134                 /* If VLAN item in pattern doesn't contain data, return here. */
2135                 if (!vlan_v)
2136                         return;
2137                 if (!vlan_m)
2138                         vlan_m = &nic_mask;
2139                 /* Only full match values are accepted */
2140                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2141                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2142                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2143                         vlan->vlan_tci |=
2144                                 rte_be_to_cpu_16(vlan_v->tci &
2145                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2146                 }
2147                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2148                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2149                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2150                         vlan->vlan_tci |=
2151                                 rte_be_to_cpu_16(vlan_v->tci &
2152                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2153                 }
2154                 if (vlan_m->inner_type == nic_mask.inner_type)
2155                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2156                                                            vlan_m->inner_type);
2157         }
2158 }
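/*
 * TCI decomposition used above: PCP lives in bits 15-13 and VID in
 * bits 11-0, so a host-order TCI of 0xa064 yields
 * PCP = 0xa064 >> 13 = 5 and VID = 0xa064 & 0x0fff = 100.
 */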
2159
2160 /**
2161  * Validate the push VLAN action.
2162  *
2163  * @param[in] dev
2164  *   Pointer to the rte_eth_dev structure.
2165  * @param[in] action_flags
2166  *   Holds the actions detected until now.
2167  * @param[in] vlan_m
2168  *   Pointer to the VLAN mask of the VLAN item in the pattern, or NULL.
2169  * @param[in] action
2170  *   Pointer to the action structure.
2171  * @param[in] attr
2172  *   Pointer to flow attributes
2173  * @param[out] error
2174  *   Pointer to error structure.
2175  *
2176  * @return
2177  *   0 on success, a negative errno value otherwise and rte_errno is set.
2178  */
2179 static int
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181                                   uint64_t action_flags,
2182                                   const struct rte_flow_item_vlan *vlan_m,
2183                                   const struct rte_flow_action *action,
2184                                   const struct rte_flow_attr *attr,
2185                                   struct rte_flow_error *error)
2186 {
2187         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188         const struct mlx5_priv *priv = dev->data->dev_private;
2189
2190         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192                 return rte_flow_error_set(error, EINVAL,
2193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2194                                           "invalid vlan ethertype");
2195         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196                 return rte_flow_error_set(error, EINVAL,
2197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2198                                           "wrong action order, port_id should "
2199                                           "be after push VLAN");
2200         if (!attr->transfer && priv->representor)
2201                 return rte_flow_error_set(error, ENOTSUP,
2202                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203                                           "push vlan action for VF representor "
2204                                           "not supported on NIC table");
2205         if (vlan_m &&
2206             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210             !(mlx5_flow_find_action
2211                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212                 return rte_flow_error_set(error, EINVAL,
2213                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2214                                           "not full match mask on VLAN PCP and "
2215                                           "there is no of_set_vlan_pcp action, "
2216                                           "push VLAN action cannot figure out "
2217                                           "PCP value");
2218         if (vlan_m &&
2219             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223             !(mlx5_flow_find_action
2224                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2227                                           "not full match mask on VLAN VID and "
2228                                           "there is no of_set_vlan_vid action, "
2229                                           "push VLAN action cannot figure out "
2230                                           "VID value");
2232         return 0;
2233 }
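/*
 * An illustrative action list satisfying the ordering rules above:
 * push VLAN first, set PCP/VID explicitly, port_id last ("port" stands
 * for a struct rte_flow_action_port_id defined elsewhere):
 *
 *     struct rte_flow_action_of_push_vlan push = {
 *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *     };
 *     struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *     struct rte_flow_action_of_set_vlan_vid vid = {
 *             .vlan_vid = RTE_BE16(100),
 *     };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *             { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */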
2234
2235 /**
2236  * Validate the set VLAN PCP.
2237  *
2238  * @param[in] action_flags
2239  *   Holds the actions detected until now.
2240  * @param[in] actions
2241  *   Pointer to the list of actions remaining in the flow rule.
2242  * @param[out] error
2243  *   Pointer to error structure.
2244  *
2245  * @return
2246  *   0 on success, a negative errno value otherwise and rte_errno is set.
2247  */
2248 static int
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250                                      const struct rte_flow_action actions[],
2251                                      struct rte_flow_error *error)
2252 {
2253         const struct rte_flow_action *action = actions;
2254         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2255
2256         if (conf->vlan_pcp > 7)
2257                 return rte_flow_error_set(error, EINVAL,
2258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2259                                           "VLAN PCP value is too big");
2260         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261                 return rte_flow_error_set(error, ENOTSUP,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "set VLAN PCP action must follow "
2264                                           "the push VLAN action");
2265         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266                 return rte_flow_error_set(error, ENOTSUP,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "Multiple VLAN PCP modifications are "
2269                                           "not supported");
2270         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "wrong action order, port_id should "
2274                                           "be after set VLAN PCP");
2275         return 0;
2276 }
2277
2278 /**
2279  * Validate the set VLAN VID.
2280  *
2281  * @param[in] item_flags
2282  *   Holds the items detected in this rule.
2283  * @param[in] action_flags
2284  *   Holds the actions detected until now.
2285  * @param[in] actions
2286  *   Pointer to the list of actions remaining in the flow rule.
2287  * @param[out] error
2288  *   Pointer to error structure.
2289  *
2290  * @return
2291  *   0 on success, a negative errno value otherwise and rte_errno is set.
2292  */
2293 static int
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295                                      uint64_t action_flags,
2296                                      const struct rte_flow_action actions[],
2297                                      struct rte_flow_error *error)
2298 {
2299         const struct rte_flow_action *action = actions;
2300         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2301
2302         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303                 return rte_flow_error_set(error, EINVAL,
2304                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2305                                           "VLAN VID value is too big");
2306         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308                 return rte_flow_error_set(error, ENOTSUP,
2309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2310                                           "set VLAN VID action must follow push"
2311                                           " VLAN action or match on VLAN item");
2312         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313                 return rte_flow_error_set(error, ENOTSUP,
2314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2315                                           "Multiple VLAN VID modifications are "
2316                                           "not supported");
2317         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318                 return rte_flow_error_set(error, EINVAL,
2319                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2320                                           "wrong action order, port_id should "
2321                                           "be after set VLAN VID");
2322         return 0;
2323 }
2324
2325 /**
2326  * Validate the FLAG action.
2327  *
2328  * @param[in] dev
2329  *   Pointer to the rte_eth_dev structure.
2330  * @param[in] action_flags
2331  *   Holds the actions detected until now.
2332  * @param[in] attr
2333  *   Pointer to flow attributes
2334  * @param[out] error
2335  *   Pointer to error structure.
2336  *
2337  * @return
2338  *   0 on success, a negative errno value otherwise and rte_errno is set.
2339  */
2340 static int
2341 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2342                              uint64_t action_flags,
2343                              const struct rte_flow_attr *attr,
2344                              struct rte_flow_error *error)
2345 {
2346         struct mlx5_priv *priv = dev->data->dev_private;
2347         struct mlx5_dev_config *config = &priv->config;
2348         int ret;
2349
2350         /* Fall back if no extended metadata register support. */
2351         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2352                 return mlx5_flow_validate_action_flag(action_flags, attr,
2353                                                       error);
2354         /* Extensive metadata mode requires registers. */
2355         if (!mlx5_flow_ext_mreg_supported(dev))
2356                 return rte_flow_error_set(error, ENOTSUP,
2357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2358                                           "no metadata registers "
2359                                           "to support flag action");
2360         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2361                 return rte_flow_error_set(error, ENOTSUP,
2362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2363                                           "extended metadata register"
2364                                           " isn't available");
2365         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2366         if (ret < 0)
2367                 return ret;
2368         MLX5_ASSERT(ret > 0);
2369         if (action_flags & MLX5_FLOW_ACTION_MARK)
2370                 return rte_flow_error_set(error, EINVAL,
2371                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2372                                           "can't mark and flag in same flow");
2373         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2374                 return rte_flow_error_set(error, EINVAL,
2375                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2376                                           "can't have 2 flag"
2377                                           " actions in same flow");
2378         return 0;
2379 }
2380
2381 /**
2382  * Validate MARK action.
2383  *
2384  * @param[in] dev
2385  *   Pointer to the rte_eth_dev structure.
2386  * @param[in] action
2387  *   Pointer to action.
2388  * @param[in] action_flags
2389  *   Holds the actions detected until now.
2390  * @param[in] attr
2391  *   Pointer to flow attributes
2392  * @param[out] error
2393  *   Pointer to error structure.
2394  *
2395  * @return
2396  *   0 on success, a negative errno value otherwise and rte_errno is set.
2397  */
2398 static int
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400                              const struct rte_flow_action *action,
2401                              uint64_t action_flags,
2402                              const struct rte_flow_attr *attr,
2403                              struct rte_flow_error *error)
2404 {
2405         struct mlx5_priv *priv = dev->data->dev_private;
2406         struct mlx5_dev_config *config = &priv->config;
2407         const struct rte_flow_action_mark *mark = action->conf;
2408         int ret;
2409
2410         /* Fall back if no extended metadata register support. */
2411         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412                 return mlx5_flow_validate_action_mark(action, action_flags,
2413                                                       attr, error);
2414         /* Extensive metadata mode requires registers. */
2415         if (!mlx5_flow_ext_mreg_supported(dev))
2416                 return rte_flow_error_set(error, ENOTSUP,
2417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418                                           "no metadata registers "
2419                                           "to support mark action");
2420         if (!priv->sh->dv_mark_mask)
2421                 return rte_flow_error_set(error, ENOTSUP,
2422                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423                                           "extended metadata register"
2424                                           " isn't available");
2425         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2426         if (ret < 0)
2427                 return ret;
2428         MLX5_ASSERT(ret > 0);
2429         if (!mark)
2430                 return rte_flow_error_set(error, EINVAL,
2431                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2432                                           "configuration cannot be null");
2433         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434                 return rte_flow_error_set(error, EINVAL,
2435                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2436                                           &mark->id,
2437                                           "mark id exceeds the limit");
2438         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441                                           "can't flag and mark in same flow");
2442         if (action_flags & MLX5_FLOW_ACTION_MARK)
2443                 return rte_flow_error_set(error, EINVAL,
2444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445                                           "can't have 2 mark actions in same"
2446                                           " flow");
2447         return 0;
2448 }
2449
2450 /**
2451  * Validate SET_META action.
2452  *
2453  * @param[in] dev
2454  *   Pointer to the rte_eth_dev structure.
2455  * @param[in] action
2456  *   Pointer to the action structure.
2457  * @param[in] action_flags
2458  *   Holds the actions detected until now.
2459  * @param[in] attr
2460  *   Pointer to flow attributes
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 static int
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469                                  const struct rte_flow_action *action,
2470                                  uint64_t action_flags __rte_unused,
2471                                  const struct rte_flow_attr *attr,
2472                                  struct rte_flow_error *error)
2473 {
2474         const struct rte_flow_action_set_meta *conf;
2475         uint32_t nic_mask = UINT32_MAX;
2476         int reg;
2477
2478         if (!mlx5_flow_ext_mreg_supported(dev))
2479                 return rte_flow_error_set(error, ENOTSUP,
2480                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2481                                           "extended metadata register"
2482                                           " isn't supported");
2483         reg = flow_dv_get_metadata_reg(dev, attr, error);
2484         if (reg < 0)
2485                 return reg;
2486         if (reg != REG_A && reg != REG_B) {
2487                 struct mlx5_priv *priv = dev->data->dev_private;
2488
2489                 nic_mask = priv->sh->dv_meta_mask;
2490         }
2491         if (!(action->conf))
2492                 return rte_flow_error_set(error, EINVAL,
2493                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2494                                           "configuration cannot be null");
2495         conf = (const struct rte_flow_action_set_meta *)action->conf;
2496         if (!conf->mask)
2497                 return rte_flow_error_set(error, EINVAL,
2498                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2499                                           "zero mask doesn't have any effect");
2500         if (conf->mask & ~nic_mask)
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2503                                           "meta data must be within reg C0");
2504         return 0;
2505 }
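/*
 * An illustrative SET_META action: write the upper 16 bits of the
 * metadata register while preserving the rest via the mask (subject
 * to the register mask checks above):
 *
 *     struct rte_flow_action_set_meta meta = {
 *             .data = 0xabcd0000,
 *             .mask = 0xffff0000,
 *     };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *             .conf = &meta,
 *     };
 */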
2506
2507 /**
2508  * Validate SET_TAG action.
2509  *
2510  * @param[in] dev
2511  *   Pointer to the rte_eth_dev structure.
2512  * @param[in] action
2513  *   Pointer to the action structure.
2514  * @param[in] action_flags
2515  *   Holds the actions detected until now.
2516  * @param[in] attr
2517  *   Pointer to flow attributes
2518  * @param[out] error
2519  *   Pointer to error structure.
2520  *
2521  * @return
2522  *   0 on success, a negative errno value otherwise and rte_errno is set.
2523  */
2524 static int
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526                                 const struct rte_flow_action *action,
2527                                 uint64_t action_flags,
2528                                 const struct rte_flow_attr *attr,
2529                                 struct rte_flow_error *error)
2530 {
2531         const struct rte_flow_action_set_tag *conf;
2532         const uint64_t terminal_action_flags =
2533                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534                 MLX5_FLOW_ACTION_RSS;
2535         int ret;
2536
2537         if (!mlx5_flow_ext_mreg_supported(dev))
2538                 return rte_flow_error_set(error, ENOTSUP,
2539                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2540                                           "extensive metadata register"
2541                                           " isn't supported");
2542         if (!(action->conf))
2543                 return rte_flow_error_set(error, EINVAL,
2544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2545                                           "configuration cannot be null");
2546         conf = (const struct rte_flow_action_set_tag *)action->conf;
2547         if (!conf->mask)
2548                 return rte_flow_error_set(error, EINVAL,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2550                                           "zero mask doesn't have any effect");
2551         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2552         if (ret < 0)
2553                 return ret;
2554         if (!attr->transfer && attr->ingress &&
2555             (action_flags & terminal_action_flags))
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2558                                           "set_tag has no effect"
2559                                           " with terminal actions");
2560         return 0;
2561 }
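
/*
 * Illustrative sketch only (hypothetical values): SET_TAG writes to an
 * application tag register selected by 'index'; per the check above it
 * is rejected when it follows a terminal (fate) action on an ingress
 * non-transfer flow.
 *
 * @code
 *	struct rte_flow_action_set_tag tag_conf = {
 *		.data = 0x1234,
 *		.mask = 0xffff,
 *		.index = 0,
 *	};
 * @endcode
 */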
2562
2563 /**
2564  * Validate count action.
2565  *
2566  * @param[in] dev
2567  *   Pointer to rte_eth_dev structure.
2568  * @param[out] error
2569  *   Pointer to error structure.
2570  *
2571  * @return
2572  *   0 on success, a negative errno value otherwise and rte_errno is set.
2573  */
2574 static int
2575 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2576                               struct rte_flow_error *error)
2577 {
2578         struct mlx5_priv *priv = dev->data->dev_private;
2579
2580         if (!priv->config.devx)
2581                 goto notsup_err;
2582 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2583         return 0;
2584 #endif
2585 notsup_err:
2586         return rte_flow_error_set
2587                       (error, ENOTSUP,
2588                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2589                        NULL,
2590                        "count action not supported");
2591 }
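
/*
 * Note: the count action requires DevX support at run time
 * (priv->config.devx) and DevX counters at build time
 * (HAVE_IBV_FLOW_DEVX_COUNTERS); otherwise ENOTSUP is returned.
 */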
2592
2593 /**
2594  * Validate the L2 encap action.
2595  *
2596  * @param[in] dev
2597  *   Pointer to the rte_eth_dev structure.
2598  * @param[in] action_flags
2599  *   Holds the actions detected until now.
2600  * @param[in] action
2601  *   Pointer to the action structure.
2602  * @param[in] attr
2603  *   Pointer to flow attributes.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612                                  uint64_t action_flags,
2613                                  const struct rte_flow_action *action,
2614                                  const struct rte_flow_attr *attr,
2615                                  struct rte_flow_error *error)
2616 {
2617         const struct mlx5_priv *priv = dev->data->dev_private;
2618
2619         if (!(action->conf))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2622                                           "configuration cannot be null");
2623         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624                 return rte_flow_error_set(error, EINVAL,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626                                           "can only have a single encap action "
2627                                           "in a flow");
2628         if (!attr->transfer && priv->representor)
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631                                           "encap action for VF representor "
2632                                           "not supported on NIC table");
2633         return 0;
2634 }
2635
2636 /**
2637  * Validate a decap action.
2638  *
2639  * @param[in] dev
2640  *   Pointer to the rte_eth_dev structure.
2641  * @param[in] action_flags
2642  *   Holds the actions detected until now.
2643  * @param[in] attr
2644  *   Pointer to flow attributes
2645  * @param[out] error
2646  *   Pointer to error structure.
2647  *
2648  * @return
2649  *   0 on success, a negative errno value otherwise and rte_errno is set.
2650  */
2651 static int
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653                               uint64_t action_flags,
2654                               const struct rte_flow_attr *attr,
2655                               struct rte_flow_error *error)
2656 {
2657         const struct mlx5_priv *priv = dev->data->dev_private;
2658
2659         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660             !priv->config.decap_en)
2661                 return rte_flow_error_set(error, ENOTSUP,
2662                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663                                           "decap is not enabled");
2664         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665                 return rte_flow_error_set(error, ENOTSUP,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2667                                           action_flags &
2668                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2669                                           "have a single decap action" : "decap "
2670                                           "after encap is not supported");
2671         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672                 return rte_flow_error_set(error, EINVAL,
2673                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674                                           "can't have decap action after"
2675                                           " modify action");
2676         if (attr->egress)
2677                 return rte_flow_error_set(error, ENOTSUP,
2678                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2679                                           NULL,
2680                                           "decap action not supported for "
2681                                           "egress");
2682         if (!attr->transfer && priv->representor)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685                                           "decap action for VF representor "
2686                                           "not supported on NIC table");
2687         return 0;
2688 }
2689
2690 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2691
2692 /**
2693  * Validate the raw encap and decap actions.
2694  *
2695  * @param[in] dev
2696  *   Pointer to the rte_eth_dev structure.
2697  * @param[in] decap
2698  *   Pointer to the decap action.
2699  * @param[in] encap
2700  *   Pointer to the encap action.
2701  * @param[in] attr
2702  *   Pointer to flow attributes
2703  * @param[in, out] action_flags
2704  *   Holds the actions detected until now.
2705  * @param[out] actions_n
2706  *   Pointer to the actions counter.
2707  * @param[out] error
2708  *   Pointer to error structure.
2709  *
2710  * @return
2711  *   0 on success, a negative errno value otherwise and rte_errno is set.
2712  */
2713 static int
2714 flow_dv_validate_action_raw_encap_decap
2715         (struct rte_eth_dev *dev,
2716          const struct rte_flow_action_raw_decap *decap,
2717          const struct rte_flow_action_raw_encap *encap,
2718          const struct rte_flow_attr *attr, uint64_t *action_flags,
2719          int *actions_n, struct rte_flow_error *error)
2720 {
2721         const struct mlx5_priv *priv = dev->data->dev_private;
2722         int ret;
2723
2724         if (encap && (!encap->size || !encap->data))
2725                 return rte_flow_error_set(error, EINVAL,
2726                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2727                                           "raw encap data cannot be empty");
2728         if (decap && encap) {
2729                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2730                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2731                         /* L3 encap. */
2732                         decap = NULL;
2733                 else if (encap->size <=
2734                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2735                            decap->size >
2736                            MLX5_ENCAPSULATION_DECISION_SIZE)
2737                         /* L3 decap. */
2738                         encap = NULL;
2739                 else if (encap->size >
2740                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2741                            decap->size >
2742                            MLX5_ENCAPSULATION_DECISION_SIZE)
2743                         /* 2 L2 actions: encap and decap. */
2744                         ;
2745                 else
2746                         return rte_flow_error_set(error,
2747                                 ENOTSUP,
2748                                 RTE_FLOW_ERROR_TYPE_ACTION,
2749                                 NULL, "unsupported combination: "
2750                                 "raw decap and raw encap sizes "
2751                                 "are both too small");
2752         }
2753         if (decap) {
2754                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2755                                                     error);
2756                 if (ret < 0)
2757                         return ret;
2758                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2759                 ++(*actions_n);
2760         }
2761         if (encap) {
2762                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2763                         return rte_flow_error_set(error, ENOTSUP,
2764                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2765                                                   NULL,
2766                                                   "raw encap size is too small");
2767                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2768                         return rte_flow_error_set(error, EINVAL,
2769                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2770                                                   NULL,
2771                                                   "more than one encap action");
2772                 if (!attr->transfer && priv->representor)
2773                         return rte_flow_error_set
2774                                         (error, ENOTSUP,
2775                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2776                                          "encap action for VF representor "
2777                                          "not supported on NIC table");
2778                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2779                 ++(*actions_n);
2780         }
2781         return 0;
2782 }
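
/*
 * Summary of the combined raw decap + raw encap decision above, where
 * "small"/"big" means at most/more than MLX5_ENCAPSULATION_DECISION_SIZE:
 *   small decap + big encap   -> single L3 encap (decap is implied);
 *   big decap + small encap   -> single L3 decap (encap is implied);
 *   big decap + big encap     -> two L2 actions, decap then encap;
 *   small decap + small encap -> rejected as unsupported.
 */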
2783
2784 /**
2785  * Match encap_decap resource.
2786  *
2787  * @param list
2788  *   Pointer to the hash list.
2789  * @param entry
2790  *   Pointer to the existing resource entry object.
2791  * @param key
2792  *   Key of the new entry.
2793  * @param cb_ctx
2794  *   Pointer to the context carrying the new encap_decap resource.
2795  *
2796  * @return
2797  *   0 on match, non-zero otherwise.
2798  */
2799 int
2800 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2801                              struct mlx5_hlist_entry *entry,
2802                              uint64_t key __rte_unused, void *cb_ctx)
2803 {
2804         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2805         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2807
2808         cache_resource = container_of(entry,
2809                                       struct mlx5_flow_dv_encap_decap_resource,
2810                                       entry);
2811         if (resource->entry.key == cache_resource->entry.key &&
2812             resource->reformat_type == cache_resource->reformat_type &&
2813             resource->ft_type == cache_resource->ft_type &&
2814             resource->flags == cache_resource->flags &&
2815             resource->size == cache_resource->size &&
2816             !memcmp((const void *)resource->buf,
2817                     (const void *)cache_resource->buf,
2818                     resource->size))
2819                 return 0;
2820         return -1;
2821 }
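
/*
 * Note: the 64-bit hash key already embeds a checksum of the reformat
 * buffer, so the memcmp() above is still needed to rule out checksum
 * collisions between different buffers of the same size.
 */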
2822
2823 /**
2824  * Allocate encap_decap resource.
2825  *
2826  * @param list
2827  *   Pointer to the hash list.
2828  * @param key
2829  *   Key of the new entry.
2830  * @param cb_ctx
2831  *   Pointer to the context carrying the new encap_decap resource.
2832  *
2833  * @return
2834  *   Pointer to the new resource entry on success, NULL otherwise.
2835  */
2836 struct mlx5_hlist_entry *
2837 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2838                               uint64_t key __rte_unused,
2839                               void *cb_ctx)
2840 {
2841         struct mlx5_dev_ctx_shared *sh = list->ctx;
2842         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2843         struct mlx5dv_dr_domain *domain;
2844         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2845         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2846         uint32_t idx;
2847         int ret;
2848
2849         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2850                 domain = sh->fdb_domain;
2851         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2852                 domain = sh->rx_domain;
2853         else
2854                 domain = sh->tx_domain;
2855         /* Register new encap/decap resource. */
2856         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2857                                        &idx);
2858         if (!cache_resource) {
2859                 rte_flow_error_set(ctx->error, ENOMEM,
2860                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2861                                    "cannot allocate resource memory");
2862                 return NULL;
2863         }
2864         *cache_resource = *resource;
2865         cache_resource->idx = idx;
2866         ret = mlx5_flow_os_create_flow_action_packet_reformat
2867                                         (sh->ctx, domain, cache_resource,
2868                                          &cache_resource->action);
2869         if (ret) {
2870                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2871                 rte_flow_error_set(ctx->error, ENOMEM,
2872                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2873                                    NULL, "cannot create action");
2874                 return NULL;
2875         }
2876
2877         return &cache_resource->entry;
2878 }
2879
2880 /**
2881  * Find existing encap/decap resource or create and register a new one.
2882  *
2883  * @param[in, out] dev
2884  *   Pointer to rte_eth_dev structure.
2885  * @param[in, out] resource
2886  *   Pointer to encap/decap resource.
2887  * @param[in, out] dev_flow
2888  *   Pointer to the dev_flow.
2889  * @param[out] error
2890  *   Pointer to error structure.
2891  *
2892  * @return
2893  *   0 on success, a negative errno value otherwise and rte_errno is set.
2894  */
2895 static int
2896 flow_dv_encap_decap_resource_register
2897                         (struct rte_eth_dev *dev,
2898                          struct mlx5_flow_dv_encap_decap_resource *resource,
2899                          struct mlx5_flow *dev_flow,
2900                          struct rte_flow_error *error)
2901 {
2902         struct mlx5_priv *priv = dev->data->dev_private;
2903         struct mlx5_dev_ctx_shared *sh = priv->sh;
2904         struct mlx5_hlist_entry *entry;
2905         union mlx5_flow_encap_decap_key encap_decap_key = {
2906                 {
2907                         .ft_type = resource->ft_type,
2908                         .refmt_type = resource->reformat_type,
2909                         .buf_size = resource->size,
2910                         .table_level = !!dev_flow->dv.group,
2911                         .cksum = 0,
2912                 }
2913         };
2914         struct mlx5_flow_cb_ctx ctx = {
2915                 .error = error,
2916                 .data = resource,
2917         };
2918
2919         resource->flags = dev_flow->dv.group ? 0 : 1;
2920         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2921                                                 resource->size, 0);
2922         resource->entry.key = encap_decap_key.v64;
2923         entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2924                                     &ctx);
2925         if (!entry)
2926                 return -rte_errno;
2927         resource = container_of(entry, typeof(*resource), entry);
2928         dev_flow->dv.encap_decap = resource;
2929         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2930         return 0;
2931 }
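
/*
 * Note: the 64-bit hash key packs the table type, reformat type, buffer
 * size, a root/non-root group flag and a raw checksum of the reformat
 * buffer, so resources differing in any of these fields hash to
 * different entries.
 */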
2932
2933 /**
2934  * Register the flow table's pre-created jump action on the dev_flow.
2935  *
2936  * @param[in, out] dev
2937  *   Pointer to rte_eth_dev structure.
2938  * @param[in, out] tbl
2939  *   Pointer to flow table resource.
2940  * @param[in, out] dev_flow
2941  *   Pointer to the dev_flow.
2942  * @param[out] error
2943  *   Pointer to error structure.
2944  *
2945  * @return
2946  *   0 on success, a negative errno value otherwise and rte_errno is set.
2947  */
2948 static int
2949 flow_dv_jump_tbl_resource_register
2950                         (struct rte_eth_dev *dev __rte_unused,
2951                          struct mlx5_flow_tbl_resource *tbl,
2952                          struct mlx5_flow *dev_flow,
2953                          struct rte_flow_error *error __rte_unused)
2954 {
2955         struct mlx5_flow_tbl_data_entry *tbl_data =
2956                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2957
2958         MLX5_ASSERT(tbl);
2959         MLX5_ASSERT(tbl_data->jump.action);
2960         dev_flow->handle->rix_jump = tbl_data->idx;
2961         dev_flow->dv.jump = &tbl_data->jump;
2962         return 0;
2963 }
2964
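/*
 * mlx5_cache_list callback contract, as used by the port_id and
 * push_vlan actions below: the match callback returns 0 when the
 * existing entry equals the reference carried in the callback context
 * and non-zero otherwise; the create callback returns the new entry,
 * or NULL with ctx->error set on failure.
 */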
2965 int
2966 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2967                          struct mlx5_cache_entry *entry, void *cb_ctx)
2968 {
2969         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971         struct mlx5_flow_dv_port_id_action_resource *res =
2972                         container_of(entry, typeof(*res), entry);
2973
2974         return ref->port_id != res->port_id;
2975 }
2976
2977 struct mlx5_cache_entry *
2978 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2979                           struct mlx5_cache_entry *entry __rte_unused,
2980                           void *cb_ctx)
2981 {
2982         struct mlx5_dev_ctx_shared *sh = list->ctx;
2983         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2984         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2985         struct mlx5_flow_dv_port_id_action_resource *cache;
2986         uint32_t idx;
2987         int ret;
2988
2989         /* Register new port id action resource. */
2990         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2991         if (!cache) {
2992                 rte_flow_error_set(ctx->error, ENOMEM,
2993                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994                                    "cannot allocate port_id action cache memory");
2995                 return NULL;
2996         }
2997         *cache = *ref;
2998         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2999                                                         ref->port_id,
3000                                                         &cache->action);
3001         if (ret) {
3002                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3003                 rte_flow_error_set(ctx->error, ENOMEM,
3004                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3005                                    "cannot create action");
3006                 return NULL;
3007         }
3008         return &cache->entry;
3009 }
3010
3011 /**
3012  * Find an existing port ID action resource or create and register a new one.
3013  *
3014  * @param[in, out] dev
3015  *   Pointer to rte_eth_dev structure.
3016  * @param[in, out] resource
3017  *   Pointer to port ID action resource.
3018  * @param[in, out] dev_flow
3019  *   Pointer to the dev_flow.
3020  * @param[out] error
3021  *   Pointer to error structure.
3022  *
3023  * @return
3024  *   0 on success, a negative errno value otherwise and rte_errno is set.
3025  */
3026 static int
3027 flow_dv_port_id_action_resource_register
3028                         (struct rte_eth_dev *dev,
3029                          struct mlx5_flow_dv_port_id_action_resource *resource,
3030                          struct mlx5_flow *dev_flow,
3031                          struct rte_flow_error *error)
3032 {
3033         struct mlx5_priv *priv = dev->data->dev_private;
3034         struct mlx5_cache_entry *entry;
3035         struct mlx5_flow_dv_port_id_action_resource *cache;
3036         struct mlx5_flow_cb_ctx ctx = {
3037                 .error = error,
3038                 .data = resource,
3039         };
3040
3041         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3042         if (!entry)
3043                 return -rte_errno;
3044         cache = container_of(entry, typeof(*cache), entry);
3045         dev_flow->dv.port_id_action = cache;
3046         dev_flow->handle->rix_port_id_action = cache->idx;
3047         return 0;
3048 }
3049
3050 int
3051 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3052                          struct mlx5_cache_entry *entry, void *cb_ctx)
3053 {
3054         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3055         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3056         struct mlx5_flow_dv_push_vlan_action_resource *res =
3057                         container_of(entry, typeof(*res), entry);
3058
3059         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3060 }
3061
3062 struct mlx5_cache_entry *
3063 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3064                           struct mlx5_cache_entry *entry __rte_unused,
3065                           void *cb_ctx)
3066 {
3067         struct mlx5_dev_ctx_shared *sh = list->ctx;
3068         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3069         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3070         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3071         struct mlx5dv_dr_domain *domain;
3072         uint32_t idx;
3073         int ret;
3074
3075         /* Register new push vlan action resource. */
3076         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3077         if (!cache) {
3078                 rte_flow_error_set(ctx->error, ENOMEM,
3079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3080                                    "cannot allocate push_vlan action cache memory");
3081                 return NULL;
3082         }
3083         *cache = *ref;
3084         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3085                 domain = sh->fdb_domain;
3086         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3087                 domain = sh->rx_domain;
3088         else
3089                 domain = sh->tx_domain;
3090         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3091                                                         &cache->action);
3092         if (ret) {
3093                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3094                 rte_flow_error_set(ctx->error, ENOMEM,
3095                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3096                                    "cannot create push vlan action");
3097                 return NULL;
3098         }
3099         return &cache->entry;
3100 }
3101
3102 /**
3103  * Find existing push vlan resource or create and register a new one.
3104  *
3105  * @param[in, out] dev
3106  *   Pointer to rte_eth_dev structure.
3107  * @param[in, out] resource
3108  *   Pointer to push vlan action resource.
3109  * @param[in, out] dev_flow
3110  *   Pointer to the dev_flow.
3111  * @param[out] error
3112  *   Pointer to error structure.
3113  *
3114  * @return
3115  *   0 on success, a negative errno value otherwise and rte_errno is set.
3116  */
3117 static int
3118 flow_dv_push_vlan_action_resource_register
3119                        (struct rte_eth_dev *dev,
3120                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3121                         struct mlx5_flow *dev_flow,
3122                         struct rte_flow_error *error)
3123 {
3124         struct mlx5_priv *priv = dev->data->dev_private;
3125         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3126         struct mlx5_cache_entry *entry;
3127         struct mlx5_flow_cb_ctx ctx = {
3128                 .error = error,
3129                 .data = resource,
3130         };
3131
3132         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3133         if (!entry)
3134                 return -rte_errno;
3135         cache = container_of(entry, typeof(*cache), entry);
3136
3137         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3138         dev_flow->dv.push_vlan_res = cache;
3139         return 0;
3140 }
3141
3142 /**
3143  * Get the header size of the specified rte_flow_item_type.
3144  *
3145  * @param[in] item_type
3146  *   Tested rte_flow_item_type.
3147  *
3148  * @return
3149  *   Size of the item type's header, 0 if void or irrelevant.
3150  */
3151 static size_t
3152 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3153 {
3154         size_t retval;
3155
3156         switch (item_type) {
3157         case RTE_FLOW_ITEM_TYPE_ETH:
3158                 retval = sizeof(struct rte_ether_hdr);
3159                 break;
3160         case RTE_FLOW_ITEM_TYPE_VLAN:
3161                 retval = sizeof(struct rte_vlan_hdr);
3162                 break;
3163         case RTE_FLOW_ITEM_TYPE_IPV4:
3164                 retval = sizeof(struct rte_ipv4_hdr);
3165                 break;
3166         case RTE_FLOW_ITEM_TYPE_IPV6:
3167                 retval = sizeof(struct rte_ipv6_hdr);
3168                 break;
3169         case RTE_FLOW_ITEM_TYPE_UDP:
3170                 retval = sizeof(struct rte_udp_hdr);
3171                 break;
3172         case RTE_FLOW_ITEM_TYPE_TCP:
3173                 retval = sizeof(struct rte_tcp_hdr);
3174                 break;
3175         case RTE_FLOW_ITEM_TYPE_VXLAN:
3176         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3177                 retval = sizeof(struct rte_vxlan_hdr);
3178                 break;
3179         case RTE_FLOW_ITEM_TYPE_GRE:
3180         case RTE_FLOW_ITEM_TYPE_NVGRE:
3181                 retval = sizeof(struct rte_gre_hdr);
3182                 break;
3183         case RTE_FLOW_ITEM_TYPE_MPLS:
3184                 retval = sizeof(struct rte_mpls_hdr);
3185                 break;
3186         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3187         default:
3188                 retval = 0;
3189                 break;
3190         }
3191         return retval;
3192 }
3193
3194 #define MLX5_ENCAP_IPV4_VERSION         0x40
3195 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3196 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3197 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3198 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3199 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3200 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3201
3202 /**
3203  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3204  *
3205  * @param[in] items
3206  *   Pointer to rte_flow_item objects list.
3207  * @param[out] buf
3208  *   Pointer to the output buffer.
3209  * @param[out] size
3210  *   Pointer to the output buffer size.
3211  * @param[out] error
3212  *   Pointer to the error structure.
3213  *
3214  * @return
3215  *   0 on success, a negative errno value otherwise and rte_errno is set.
3216  */
3217 static int
3218 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3219                            size_t *size, struct rte_flow_error *error)
3220 {
3221         struct rte_ether_hdr *eth = NULL;
3222         struct rte_vlan_hdr *vlan = NULL;
3223         struct rte_ipv4_hdr *ipv4 = NULL;
3224         struct rte_ipv6_hdr *ipv6 = NULL;
3225         struct rte_udp_hdr *udp = NULL;
3226         struct rte_vxlan_hdr *vxlan = NULL;
3227         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3228         struct rte_gre_hdr *gre = NULL;
3229         size_t len;
3230         size_t temp_size = 0;
3231
3232         if (!items)
3233                 return rte_flow_error_set(error, EINVAL,
3234                                           RTE_FLOW_ERROR_TYPE_ACTION,
3235                                           NULL, "invalid empty data");
3236         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3237                 len = flow_dv_get_item_hdr_len(items->type);
3238                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3239                         return rte_flow_error_set(error, EINVAL,
3240                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3241                                                   (void *)items->type,
3242                                                   "items total size is too big"
3243                                                   " for encap action");
3244                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3245                 switch (items->type) {
3246                 case RTE_FLOW_ITEM_TYPE_ETH:
3247                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3248                         break;
3249                 case RTE_FLOW_ITEM_TYPE_VLAN:
3250                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3251                         if (!eth)
3252                                 return rte_flow_error_set(error, EINVAL,
3253                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3254                                                 (void *)items->type,
3255                                                 "eth header not found");
3256                         if (!eth->ether_type)
3257                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3258                         break;
3259                 case RTE_FLOW_ITEM_TYPE_IPV4:
3260                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3261                         if (!vlan && !eth)
3262                                 return rte_flow_error_set(error, EINVAL,
3263                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3264                                                 (void *)items->type,
3265                                                 "neither eth nor vlan"
3266                                                 " header found");
3267                         if (vlan && !vlan->eth_proto)
3268                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3269                         else if (eth && !eth->ether_type)
3270                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3271                         if (!ipv4->version_ihl)
3272                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3273                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3274                         if (!ipv4->time_to_live)
3275                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3276                         break;
3277                 case RTE_FLOW_ITEM_TYPE_IPV6:
3278                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3279                         if (!vlan && !eth)
3280                                 return rte_flow_error_set(error, EINVAL,
3281                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3282                                                 (void *)items->type,
3283                                                 "neither eth nor vlan"
3284                                                 " header found");
3285                         if (vlan && !vlan->eth_proto)
3286                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3287                         else if (eth && !eth->ether_type)
3288                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3289                         if (!ipv6->vtc_flow)
3290                                 ipv6->vtc_flow =
3291                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3292                         if (!ipv6->hop_limits)
3293                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3294                         break;
3295                 case RTE_FLOW_ITEM_TYPE_UDP:
3296                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3297                         if (!ipv4 && !ipv6)
3298                                 return rte_flow_error_set(error, EINVAL,
3299                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3300                                                 (void *)items->type,
3301                                                 "ip header not found");
3302                         if (ipv4 && !ipv4->next_proto_id)
3303                                 ipv4->next_proto_id = IPPROTO_UDP;
3304                         else if (ipv6 && !ipv6->proto)
3305                                 ipv6->proto = IPPROTO_UDP;
3306                         break;
3307                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3308                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3309                         if (!udp)
3310                                 return rte_flow_error_set(error, EINVAL,
3311                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3312                                                 (void *)items->type,
3313                                                 "udp header not found");
3314                         if (!udp->dst_port)
3315                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3316                         if (!vxlan->vx_flags)
3317                                 vxlan->vx_flags =
3318                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3319                         break;
3320                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3321                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3322                         if (!udp)
3323                                 return rte_flow_error_set(error, EINVAL,
3324                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3325                                                 (void *)items->type,
3326                                                 "udp header not found");
3327                         if (!vxlan_gpe->proto)
3328                                 return rte_flow_error_set(error, EINVAL,
3329                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3330                                                 (void *)items->type,
3331                                                 "next protocol not found");
3332                         if (!udp->dst_port)
3333                                 udp->dst_port =
3334                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3335                         if (!vxlan_gpe->vx_flags)
3336                                 vxlan_gpe->vx_flags =
3337                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3338                         break;
3339                 case RTE_FLOW_ITEM_TYPE_GRE:
3340                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3341                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3342                         if (!gre->proto)
3343                                 return rte_flow_error_set(error, EINVAL,
3344                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3345                                                 (void *)items->type,
3346                                                 "next protocol not found");
3347                         if (!ipv4 && !ipv6)
3348                                 return rte_flow_error_set(error, EINVAL,
3349                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3350                                                 (void *)items->type,
3351                                                 "ip header not found");
3352                         if (ipv4 && !ipv4->next_proto_id)
3353                                 ipv4->next_proto_id = IPPROTO_GRE;
3354                         else if (ipv6 && !ipv6->proto)
3355                                 ipv6->proto = IPPROTO_GRE;
3356                         break;
3357                 case RTE_FLOW_ITEM_TYPE_VOID:
3358                         break;
3359                 default:
3360                         return rte_flow_error_set(error, EINVAL,
3361                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3362                                                   (void *)items->type,
3363                                                   "unsupported item type");
3364                         break;
3365                 }
3366                 temp_size += len;
3367         }
3368         *size = temp_size;
3369         return 0;
3370 }
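
/*
 * Illustrative sketch only (hypothetical addresses): a VXLAN
 * encapsulation definition as consumed by flow_dv_convert_encap_data().
 * Fields left zero (EtherType, IP version/TTL, UDP destination port,
 * VXLAN flags) are filled with defaults by the function.
 *
 * @code
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *		.src.addr_bytes = { 0x00, 0x66, 0x77, 0x88, 0x99, 0xaa },
 *	};
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = {
 *			.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *			.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = { .hdr = { .dst_port = 0 } };
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0, 0, 42 } };
 *	struct rte_flow_item defs[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 * @endcode
 */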
3371
3372 static int
3373 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3374 {
3375         struct rte_ether_hdr *eth = NULL;
3376         struct rte_vlan_hdr *vlan = NULL;
3377         struct rte_ipv6_hdr *ipv6 = NULL;
3378         struct rte_udp_hdr *udp = NULL;
3379         char *next_hdr;
3380         uint16_t proto;
3381
3382         eth = (struct rte_ether_hdr *)data;
3383         next_hdr = (char *)(eth + 1);
3384         proto = RTE_BE16(eth->ether_type);
3385
3386         /* VLAN skipping */
3387         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3388                 vlan = (struct rte_vlan_hdr *)next_hdr;
3389                 proto = RTE_BE16(vlan->eth_proto);
3390                 next_hdr += sizeof(struct rte_vlan_hdr);
3391         }
3392
3393         /* HW calculates the IPv4 checksum, no need to proceed. */
3394         if (proto == RTE_ETHER_TYPE_IPV4)
3395                 return 0;
3396
3397         /* Non IPv4/IPv6 header, not supported. */
3398         if (proto != RTE_ETHER_TYPE_IPV6) {
3399                 return rte_flow_error_set(error, ENOTSUP,
3400                                           RTE_FLOW_ERROR_TYPE_ACTION,
3401                                           NULL, "Cannot offload non IPv4/IPv6");
3402         }
3403
3404         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3405
3406         /* Ignore non-UDP traffic. */
3407         if (ipv6->proto != IPPROTO_UDP)
3408                 return 0;
3409
3410         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3411         udp->dgram_cksum = 0;
3412
3413         return 0;
3414 }
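
/*
 * Rationale (as implied by the code above): with an IPv4 outer header
 * the hardware recomputes the header checksum, while IPv6 has no
 * header checksum and the outer UDP checksum is not offloaded, so it
 * is cleared here, presumably relying on the zero-checksum allowance
 * for tunnels (RFC 6935).
 */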
3415
3416 /**
3417  * Convert L2 encap action to DV specification.
3418  *
3419  * @param[in] dev
3420  *   Pointer to rte_eth_dev structure.
3421  * @param[in] action
3422  *   Pointer to action structure.
3423  * @param[in, out] dev_flow
3424  *   Pointer to the mlx5_flow.
3425  * @param[in] transfer
3426  *   Mark if the flow is E-Switch flow.
3427  * @param[out] error
3428  *   Pointer to the error structure.
3429  *
3430  * @return
3431  *   0 on success, a negative errno value otherwise and rte_errno is set.
3432  */
3433 static int
3434 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3435                                const struct rte_flow_action *action,
3436                                struct mlx5_flow *dev_flow,
3437                                uint8_t transfer,
3438                                struct rte_flow_error *error)
3439 {
3440         const struct rte_flow_item *encap_data;
3441         const struct rte_flow_action_raw_encap *raw_encap_data;
3442         struct mlx5_flow_dv_encap_decap_resource res = {
3443                 .reformat_type =
3444                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3445                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3446                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3447         };
3448
3449         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3450                 raw_encap_data =
3451                         (const struct rte_flow_action_raw_encap *)action->conf;
3452                 res.size = raw_encap_data->size;
3453                 memcpy(res.buf, raw_encap_data->data, res.size);
3454         } else {
3455                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3456                         encap_data =
3457                                 ((const struct rte_flow_action_vxlan_encap *)
3458                                                 action->conf)->definition;
3459                 else
3460                         encap_data =
3461                                 ((const struct rte_flow_action_nvgre_encap *)
3462                                                 action->conf)->definition;
3463                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3464                                                &res.size, error))
3465                         return -rte_errno;
3466         }
3467         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3468                 return -rte_errno;
3469         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3470                 return rte_flow_error_set(error, EINVAL,
3471                                           RTE_FLOW_ERROR_TYPE_ACTION,
3472                                           NULL, "can't create L2 encap action");
3473         return 0;
3474 }
3475
3476 /**
3477  * Convert L2 decap action to DV specification.
3478  *
3479  * @param[in] dev
3480  *   Pointer to rte_eth_dev structure.
3481  * @param[in, out] dev_flow
3482  *   Pointer to the mlx5_flow.
3483  * @param[in] transfer
3484  *   Mark if the flow is E-Switch flow.
3485  * @param[out] error
3486  *   Pointer to the error structure.
3487  *
3488  * @return
3489  *   0 on success, a negative errno value otherwise and rte_errno is set.
3490  */
3491 static int
3492 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3493                                struct mlx5_flow *dev_flow,
3494                                uint8_t transfer,
3495                                struct rte_flow_error *error)
3496 {
3497         struct mlx5_flow_dv_encap_decap_resource res = {
3498                 .size = 0,
3499                 .reformat_type =
3500                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3501                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3502                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3503         };
3504
3505         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3506                 return rte_flow_error_set(error, EINVAL,
3507                                           RTE_FLOW_ERROR_TYPE_ACTION,
3508                                           NULL, "can't create L2 decap action");
3509         return 0;
3510 }
3511
3512 /**
3513  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3514  *
3515  * @param[in] dev
3516  *   Pointer to rte_eth_dev structure.
3517  * @param[in] action
3518  *   Pointer to action structure.
3519  * @param[in, out] dev_flow
3520  *   Pointer to the mlx5_flow.
3521  * @param[in] attr
3522  *   Pointer to the flow attributes.
3523  * @param[out] error
3524  *   Pointer to the error structure.
3525  *
3526  * @return
3527  *   0 on success, a negative errno value otherwise and rte_errno is set.
3528  */
3529 static int
3530 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3531                                 const struct rte_flow_action *action,
3532                                 struct mlx5_flow *dev_flow,
3533                                 const struct rte_flow_attr *attr,
3534                                 struct rte_flow_error *error)
3535 {
3536         const struct rte_flow_action_raw_encap *encap_data;
3537         struct mlx5_flow_dv_encap_decap_resource res;
3538
3539         memset(&res, 0, sizeof(res));
3540         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3541         res.size = encap_data->size;
3542         memcpy(res.buf, encap_data->data, res.size);
3543         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3544                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3545                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3546         if (attr->transfer)
3547                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3548         else
3549                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3550                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3551         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3552                 return rte_flow_error_set(error, EINVAL,
3553                                           RTE_FLOW_ERROR_TYPE_ACTION,
3554                                           NULL, "can't create encap action");
3555         return 0;
3556 }
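
/*
 * Note on the reformat type selection above: an encap buffer smaller
 * than the decision size only carries the replacement L2 header, so
 * the action becomes an L3-tunnel-to-L2 decap; a larger buffer holds a
 * complete tunnel header and becomes an L2-to-L3-tunnel encap.
 */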
3557
3558 /**
3559  * Create action push VLAN.
3560  *
3561  * @param[in] dev
3562  *   Pointer to rte_eth_dev structure.
3563  * @param[in] attr
3564  *   Pointer to the flow attributes.
3565  * @param[in] vlan
3566  *   Pointer to the vlan to push to the Ethernet header.
3567  * @param[in, out] dev_flow
3568  *   Pointer to the mlx5_flow.
3569  * @param[out] error
3570  *   Pointer to the error structure.
3571  *
3572  * @return
3573  *   0 on success, a negative errno value otherwise and rte_errno is set.
3574  */
3575 static int
3576 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3577                                 const struct rte_flow_attr *attr,
3578                                 const struct rte_vlan_hdr *vlan,
3579                                 struct mlx5_flow *dev_flow,
3580                                 struct rte_flow_error *error)
3581 {
3582         struct mlx5_flow_dv_push_vlan_action_resource res;
3583
3584         memset(&res, 0, sizeof(res));
3585         res.vlan_tag =
3586                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3587                                  vlan->vlan_tci);
3588         if (attr->transfer)
3589                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3590         else
3591                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3592                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3593         return flow_dv_push_vlan_action_resource_register
3594                                             (dev, &res, dev_flow, error);
3595 }
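
/*
 * Worked example (hypothetical values): assuming the caller filled
 * vlan->eth_proto and vlan->vlan_tci in host byte order, TPID 0x8100
 * with PCP 3 and VID 100 packs to
 *   (0x8100 << 16) | (3 << 13) | 100 = 0x81006064,
 * which rte_cpu_to_be_32() then stores in network byte order.
 */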
3596
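/*
 * Apparently set while an E-Switch (FDB) mirror flow is being created;
 * consulted by flow_dv_validate_action_modify_hdr() below to reject
 * modify-header actions combined with sampling in that case.
 */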
3597 static int fdb_mirror;
3598
3599 /**
3600  * Validate the modify-header actions.
3601  *
3602  * @param[in] action_flags
3603  *   Holds the actions detected until now.
3604  * @param[in] action
3605  *   Pointer to the modify action.
3606  * @param[out] error
3607  *   Pointer to error structure.
3608  *
3609  * @return
3610  *   0 on success, a negative errno value otherwise and rte_errno is set.
3611  */
3612 static int
3613 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3614                                    const struct rte_flow_action *action,
3615                                    struct rte_flow_error *error)
3616 {
3617         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3618                 return rte_flow_error_set(error, EINVAL,
3619                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3620                                           NULL, "action configuration not set");
3621         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3622                 return rte_flow_error_set(error, EINVAL,
3623                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3624                                           "can't have encap action before"
3625                                           " modify action");
3626         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3627                 return rte_flow_error_set(error, EINVAL,
3628                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3629                                           "can't support sample action before"
3630                                           " modify action for E-Switch"
3631                                           " mirroring");
3632         return 0;
3633 }
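
/*
 * Ordering rules enforced above: except for DEC_TTL the action must
 * carry a configuration, a modify-header action may not follow an
 * encap action, and with E-Switch mirroring it may not follow a
 * sample action.
 */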
3634
3635 /**
3636  * Validate the modify-header MAC address actions.
3637  *
3638  * @param[in] action_flags
3639  *   Holds the actions detected until now.
3640  * @param[in] action
3641  *   Pointer to the modify action.
3642  * @param[in] item_flags
3643  *   Holds the items detected.
3644  * @param[out] error
3645  *   Pointer to error structure.
3646  *
3647  * @return
3648  *   0 on success, a negative errno value otherwise and rte_errno is set.
3649  */
3650 static int
3651 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3652                                    const struct rte_flow_action *action,
3653                                    const uint64_t item_flags,
3654                                    struct rte_flow_error *error)
3655 {
3656         int ret = 0;
3657
3658         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3659         if (!ret) {
3660                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3661                         return rte_flow_error_set(error, EINVAL,
3662                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3663                                                   NULL,
3664                                                   "no L2 item in pattern");
3665         }
3666         return ret;
3667 }
3668
3669 /**
3670  * Validate the modify-header IPv4 address actions.
3671  *
3672  * @param[in] action_flags
3673  *   Holds the actions detected until now.
3674  * @param[in] action
3675  *   Pointer to the modify action.
3676  * @param[in] item_flags
3677  *   Holds the items detected.
3678  * @param[out] error
3679  *   Pointer to error structure.
3680  *
3681  * @return
3682  *   0 on success, a negative errno value otherwise and rte_errno is set.
3683  */
3684 static int
3685 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3686                                     const struct rte_flow_action *action,
3687                                     const uint64_t item_flags,
3688                                     struct rte_flow_error *error)
3689 {
3690         int ret = 0;
3691         uint64_t layer;
3692
3693         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3694         if (!ret) {
3695                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3696                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3697                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3698                 if (!(item_flags & layer))
3699                         return rte_flow_error_set(error, EINVAL,
3700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3701                                                   NULL,
3702                                                   "no ipv4 item in pattern");
3703         }
3704         return ret;
3705 }
3706
3707 /**
3708  * Validate the modify-header IPv6 address actions.
3709  *
3710  * @param[in] action_flags
3711  *   Holds the actions detected until now.
3712  * @param[in] action
3713  *   Pointer to the modify action.
3714  * @param[in] item_flags
3715  *   Holds the items detected.
3716  * @param[out] error
3717  *   Pointer to error structure.
3718  *
3719  * @return
3720  *   0 on success, a negative errno value otherwise and rte_errno is set.
3721  */
3722 static int
3723 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3724                                     const struct rte_flow_action *action,
3725                                     const uint64_t item_flags,
3726                                     struct rte_flow_error *error)
3727 {
3728         int ret = 0;
3729         uint64_t layer;
3730
3731         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3732         if (!ret) {
3733                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3734                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3735                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3736                 if (!(item_flags & layer))
3737                         return rte_flow_error_set(error, EINVAL,
3738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3739                                                   NULL,
3740                                                   "no ipv6 item in pattern");
3741         }
3742         return ret;
3743 }
3744
3745 /**
3746  * Validate the modify-header TP actions.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3762                                   const struct rte_flow_action *action,
3763                                   const uint64_t item_flags,
3764                                   struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L4 :
3773                                  MLX5_FLOW_LAYER_OUTER_L4;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL, "no transport layer "
3778                                                   "in pattern");
3779         }
3780         return ret;
3781 }
3782
3783 /**
3784  * Validate the modify-header actions of increment/decrement
3785  * TCP Sequence-number.
3786  *
3787  * @param[in] action_flags
3788  *   Holds the actions detected until now.
3789  * @param[in] action
3790  *   Pointer to the modify action.
3791  * @param[in] item_flags
3792  *   Holds the items detected.
3793  * @param[out] error
3794  *   Pointer to error structure.
3795  *
3796  * @return
3797  *   0 on success, a negative errno value otherwise and rte_errno is set.
3798  */
3799 static int
3800 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3801                                        const struct rte_flow_action *action,
3802                                        const uint64_t item_flags,
3803                                        struct rte_flow_error *error)
3804 {
3805         int ret = 0;
3806         uint64_t layer;
3807
3808         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3809         if (!ret) {
3810                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3811                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3812                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3813                 if (!(item_flags & layer))
3814                         return rte_flow_error_set(error, EINVAL,
3815                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3816                                                   NULL, "no TCP item in"
3817                                                   " pattern");
3818                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3819                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3820                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3821                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3822                         return rte_flow_error_set(error, EINVAL,
3823                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3824                                                   NULL,
3825                                                   "cannot decrease and increase"
3826                                                   " TCP sequence number"
3827                                                   " at the same time");
3828         }
3829         return ret;
3830 }
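
/*
 * Illustration (hypothetical snippet): the check above rejects a rule
 * that both increments and decrements the TCP sequence number. The conf
 * for these action types is a plain network-order 32-bit value:
 *
 * @code
 * rte_be32_t delta = RTE_BE32(1);
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ, .conf = &delta },
 *         // adding DEC_TCP_SEQ as well would fail validation with EINVAL
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */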
3831
3832 /**
3833  * Validate the modify-header actions of increment/decrement
3834  * TCP Acknowledgment number.
3835  *
3836  * @param[in] action_flags
3837  *   Holds the actions detected until now.
3838  * @param[in] action
3839  *   Pointer to the modify action.
3840  * @param[in] item_flags
3841  *   Holds the items detected.
3842  * @param[out] error
3843  *   Pointer to error structure.
3844  *
3845  * @return
3846  *   0 on success, a negative errno value otherwise and rte_errno is set.
3847  */
3848 static int
3849 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3850                                        const struct rte_flow_action *action,
3851                                        const uint64_t item_flags,
3852                                        struct rte_flow_error *error)
3853 {
3854         int ret = 0;
3855         uint64_t layer;
3856
3857         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3858         if (!ret) {
3859                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3860                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3861                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3862                 if (!(item_flags & layer))
3863                         return rte_flow_error_set(error, EINVAL,
3864                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3865                                                   NULL, "no TCP item in"
3866                                                   " pattern");
3867                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3868                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3869                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3870                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3871                         return rte_flow_error_set(error, EINVAL,
3872                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3873                                                   NULL,
3874                                                   "cannot decrease and increase"
3875                                                   " TCP acknowledgment number"
3876                                                   " at the same time");
3877         }
3878         return ret;
3879 }
3880
3881 /**
3882  * Validate the modify-header TTL actions.
3883  *
3884  * @param[in] action_flags
3885  *   Holds the actions detected until now.
3886  * @param[in] action
3887  *   Pointer to the modify action.
3888  * @param[in] item_flags
3889  *   Holds the items detected.
3890  * @param[out] error
3891  *   Pointer to error structure.
3892  *
3893  * @return
3894  *   0 on success, a negative errno value otherwise and rte_errno is set.
3895  */
3896 static int
3897 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3898                                    const struct rte_flow_action *action,
3899                                    const uint64_t item_flags,
3900                                    struct rte_flow_error *error)
3901 {
3902         int ret = 0;
3903         uint64_t layer;
3904
3905         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3906         if (!ret) {
3907                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3908                                  MLX5_FLOW_LAYER_INNER_L3 :
3909                                  MLX5_FLOW_LAYER_OUTER_L3;
3910                 if (!(item_flags & layer))
3911                         return rte_flow_error_set(error, EINVAL,
3912                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3913                                                   NULL,
3914                                                   "no IP protocol in pattern");
3915         }
3916         return ret;
3917 }
3918
3919 /**
3920  * Validate jump action.
3921  *
3922  * @param[in] action
3923  *   Pointer to the jump action.
3924  * @param[in] action_flags
3925  *   Holds the actions detected until now.
3926  * @param[in] attributes
3927  *   Pointer to flow attributes
3928  * @param[in] external
3929  *   Action belongs to a flow rule created by a request external to the PMD.
3930  * @param[out] error
3931  *   Pointer to error structure.
3932  *
3933  * @return
3934  *   0 on success, a negative errno value otherwise and rte_errno is set.
3935  */
3936 static int
3937 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3938                              const struct mlx5_flow_tunnel *tunnel,
3939                              const struct rte_flow_action *action,
3940                              uint64_t action_flags,
3941                              const struct rte_flow_attr *attributes,
3942                              bool external, struct rte_flow_error *error)
3943 {
3944         uint32_t target_group, table;
3945         int ret = 0;
3946         struct flow_grp_info grp_info = {
3947                 .external = !!external,
3948                 .transfer = !!attributes->transfer,
3949                 .fdb_def_rule = 1,
3950                 .std_tbl_fix = 0
3951         };
3952         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3953                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3954                 return rte_flow_error_set(error, EINVAL,
3955                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3956                                           "can't have 2 fate actions in"
3957                                           " the same flow");
3958         if (action_flags & MLX5_FLOW_ACTION_METER)
3959                 return rte_flow_error_set(error, ENOTSUP,
3960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3961                                           "jump with meter not supported");
3962         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3963                 return rte_flow_error_set(error, EINVAL,
3964                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3965                                           "E-Switch mirroring can't support"
3966                                           " sample action and jump action in"
3967                                           " the same flow now");
3968         if (!action->conf)
3969                 return rte_flow_error_set(error, EINVAL,
3970                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3971                                           NULL, "action configuration not set");
3972         target_group =
3973                 ((const struct rte_flow_action_jump *)action->conf)->group;
3974         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3975                                        grp_info, error);
3976         if (ret)
3977                 return ret;
3978         if (attributes->group == target_group &&
3979             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3980                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3981                 return rte_flow_error_set(error, EINVAL,
3982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3983                                           "target group must be other than"
3984                                           " the current flow group");
3985         return 0;
3986 }
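
/*
 * Usage sketch (hypothetical snippet): a jump must target a group other
 * than the one the rule lives in, unless tunnel offload set/match is
 * involved:
 *
 * @code
 * struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 * struct rte_flow_action_jump jump = { .group = 1 }; // must differ from attr.group
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */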
3987
3988 /*
3989  * Validate the port_id action.
3990  *
3991  * @param[in] dev
3992  *   Pointer to rte_eth_dev structure.
3993  * @param[in] action_flags
3994  *   Bit-fields that holds the actions detected until now.
3995  * @param[in] action
3996  *   Port_id RTE action structure.
3997  * @param[in] attr
3998  *   Attributes of flow that includes this action.
3999  * @param[out] error
4000  *   Pointer to error structure.
4001  *
4002  * @return
4003  *   0 on success, a negative errno value otherwise and rte_errno is set.
4004  */
4005 static int
4006 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4007                                 uint64_t action_flags,
4008                                 const struct rte_flow_action *action,
4009                                 const struct rte_flow_attr *attr,
4010                                 struct rte_flow_error *error)
4011 {
4012         const struct rte_flow_action_port_id *port_id;
4013         struct mlx5_priv *act_priv;
4014         struct mlx5_priv *dev_priv;
4015         uint16_t port;
4016
4017         if (!attr->transfer)
4018                 return rte_flow_error_set(error, ENOTSUP,
4019                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4020                                           NULL,
4021                                           "port id action is valid in transfer"
4022                                           " mode only");
4023         if (!action || !action->conf)
4024                 return rte_flow_error_set(error, ENOTSUP,
4025                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4026                                           NULL,
4027                                           "port id action parameters must be"
4028                                           " specified");
4029         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4030                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4031                 return rte_flow_error_set(error, EINVAL,
4032                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4033                                           "can have only one fate action in"
4034                                           " a flow");
4035         dev_priv = mlx5_dev_to_eswitch_info(dev);
4036         if (!dev_priv)
4037                 return rte_flow_error_set(error, rte_errno,
4038                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4039                                           NULL,
4040                                           "failed to obtain E-Switch info");
4041         port_id = action->conf;
4042         port = port_id->original ? dev->data->port_id : port_id->id;
4043         act_priv = mlx5_port_to_eswitch_info(port, false);
4044         if (!act_priv)
4045                 return rte_flow_error_set
4046                                 (error, rte_errno,
4047                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4048                                  "failed to obtain E-Switch port id for port");
4049         if (act_priv->domain_id != dev_priv->domain_id)
4050                 return rte_flow_error_set
4051                                 (error, EINVAL,
4052                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4053                                  "port does not belong to"
4054                                  " E-Switch being configured");
4055         return 0;
4056 }
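
/*
 * Usage sketch (hypothetical snippet): port_id is accepted only on
 * transfer rules, and the target port must belong to the same E-Switch
 * domain as the rule's port:
 *
 * @code
 * struct rte_flow_attr attr = { .transfer = 1 };
 * struct rte_flow_action_port_id pid = { .id = 1 }; // peer DPDK port id
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */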
4057
4058 /**
4059  * Get the maximum number of modify header actions.
4060  *
4061  * @param dev
4062  *   Pointer to rte_eth_dev structure.
4063  * @param flags
4064  *   Flags bits to check if root level.
4065  *
4066  * @return
4067  *   Max number of modify header actions device can support.
4068  */
4069 static inline unsigned int
4070 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4071                               uint64_t flags)
4072 {
4073         /*
4074          * There's no way to directly query the max capacity from FW.
4075          * The maximal value on the root table should be assumed to be supported.
4076          */
4077         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4078                 return MLX5_MAX_MODIFY_NUM;
4079         else
4080                 return MLX5_ROOT_TBL_MODIFY_NUM;
4081 }
4082
4083 /**
4084  * Validate the meter action.
4085  *
4086  * @param[in] dev
4087  *   Pointer to rte_eth_dev structure.
4088  * @param[in] action_flags
4089  *   Bit-fields that holds the actions detected until now.
4090  * @param[in] action
4091  *   Pointer to the meter action.
4092  * @param[in] attr
4093  *   Attributes of flow that includes this action.
4094  * @param[out] error
4095  *   Pointer to error structure.
4096  *
4097  * @return
4098  *   0 on success, a negative errno value otherwise and rte_errno is set.
4099  */
4100 static int
4101 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4102                                 uint64_t action_flags,
4103                                 const struct rte_flow_action *action,
4104                                 const struct rte_flow_attr *attr,
4105                                 struct rte_flow_error *error)
4106 {
4107         struct mlx5_priv *priv = dev->data->dev_private;
4108         const struct rte_flow_action_meter *am = action->conf;
4109         struct mlx5_flow_meter *fm;
4110
4111         if (!am)
4112                 return rte_flow_error_set(error, EINVAL,
4113                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4114                                           "meter action conf is NULL");
4115
4116         if (action_flags & MLX5_FLOW_ACTION_METER)
4117                 return rte_flow_error_set(error, ENOTSUP,
4118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4119                                           "meter chaining not supported");
4120         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4121                 return rte_flow_error_set(error, ENOTSUP,
4122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4123                                           "meter with jump not supported");
4124         if (!priv->mtr_en)
4125                 return rte_flow_error_set(error, ENOTSUP,
4126                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4127                                           NULL,
4128                                           "meter action not supported");
4129         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4130         if (!fm)
4131                 return rte_flow_error_set(error, EINVAL,
4132                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4133                                           "Meter not found");
4134         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4135               (!fm->ingress && !attr->ingress && attr->egress) ||
4136               (!fm->egress && !attr->egress && attr->ingress))))
4137                 return rte_flow_error_set(error, EINVAL,
4138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4139                                           "Flow attributes are either invalid "
4140                                           "or have a conflict with current "
4141                                           "meter attributes");
4142         return 0;
4143 }
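
/*
 * Usage sketch (hypothetical snippet): the meter referenced below must
 * have been created beforehand through the rte_mtr API with the same
 * mtr_id, otherwise validation fails with "Meter not found":
 *
 * @code
 * struct rte_flow_action_meter meter = { .mtr_id = 1 };
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */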
4144
4145 /**
4146  * Validate the age action.
4147  *
4148  * @param[in] action_flags
4149  *   Holds the actions detected until now.
4150  * @param[in] action
4151  *   Pointer to the age action.
4152  * @param[in] dev
4153  *   Pointer to the Ethernet device structure.
4154  * @param[out] error
4155  *   Pointer to error structure.
4156  *
4157  * @return
4158  *   0 on success, a negative errno value otherwise and rte_errno is set.
4159  */
4160 static int
4161 flow_dv_validate_action_age(uint64_t action_flags,
4162                             const struct rte_flow_action *action,
4163                             struct rte_eth_dev *dev,
4164                             struct rte_flow_error *error)
4165 {
4166         struct mlx5_priv *priv = dev->data->dev_private;
4167         const struct rte_flow_action_age *age = action->conf;
4168
4169         if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4170                 return rte_flow_error_set(error, ENOTSUP,
4171                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4172                                           NULL,
4173                                           "age action not supported");
4174         if (!(action->conf))
4175                 return rte_flow_error_set(error, EINVAL,
4176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4177                                           "configuration cannot be NULL");
4178         if (!(age->timeout))
4179                 return rte_flow_error_set(error, EINVAL,
4180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4181                                           "invalid timeout value 0");
4182         if (action_flags & MLX5_FLOW_ACTION_AGE)
4183                 return rte_flow_error_set(error, EINVAL,
4184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4185                                           "duplicate age actions set");
4186         return 0;
4187 }
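
/*
 * Usage sketch (hypothetical snippet): the timeout is in seconds and
 * must be non-zero; only one AGE action is allowed per rule:
 *
 * @code
 * struct rte_flow_action_age age = { .timeout = 30, .context = NULL };
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */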
4188
4189 /**
4190  * Validate the modify-header IPv4 DSCP actions.
4191  *
4192  * @param[in] action_flags
4193  *   Holds the actions detected until now.
4194  * @param[in] action
4195  *   Pointer to the modify action.
4196  * @param[in] item_flags
4197  *   Holds the items detected.
4198  * @param[out] error
4199  *   Pointer to error structure.
4200  *
4201  * @return
4202  *   0 on success, a negative errno value otherwise and rte_errno is set.
4203  */
4204 static int
4205 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4206                                          const struct rte_flow_action *action,
4207                                          const uint64_t item_flags,
4208                                          struct rte_flow_error *error)
4209 {
4210         int ret = 0;
4211
4212         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4213         if (!ret) {
4214                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4215                         return rte_flow_error_set(error, EINVAL,
4216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4217                                                   NULL,
4218                                                   "no ipv4 item in pattern");
4219         }
4220         return ret;
4221 }
4222
4223 /**
4224  * Validate the modify-header IPv6 DSCP actions.
4225  *
4226  * @param[in] action_flags
4227  *   Holds the actions detected until now.
4228  * @param[in] action
4229  *   Pointer to the modify action.
4230  * @param[in] item_flags
4231  *   Holds the items detected.
4232  * @param[out] error
4233  *   Pointer to error structure.
4234  *
4235  * @return
4236  *   0 on success, a negative errno value otherwise and rte_errno is set.
4237  */
4238 static int
4239 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4240                                          const struct rte_flow_action *action,
4241                                          const uint64_t item_flags,
4242                                          struct rte_flow_error *error)
4243 {
4244         int ret = 0;
4245
4246         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4247         if (!ret) {
4248                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4249                         return rte_flow_error_set(error, EINVAL,
4250                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4251                                                   NULL,
4252                                                   "no ipv6 item in pattern");
4253         }
4254         return ret;
4255 }
4256
4257 /**
4258  * Match modify-header resource.
4259  *
4260  * @param list
4261  *   Pointer to the hash list.
4262  * @param entry
4263  *   Pointer to exist resource entry object.
4264  * @param key
4265  *   Key of the new entry.
4266  * @param ctx
4267  *   Pointer to new modify-header resource.
4268  *
4269  * @return
4270  *   0 on matching, non-zero otherwise.
4271  */
4272 int
4273 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4274                         struct mlx5_hlist_entry *entry,
4275                         uint64_t key __rte_unused, void *cb_ctx)
4276 {
4277         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4278         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4279         struct mlx5_flow_dv_modify_hdr_resource *resource =
4280                         container_of(entry, typeof(*resource), entry);
4281         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4282
4283         key_len += ref->actions_num * sizeof(ref->actions[0]);
4284         return ref->actions_num != resource->actions_num ||
4285                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4286 }
4287
4288 struct mlx5_hlist_entry *
4289 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4290                          void *cb_ctx)
4291 {
4292         struct mlx5_dev_ctx_shared *sh = list->ctx;
4293         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4294         struct mlx5dv_dr_domain *ns;
4295         struct mlx5_flow_dv_modify_hdr_resource *entry;
4296         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4297         int ret;
4298         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4299         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4300
4301         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4302                             SOCKET_ID_ANY);
4303         if (!entry) {
4304                 rte_flow_error_set(ctx->error, ENOMEM,
4305                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4306                                    "cannot allocate resource memory");
4307                 return NULL;
4308         }
4309         rte_memcpy(&entry->ft_type,
4310                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4311                    key_len + data_len);
4312         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4313                 ns = sh->fdb_domain;
4314         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4315                 ns = sh->tx_domain;
4316         else
4317                 ns = sh->rx_domain;
4318         ret = mlx5_flow_os_create_flow_action_modify_header
4319                                         (sh->ctx, ns, entry,
4320                                          data_len, &entry->action);
4321         if (ret) {
4322                 mlx5_free(entry);
4323                 rte_flow_error_set(ctx->error, ENOMEM,
4324                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4325                                    NULL, "cannot create modification action");
4326                 return NULL;
4327         }
4328         return &entry->entry;
4329 }
4330
4331 /**
4332  * Validate the sample action.
4333  *
4334  * @param[in] action_flags
4335  *   Holds the actions detected until now.
4336  * @param[in] action
4337  *   Pointer to the sample action.
4338  * @param[in] dev
4339  *   Pointer to the Ethernet device structure.
4340  * @param[in] attr
4341  *   Attributes of flow that includes this action.
4342  * @param[out] error
4343  *   Pointer to error structure.
4344  *
4345  * @return
4346  *   0 on success, a negative errno value otherwise and rte_errno is set.
4347  */
4348 static int
4349 flow_dv_validate_action_sample(uint64_t action_flags,
4350                                const struct rte_flow_action *action,
4351                                struct rte_eth_dev *dev,
4352                                const struct rte_flow_attr *attr,
4353                                struct rte_flow_error *error)
4354 {
4355         struct mlx5_priv *priv = dev->data->dev_private;
4356         struct mlx5_dev_config *dev_conf = &priv->config;
4357         const struct rte_flow_action_sample *sample = action->conf;
4358         const struct rte_flow_action *act;
4359         uint64_t sub_action_flags = 0;
4360         uint16_t queue_index = 0xFFFF;
4361         int actions_n = 0;
4362         int ret;
4363         fdb_mirror = 0;
4364
4365         if (!sample)
4366                 return rte_flow_error_set(error, EINVAL,
4367                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4368                                           "configuration cannot be NULL");
4369         if (sample->ratio == 0)
4370                 return rte_flow_error_set(error, EINVAL,
4371                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4372                                           "ratio value starts from 1");
4373         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4374                 return rte_flow_error_set(error, ENOTSUP,
4375                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4376                                           NULL,
4377                                           "sample action not supported");
4378         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4379                 return rte_flow_error_set(error, EINVAL,
4380                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4381                                           "Multiple sample actions not "
4382                                           "supported");
4383         if (action_flags & MLX5_FLOW_ACTION_METER)
4384                 return rte_flow_error_set(error, EINVAL,
4385                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4386                                           "wrong action order, meter should "
4387                                           "be after sample action");
4388         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4389                 return rte_flow_error_set(error, EINVAL,
4390                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4391                                           "wrong action order, jump should "
4392                                           "be after sample action");
4393         act = sample->actions;
4394         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4395                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4396                         return rte_flow_error_set(error, ENOTSUP,
4397                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4398                                                   act, "too many actions");
4399                 switch (act->type) {
4400                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4401                         ret = mlx5_flow_validate_action_queue(act,
4402                                                               sub_action_flags,
4403                                                               dev,
4404                                                               attr, error);
4405                         if (ret < 0)
4406                                 return ret;
4407                         queue_index = ((const struct rte_flow_action_queue *)
4408                                                         (act->conf))->index;
4409                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4410                         ++actions_n;
4411                         break;
4412                 case RTE_FLOW_ACTION_TYPE_MARK:
4413                         ret = flow_dv_validate_action_mark(dev, act,
4414                                                            sub_action_flags,
4415                                                            attr, error);
4416                         if (ret < 0)
4417                                 return ret;
4418                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4419                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4420                                                 MLX5_FLOW_ACTION_MARK_EXT;
4421                         else
4422                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4423                         ++actions_n;
4424                         break;
4425                 case RTE_FLOW_ACTION_TYPE_COUNT:
4426                         ret = flow_dv_validate_action_count(dev, error);
4427                         if (ret < 0)
4428                                 return ret;
4429                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4430                         ++actions_n;
4431                         break;
4432                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4433                         ret = flow_dv_validate_action_port_id(dev,
4434                                                               sub_action_flags,
4435                                                               act,
4436                                                               attr,
4437                                                               error);
4438                         if (ret)
4439                                 return ret;
4440                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4441                         ++actions_n;
4442                         break;
4443                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4444                         ret = flow_dv_validate_action_raw_encap_decap
4445                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4446                                  &actions_n, error);
4447                         if (ret < 0)
4448                                 return ret;
4449                         ++actions_n;
4450                         break;
4451                 default:
4452                         return rte_flow_error_set(error, ENOTSUP,
4453                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4454                                                   NULL,
4455                                                   "optional action not "
4456                                                   "supported");
4457                 }
4458         }
4459         if (attr->ingress && !attr->transfer) {
4460                 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4461                         return rte_flow_error_set(error, EINVAL,
4462                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4463                                                   NULL,
4464                                                   "Ingress must have a dest "
4465                                                   "QUEUE for Sample");
4466         } else if (attr->egress && !attr->transfer) {
4467                 return rte_flow_error_set(error, ENOTSUP,
4468                                           RTE_FLOW_ERROR_TYPE_ACTION,
4469                                           NULL,
4470                                           "Sample only supports Ingress "
4471                                           "or E-Switch");
4472         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4473                 MLX5_ASSERT(attr->transfer);
4474                 if (sample->ratio > 1)
4475                         return rte_flow_error_set(error, ENOTSUP,
4476                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4477                                                   NULL,
4478                                                   "E-Switch doesn't support "
4479                                                   "any optional action "
4480                                                   "for sampling");
4481                 fdb_mirror = 1;
4482                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4483                         return rte_flow_error_set(error, ENOTSUP,
4484                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4485                                                   NULL,
4486                                                   "unsupported action QUEUE");
4487                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4488                         return rte_flow_error_set(error, EINVAL,
4489                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4490                                                   NULL,
4491                                                   "E-Switch must have a dest "
4492                                                   "port for mirroring");
4493         }
4494         /* Continue validation for Xcap actions. */
4495         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4496             (queue_index == 0xFFFF ||
4497              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4498                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4499                      MLX5_FLOW_XCAP_ACTIONS)
4500                         return rte_flow_error_set(error, ENOTSUP,
4501                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4502                                                   NULL, "encap and decap "
4503                                                   "combination isn't "
4504                                                   "supported");
4505                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4506                                                         MLX5_FLOW_ACTION_ENCAP))
4507                         return rte_flow_error_set(error, ENOTSUP,
4508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4509                                                   NULL, "encap is not supported"
4510                                                   " for ingress traffic");
4511         }
4512         return 0;
4513 }
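
/*
 * Usage sketch (hypothetical snippet): a 1-in-2 ingress sampler that
 * steers the sampled packets to an Rx queue; the ratio and the
 * sub-action set are what the function above validates:
 *
 * @code
 * struct rte_flow_action_queue queue = { .index = 0 };
 * struct rte_flow_action sub_actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_action_sample sample = {
 *         .ratio = 2,              // sample one of every two packets
 *         .actions = sub_actions,
 * };
 * @endcode
 */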
4514
4515 /**
4516  * Find existing modify-header resource or create and register a new one.
4517  *
4518  * @param dev[in, out]
4519  *   Pointer to rte_eth_dev structure.
4520  * @param[in, out] resource
4521  *   Pointer to modify-header resource.
4522  * @param[in, out] dev_flow
4523  *   Pointer to the dev_flow.
4524  * @param[out] error
4525  *   Pointer to error structure.
4526  *
4527  * @return
4528  *   0 on success, otherwise a negative errno value and rte_errno is set.
4529  */
4530 static int
4531 flow_dv_modify_hdr_resource_register
4532                         (struct rte_eth_dev *dev,
4533                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4534                          struct mlx5_flow *dev_flow,
4535                          struct rte_flow_error *error)
4536 {
4537         struct mlx5_priv *priv = dev->data->dev_private;
4538         struct mlx5_dev_ctx_shared *sh = priv->sh;
4539         uint32_t key_len = sizeof(*resource) -
4540                            offsetof(typeof(*resource), ft_type) +
4541                            resource->actions_num * sizeof(resource->actions[0]);
4542         struct mlx5_hlist_entry *entry;
4543         struct mlx5_flow_cb_ctx ctx = {
4544                 .error = error,
4545                 .data = resource,
4546         };
4547
4548         resource->flags = dev_flow->dv.group ? 0 :
4549                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4550         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4551                                     resource->flags))
4552                 return rte_flow_error_set(error, EOVERFLOW,
4553                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4554                                           "too many modify header items");
4555         resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4556         entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4557         if (!entry)
4558                 return -rte_errno;
4559         resource = container_of(entry, typeof(*resource), entry);
4560         dev_flow->handle->dvh.modify_hdr = resource;
4561         return 0;
4562 }
4563
4564 /**
4565  * Get DV flow counter by index.
4566  *
4567  * @param[in] dev
4568  *   Pointer to the Ethernet device structure.
4569  * @param[in] idx
4570  *   mlx5 flow counter index in the container.
4571  * @param[out] ppool
4572  *   mlx5 flow counter pool in the container.
4573  *
4574  * @return
4575  *   Pointer to the counter, NULL otherwise.
4576  */
4577 static struct mlx5_flow_counter *
4578 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4579                            uint32_t idx,
4580                            struct mlx5_flow_counter_pool **ppool)
4581 {
4582         struct mlx5_priv *priv = dev->data->dev_private;
4583         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4584         struct mlx5_flow_counter_pool *pool;
4585
4586         /* Decrease to original index and clear shared bit. */
4587         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4588         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4589         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4590         MLX5_ASSERT(pool);
4591         if (ppool)
4592                 *ppool = pool;
4593         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4594 }
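
/*
 * Index layout sketch (an assumption inferred from the decoding above
 * and the MLX5_MAKE_CNT_IDX() usage below): counter indexes are 1-based
 * so that 0 can mean "no counter", and shared counters additionally
 * carry the MLX5_CNT_SHARED_OFFSET bit:
 *
 * @code
 * uint32_t plain = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
 * uint32_t pool_nr = plain / MLX5_COUNTERS_PER_POOL; // pool in container
 * uint32_t offset = plain % MLX5_COUNTERS_PER_POOL;  // counter in pool
 * @endcode
 */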
4595
4596 /**
4597  * Check the devx counter belongs to the pool.
4598  *
4599  * @param[in] pool
4600  *   Pointer to the counter pool.
4601  * @param[in] id
4602  *   The counter devx ID.
4603  *
4604  * @return
4605  *   True if counter belongs to the pool, false otherwise.
4606  */
4607 static bool
4608 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4609 {
4610         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4611                    MLX5_COUNTERS_PER_POOL;
4612
4613         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4614                 return true;
4615         return false;
4616 }
4617
4618 /**
4619  * Get a pool by devx counter ID.
4620  *
4621  * @param[in] cmng
4622  *   Pointer to the counter management.
4623  * @param[in] id
4624  *   The counter devx ID.
4625  *
4626  * @return
4627  *   The counter pool pointer if it exists, NULL otherwise.
4628  */
4629 static struct mlx5_flow_counter_pool *
4630 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4631 {
4632         uint32_t i;
4633         struct mlx5_flow_counter_pool *pool = NULL;
4634
4635         rte_spinlock_lock(&cmng->pool_update_sl);
4636         /* Check last used pool. */
4637         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4638             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4639                 pool = cmng->pools[cmng->last_pool_idx];
4640                 goto out;
4641         }
4642         /* ID out of range means no suitable pool in the container. */
4643         if (id > cmng->max_id || id < cmng->min_id)
4644                 goto out;
4645         /*
4646          * Search the pools from the end of the container, since counter
4647          * IDs mostly increase sequentially, so the last pool is usually
4648          * the needed one.
4649          */
4650         i = cmng->n_valid;
4651         while (i--) {
4652                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4653
4654                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4655                         pool = pool_tmp;
4656                         break;
4657                 }
4658         }
4659 out:
4660         rte_spinlock_unlock(&cmng->pool_update_sl);
4661         return pool;
4662 }
4663
4664 /**
4665  * Resize a counter container.
4666  *
4667  * @param[in] dev
4668  *   Pointer to the Ethernet device structure.
4669  *
4670  * @return
4671  *   0 on success, otherwise negative errno value and rte_errno is set.
4672  */
4673 static int
4674 flow_dv_container_resize(struct rte_eth_dev *dev)
4675 {
4676         struct mlx5_priv *priv = dev->data->dev_private;
4677         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4678         void *old_pools = cmng->pools;
4679         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4680         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4681         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4682
4683         if (!pools) {
4684                 rte_errno = ENOMEM;
4685                 return -ENOMEM;
4686         }
4687         if (old_pools)
4688                 memcpy(pools, old_pools, cmng->n *
4689                                        sizeof(struct mlx5_flow_counter_pool *));
4690         cmng->n = resize;
4691         cmng->pools = pools;
4692         if (old_pools)
4693                 mlx5_free(old_pools);
4694         return 0;
4695 }
4696
4697 /**
4698  * Query a devx flow counter.
4699  *
4700  * @param[in] dev
4701  *   Pointer to the Ethernet device structure.
4702  * @param[in] cnt
4703  *   Index to the flow counter.
4704  * @param[out] pkts
4705  *   The statistics value of packets.
4706  * @param[out] bytes
4707  *   The statistics value of bytes.
4708  *
4709  * @return
4710  *   0 on success, otherwise a negative errno value and rte_errno is set.
4711  */
4712 static inline int
4713 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4714                      uint64_t *bytes)
4715 {
4716         struct mlx5_priv *priv = dev->data->dev_private;
4717         struct mlx5_flow_counter_pool *pool = NULL;
4718         struct mlx5_flow_counter *cnt;
4719         int offset;
4720
4721         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4722         MLX5_ASSERT(pool);
4723         if (priv->sh->cmng.counter_fallback)
4724                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4725                                         0, pkts, bytes, 0, NULL, NULL, 0);
4726         rte_spinlock_lock(&pool->sl);
4727         if (!pool->raw) {
4728                 *pkts = 0;
4729                 *bytes = 0;
4730         } else {
4731                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4732                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4733                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4734         }
4735         rte_spinlock_unlock(&pool->sl);
4736         return 0;
4737 }
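
/*
 * Application-side sketch (hypothetical snippet, assuming port_id and a
 * flow handle from rte_flow_create()): the values collected above are
 * exposed through the generic rte_flow_query() call with a COUNT action
 * query:
 *
 * @code
 * struct rte_flow_query_count qc = { .reset = 0 };
 * struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 * struct rte_flow_error err;
 *
 * if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0)
 *         printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", qc.hits, qc.bytes);
 * @endcode
 */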
4738
4739 /**
4740  * Create and initialize a new counter pool.
4741  *
4742  * @param[in] dev
4743  *   Pointer to the Ethernet device structure.
4744  * @param[out] dcs
4745  *   The devX counter handle.
4746  * @param[in] age
4747  *   Whether the pool is for counter that was allocated for aging.
4748  * @param[in/out] cont_cur
4749  *   Pointer to the container pointer; it will be updated on pool resize.
4750  *
4751  * @return
4752  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4753  */
4754 static struct mlx5_flow_counter_pool *
4755 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4756                     uint32_t age)
4757 {
4758         struct mlx5_priv *priv = dev->data->dev_private;
4759         struct mlx5_flow_counter_pool *pool;
4760         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4761         bool fallback = priv->sh->cmng.counter_fallback;
4762         uint32_t size = sizeof(*pool);
4763
4764         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4765         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4766         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4767         if (!pool) {
4768                 rte_errno = ENOMEM;
4769                 return NULL;
4770         }
4771         pool->raw = NULL;
4772         pool->is_aged = !!age;
4773         pool->query_gen = 0;
4774         pool->min_dcs = dcs;
4775         rte_spinlock_init(&pool->sl);
4776         rte_spinlock_init(&pool->csl);
4777         TAILQ_INIT(&pool->counters[0]);
4778         TAILQ_INIT(&pool->counters[1]);
4779         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4780         rte_spinlock_lock(&cmng->pool_update_sl);
4781         pool->index = cmng->n_valid;
4782         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4783                 mlx5_free(pool);
4784                 rte_spinlock_unlock(&cmng->pool_update_sl);
4785                 return NULL;
4786         }
4787         cmng->pools[pool->index] = pool;
4788         cmng->n_valid++;
4789         if (unlikely(fallback)) {
4790                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4791
4792                 if (base < cmng->min_id)
4793                         cmng->min_id = base;
4794                 if (base > cmng->max_id)
4795                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4796                 cmng->last_pool_idx = pool->index;
4797         }
4798         rte_spinlock_unlock(&cmng->pool_update_sl);
4799         return pool;
4800 }
4801
4802 /**
4803  * Prepare a new counter and/or a new counter pool.
4804  *
4805  * @param[in] dev
4806  *   Pointer to the Ethernet device structure.
4807  * @param[out] cnt_free
4808  *   Where to put the pointer of a new counter.
4809  * @param[in] age
4810  *   Whether the pool is for counter that was allocated for aging.
4811  *
4812  * @return
4813  *   The counter pool pointer and @p cnt_free is set on success,
4814  *   NULL otherwise and rte_errno is set.
4815  */
4816 static struct mlx5_flow_counter_pool *
4817 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4818                              struct mlx5_flow_counter **cnt_free,
4819                              uint32_t age)
4820 {
4821         struct mlx5_priv *priv = dev->data->dev_private;
4822         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4823         struct mlx5_flow_counter_pool *pool;
4824         struct mlx5_counters tmp_tq;
4825         struct mlx5_devx_obj *dcs = NULL;
4826         struct mlx5_flow_counter *cnt;
4827         enum mlx5_counter_type cnt_type =
4828                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4829         bool fallback = priv->sh->cmng.counter_fallback;
4830         uint32_t i;
4831
4832         if (fallback) {
4833                 /* bulk_bitmap must be 0 for single counter allocation. */
4834                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4835                 if (!dcs)
4836                         return NULL;
4837                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4838                 if (!pool) {
4839                         pool = flow_dv_pool_create(dev, dcs, age);
4840                         if (!pool) {
4841                                 mlx5_devx_cmd_destroy(dcs);
4842                                 return NULL;
4843                         }
4844                 }
4845                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4846                 cnt = MLX5_POOL_GET_CNT(pool, i);
4847                 cnt->pool = pool;
4848                 cnt->dcs_when_free = dcs;
4849                 *cnt_free = cnt;
4850                 return pool;
4851         }
4852         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4853         if (!dcs) {
4854                 rte_errno = ENODATA;
4855                 return NULL;
4856         }
4857         pool = flow_dv_pool_create(dev, dcs, age);
4858         if (!pool) {
4859                 mlx5_devx_cmd_destroy(dcs);
4860                 return NULL;
4861         }
4862         TAILQ_INIT(&tmp_tq);
4863         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4864                 cnt = MLX5_POOL_GET_CNT(pool, i);
4865                 cnt->pool = pool;
4866                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4867         }
4868         rte_spinlock_lock(&cmng->csl[cnt_type]);
4869         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4870         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4871         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4872         (*cnt_free)->pool = pool;
4873         return pool;
4874 }
4875
4876 /**
4877  * Allocate a flow counter.
4878  *
4879  * @param[in] dev
4880  *   Pointer to the Ethernet device structure.
4881  * @param[in] age
4882  *   Whether the counter was allocated for aging.
4883  *
4884  * @return
4885  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4886  */
4887 static uint32_t
4888 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4889 {
4890         struct mlx5_priv *priv = dev->data->dev_private;
4891         struct mlx5_flow_counter_pool *pool = NULL;
4892         struct mlx5_flow_counter *cnt_free = NULL;
4893         bool fallback = priv->sh->cmng.counter_fallback;
4894         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4895         enum mlx5_counter_type cnt_type =
4896                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4897         uint32_t cnt_idx;
4898
4899         if (!priv->config.devx) {
4900                 rte_errno = ENOTSUP;
4901                 return 0;
4902         }
4903         /* Get free counters from container. */
4904         rte_spinlock_lock(&cmng->csl[cnt_type]);
4905         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4906         if (cnt_free)
4907                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4908         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4909         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4910                 goto err;
4911         pool = cnt_free->pool;
4912         if (fallback)
4913                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4914         /* Create a DV counter action only on first-time usage. */
4915         if (!cnt_free->action) {
4916                 uint16_t offset;
4917                 struct mlx5_devx_obj *dcs;
4918                 int ret;
4919
4920                 if (!fallback) {
4921                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4922                         dcs = pool->min_dcs;
4923                 } else {
4924                         offset = 0;
4925                         dcs = cnt_free->dcs_when_free;
4926                 }
4927                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4928                                                             &cnt_free->action);
4929                 if (ret) {
4930                         rte_errno = errno;
4931                         goto err;
4932                 }
4933         }
4934         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4935                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4936         /* Update the counter reset values. */
4937         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4938                                  &cnt_free->bytes))
4939                 goto err;
4940         if (!fallback && !priv->sh->cmng.query_thread_on)
4941                 /* Start the asynchronous batch query by the host thread. */
4942                 mlx5_set_query_alarm(priv->sh);
4943         return cnt_idx;
4944 err:
4945         if (cnt_free) {
4946                 cnt_free->pool = pool;
4947                 if (fallback)
4948                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4949                 rte_spinlock_lock(&cmng->csl[cnt_type]);
4950                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4951                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4952         }
4953         return 0;
4954 }
4955
4956 /**
4957  * Allocate a shared flow counter.
4958  *
4959  * @param[in] ctx
4960  *   Pointer to the shared counter configuration.
4961  * @param[in] data
4962  *   Pointer to save the allocated counter index.
4963  *
4964  * @return
4965  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4966  */
4968 static int32_t
4969 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4970 {
4971         struct mlx5_shared_counter_conf *conf = ctx;
4972         struct rte_eth_dev *dev = conf->dev;
4973         struct mlx5_flow_counter *cnt;
4974
4975         data->dword = flow_dv_counter_alloc(dev, 0);
4976         data->dword |= MLX5_CNT_SHARED_OFFSET;
4977         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4978         cnt->shared_info.id = conf->id;
4979         return 0;
4980 }
4981
4982 /**
4983  * Get a shared flow counter.
4984  *
4985  * @param[in] dev
4986  *   Pointer to the Ethernet device structure.
4987  * @param[in] id
4988  *   Counter identifier.
4989  *
4990  * @return
4991  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4992  */
4993 static uint32_t
4994 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4995 {
4996         struct mlx5_priv *priv = dev->data->dev_private;
4997         struct mlx5_shared_counter_conf conf = {
4998                 .dev = dev,
4999                 .id = id,
5000         };
5001         union mlx5_l3t_data data = {
5002                 .dword = 0,
5003         };
5004
5005         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5006                                flow_dv_counter_alloc_shared_cb, &conf);
5007         return data.dword;
5008 }
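
/*
 * Usage sketch (hypothetical snippet): an application requests a shared
 * counter by setting the shared bit in the COUNT action conf; rules on
 * the same port that use the same id then share a single counter:
 *
 * @code
 * struct rte_flow_action_count cnt = { .shared = 1, .id = 42 };
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */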
5009
5010 /**
5011  * Get age param from counter index.
5012  *
5013  * @param[in] dev
5014  *   Pointer to the Ethernet device structure.
5015  * @param[in] counter
5016  *   Index to the counter handler.
5017  *
5018  * @return
5019  *   The aging parameter specified for the counter index.
5020  */
5021 static struct mlx5_age_param*
5022 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5023                                 uint32_t counter)
5024 {
5025         struct mlx5_flow_counter *cnt;
5026         struct mlx5_flow_counter_pool *pool = NULL;
5027
5028         flow_dv_counter_get_by_idx(dev, counter, &pool);
5029         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5030         cnt = MLX5_POOL_GET_CNT(pool, counter);
5031         return MLX5_CNT_TO_AGE(cnt);
5032 }
5033
5034 /**
5035  * Remove a flow counter from aged counter list.
5036  *
5037  * @param[in] dev
5038  *   Pointer to the Ethernet device structure.
5039  * @param[in] counter
5040  *   Index to the counter handler.
5041  * @param[in] cnt
5042  *   Pointer to the counter handler.
5043  */
5044 static void
5045 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5046                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5047 {
5048         struct mlx5_age_info *age_info;
5049         struct mlx5_age_param *age_param;
5050         struct mlx5_priv *priv = dev->data->dev_private;
5051         uint16_t expected = AGE_CANDIDATE;
5052
5053         age_info = GET_PORT_AGE_INFO(priv);
5054         age_param = flow_dv_counter_idx_get_age(dev, counter);
5055         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5056                                          AGE_FREE, false, __ATOMIC_RELAXED,
5057                                          __ATOMIC_RELAXED)) {
5058                 /*
5059                  * We need the lock even if it is an age timeout,
5060                  * since the counter may still be in process.
5061                  */
5062                 rte_spinlock_lock(&age_info->aged_sl);
5063                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5064                 rte_spinlock_unlock(&age_info->aged_sl);
5065                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5066         }
5067 }
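
/*
 * State sketch (added for clarity): an AGE_CANDIDATE counter is detached
 * from aging by the compare-exchange alone; a counter that already aged
 * out sits on the aged_counters list, so it is unlinked under the
 * aged_sl lock before its state is finally set to AGE_FREE.
 */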
5068
5069 /**
5070  * Release a flow counter.
5071  *
5072  * @param[in] dev
5073  *   Pointer to the Ethernet device structure.
5074  * @param[in] counter
5075  *   Index to the counter handler.
5076  */
5077 static void
5078 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
5079 {
5080         struct mlx5_priv *priv = dev->data->dev_private;
5081         struct mlx5_flow_counter_pool *pool = NULL;
5082         struct mlx5_flow_counter *cnt;
5083         enum mlx5_counter_type cnt_type;
5084
5085         if (!counter)
5086                 return;
5087         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5088         MLX5_ASSERT(pool);
5089         if (IS_SHARED_CNT(counter) &&
5090             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5091                 return;
5092         if (pool->is_aged)
5093                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5094         cnt->pool = pool;
5095         /*
5096          * Put the counter back to the list to be updated in non-fallback
5097          * mode. Two lists are used alternately: while one is being queried,
5098          * freed counters are added to the other one, selected by the pool
5099          * query_gen value. After the query finishes, the queried list is
5100          * appended to the global container counter list. The lists switch
5101          * when a query starts, and since the query callback and this
5102          * release function operate on different lists, no extra
5103          * synchronization between them is needed.
5104          */
5105         if (!priv->sh->cmng.counter_fallback) {
5106                 rte_spinlock_lock(&pool->csl);
5107                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5108                 rte_spinlock_unlock(&pool->csl);
5109         } else {
5110                 cnt->dcs_when_free = cnt->dcs_when_active;
5111                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5112                                            MLX5_COUNTER_TYPE_ORIGIN;
5113                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5114                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5115                                   cnt, next);
5116                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5117         }
5118 }
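
/*
 * Example (added for clarity): with query_gen alternating between 0 and
 * 1, counters freed while pool->counters[0] is being queried land on
 * pool->counters[1] and vice versa, so the query callback and the
 * release path above never walk the same list concurrently.
 */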
5119
5120 /**
5121  * Verify the @p attributes will be correctly understood by the NIC and store
5122  * them in the @p flow if everything is correct.
5123  *
5124  * @param[in] dev
5125  *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL for regular flows.
5126  * @param[in] attributes
5127  *   Pointer to flow attributes.
5128  * @param[in] grp_info
5129  *   Flow group translation info.
5130  * @param[out] error
5131  *   Pointer to error structure.
5132  *
5133  * @return
5134  *   - 0 on success and non-root table.
5135  *   - 1 on success and root table.
5136  *   - a negative errno value otherwise and rte_errno is set.
5137  */
5138 static int
5139 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5140                             const struct mlx5_flow_tunnel *tunnel,
5141                             const struct rte_flow_attr *attributes,
5142                             struct flow_grp_info grp_info,
5143                             struct rte_flow_error *error)
5144 {
5145         struct mlx5_priv *priv = dev->data->dev_private;
5146         uint32_t priority_max = priv->config.flow_prio - 1;
5147         int ret = 0;
5148
5149 #ifndef HAVE_MLX5DV_DR
5150         RTE_SET_USED(tunnel);
5151         RTE_SET_USED(grp_info);
5152         if (attributes->group)
5153                 return rte_flow_error_set(error, ENOTSUP,
5154                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5155                                           NULL,
5156                                           "groups are not supported");
5157 #else
5158         uint32_t table = 0;
5159
5160         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5161                                        grp_info, error);
5162         if (ret)
5163                 return ret;
5164         if (!table)
5165                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5166 #endif
5167         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5168             attributes->priority >= priority_max)
5169                 return rte_flow_error_set(error, ENOTSUP,
5170                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5171                                           NULL,
5172                                           "priority out of range");
5173         if (attributes->transfer) {
5174                 if (!priv->config.dv_esw_en)
5175                         return rte_flow_error_set
5176                                 (error, ENOTSUP,
5177                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5178                                  "E-Switch dr is not supported");
5179                 if (!(priv->representor || priv->master))
5180                         return rte_flow_error_set
5181                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5182                                  NULL, "E-Switch configuration can only be"
5183                                  " done by a master or a representor device");
5184                 if (attributes->egress)
5185                         return rte_flow_error_set
5186                                 (error, ENOTSUP,
5187                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5188                                  "egress is not supported");
5189         }
5190         if (!(attributes->egress ^ attributes->ingress))
5191                 return rte_flow_error_set(error, ENOTSUP,
5192                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5193                                           "must specify exactly one of "
5194                                           "ingress or egress");
5195         return ret;
5196 }
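
/*
 * Note (added for clarity): on HAVE_MLX5DV_DR builds the non-zero
 * success value above is MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL, which
 * evaluates to 1, matching the "1 on success and root table" contract.
 */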
5197
5198 /**
5199  * Internal validation function for validating both actions and items.
5200  *
5201  * @param[in] dev
5202  *   Pointer to the rte_eth_dev structure.
5203  * @param[in] attr
5204  *   Pointer to the flow attributes.
5205  * @param[in] items
5206  *   Pointer to the list of items.
5207  * @param[in] actions
5208  *   Pointer to the list of actions.
5209  * @param[in] external
5210  *   This flow rule is created by a request external to the PMD.
5211  * @param[in] hairpin
5212  *   Number of hairpin TX actions; 0 means a classic (non-hairpin) flow.
5213  * @param[out] error
5214  *   Pointer to the error structure.
5215  *
5216  * @return
5217  *   0 on success, a negative errno value otherwise and rte_errno is set.
5218  */
5219 static int
5220 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5221                  const struct rte_flow_item items[],
5222                  const struct rte_flow_action actions[],
5223                  bool external, int hairpin, struct rte_flow_error *error)
5224 {
5225         int ret;
5226         uint64_t action_flags = 0;
5227         uint64_t item_flags = 0;
5228         uint64_t last_item = 0;
5229         uint8_t next_protocol = 0xff;
5230         uint16_t ether_type = 0;
5231         int actions_n = 0;
5232         uint8_t item_ipv6_proto = 0;
5233         const struct rte_flow_item *gre_item = NULL;
5234         const struct rte_flow_action_raw_decap *decap;
5235         const struct rte_flow_action_raw_encap *encap;
5236         const struct rte_flow_action_rss *rss;
5237         const struct rte_flow_item_tcp nic_tcp_mask = {
5238                 .hdr = {
5239                         .tcp_flags = 0xFF,
5240                         .src_port = RTE_BE16(UINT16_MAX),
5241                         .dst_port = RTE_BE16(UINT16_MAX),
5242                 }
5243         };
5244         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5245                 .hdr = {
5246                         .src_addr =
5247                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5248                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5249                         .dst_addr =
5250                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5251                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5252                         .vtc_flow = RTE_BE32(0xffffffff),
5253                         .proto = 0xff,
5254                         .hop_limits = 0xff,
5255                 },
5256                 .has_frag_ext = 1,
5257         };
5258         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5259                 .hdr = {
5260                         .common = {
5261                                 .u32 =
5262                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5263                                         .type = 0xFF,
5264                                         }).u32),
5265                         },
5266                         .dummy[0] = 0xffffffff,
5267                 },
5268         };
5269         struct mlx5_priv *priv = dev->data->dev_private;
5270         struct mlx5_dev_config *dev_conf = &priv->config;
5271         uint16_t queue_index = 0xFFFF;
5272         const struct rte_flow_item_vlan *vlan_m = NULL;
5273         int16_t rw_act_num = 0;
5274         uint64_t is_root;
5275         const struct mlx5_flow_tunnel *tunnel;
5276         struct flow_grp_info grp_info = {
5277                 .external = !!external,
5278                 .transfer = !!attr->transfer,
5279                 .fdb_def_rule = !!priv->fdb_def_rule,
5280         };
5281         const struct rte_eth_hairpin_conf *conf;
5282
5283         if (items == NULL)
5284                 return -1;
5285         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5286                 tunnel = flow_items_to_tunnel(items);
5287                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5288                                 MLX5_FLOW_ACTION_DECAP;
5289         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5290                 tunnel = flow_actions_to_tunnel(actions);
5291                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5292         } else {
5293                 tunnel = NULL;
5294         }
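        /*
         * Tunnel offload classification (added for clarity): a rule carrying
         * the MLX5 private TUNNEL item is a tunnel "match" rule and implies
         * DECAP; a rule carrying the private TUNNEL_SET action is a tunnel
         * "steer" rule.
         */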
5295         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5296                                 (dev, tunnel, attr, items, actions);
5297         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5298         if (ret < 0)
5299                 return ret;
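        /* A return of 1 (root table) is remembered for later checks. */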
5300         is_root = (uint64_t)ret;
5301         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5302                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5303                 int type = items->type;
5304
5305                 if (!mlx5_flow_os_item_supported(type))
5306                         return rte_flow_error_set(error, ENOTSUP,
5307                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5308                                                   NULL, "item not supported");
5309                 switch (type) {
5310                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5311                         if (items[0].type != (typeof(items[0].type))
5312                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5313                                 return rte_flow_error_set
5314                                                 (error, EINVAL,
5315                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5316                                                 NULL, "MLX5 private items "
5317                                                 "must be the first");
5318                         break;
5319                 case RTE_FLOW_ITEM_TYPE_VOID:
5320                         break;
5321                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5322                         ret = flow_dv_validate_item_port_id
5323                                         (dev, items, attr, item_flags, error);
5324                         if (ret < 0)
5325                                 return ret;
5326                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5327                         break;
5328                 case RTE_FLOW_ITEM_TYPE_ETH:
5329                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5330                                                           true, error);
5331                         if (ret < 0)
5332                                 return ret;
5333                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5334                                              MLX5_FLOW_LAYER_OUTER_L2;
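                        /*
                         * Cache the masked EtherType so the following L3
                         * item can be cross-checked against it.
                         */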
5335                         if (items->mask != NULL && items->spec != NULL) {
5336                                 ether_type =
5337                                         ((const struct rte_flow_item_eth *)
5338                                          items->spec)->type;
5339                                 ether_type &=
5340                                         ((const struct rte_flow_item_eth *)
5341                                          items->mask)->type;
5342                                 ether_type = rte_be_to_cpu_16(ether_type);
5343                         } else {
5344                                 ether_type = 0;
5345                         }
5346                         break;
5347                 case RTE_FLOW_ITEM_TYPE_VLAN:
5348                         ret = flow_dv_validate_item_vlan(items, item_flags,
5349                                                          dev, error);
5350                         if (ret < 0)
5351                                 return ret;
5352                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5353                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5354                         if (items->mask != NULL && items->spec != NULL) {
5355                                 ether_type =
5356                                         ((const struct rte_flow_item_vlan *)
5357                                          items->spec)->inner_type;
5358                                 ether_type &=
5359                                         ((const struct rte_flow_item_vlan *)
5360                                          items->mask)->inner_type;
5361                                 ether_type = rte_be_to_cpu_16(ether_type);
5362                         } else {
5363                                 ether_type = 0;
5364                         }
5365                         /* Store outer VLAN mask for of_push_vlan action. */
5366                         if (!tunnel)
5367                                 vlan_m = items->mask;
5368                         break;
5369                 case RTE_FLOW_ITEM_TYPE_IPV4:
5370                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5371                                                   &item_flags, &tunnel);
5372                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5373                                                          last_item, ether_type,
5374                                                          error);
5375                         if (ret < 0)
5376                                 return ret;
5377                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5378                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5379                         if (items->mask != NULL &&
5380                             ((const struct rte_flow_item_ipv4 *)
5381                              items->mask)->hdr.next_proto_id) {
5382                                 next_protocol =
5383                                         ((const struct rte_flow_item_ipv4 *)
5384                                          (items->spec))->hdr.next_proto_id;
5385                                 next_protocol &=
5386                                         ((const struct rte_flow_item_ipv4 *)
5387                                          (items->mask))->hdr.next_proto_id;
5388                         } else {
5389                                 /* Reset for inner layer. */
5390                                 next_protocol = 0xff;
5391                         }
5392                         break;
5393                 case RTE_FLOW_ITEM_TYPE_IPV6:
5394                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5395                                                   &item_flags, &tunnel);
5396                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5397                                                            last_item,
5398                                                            ether_type,
5399                                                            &nic_ipv6_mask,
5400                                                            error);
5401                         if (ret < 0)
5402                                 return ret;
5403                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5404                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5405                         if (items->mask != NULL &&
5406                             ((const struct rte_flow_item_ipv6 *)
5407                              items->mask)->hdr.proto) {
5408                                 item_ipv6_proto =
5409                                         ((const struct rte_flow_item_ipv6 *)
5410                                          items->spec)->hdr.proto;
5411                                 next_protocol =
5412                                         ((const struct rte_flow_item_ipv6 *)
5413                                          items->spec)->hdr.proto;
5414                                 next_protocol &=
5415                                         ((const struct rte_flow_item_ipv6 *)
5416                                          items->mask)->hdr.proto;
5417                         } else {
5418                                 /* Reset for inner layer. */
5419                                 next_protocol = 0xff;
5420                         }
5421                         break;
5422                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5423                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5424                                                                   item_flags,
5425                                                                   error);
5426                         if (ret < 0)
5427                                 return ret;
5428                         last_item = tunnel ?
5429                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5430                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5431                         if (items->mask != NULL &&
5432                             ((const struct rte_flow_item_ipv6_frag_ext *)
5433                              items->mask)->hdr.next_header) {
5434                                 next_protocol =
5435                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5436                                  items->spec)->hdr.next_header;
5437                                 next_protocol &=
5438                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5439                                  items->mask)->hdr.next_header;
5440                         } else {
5441                                 /* Reset for inner layer. */
5442                                 next_protocol = 0xff;
5443                         }
5444                         break;
5445                 case RTE_FLOW_ITEM_TYPE_TCP:
5446                         ret = mlx5_flow_validate_item_tcp
5447                                                 (items, item_flags,
5448                                                  next_protocol,
5449                                                  &nic_tcp_mask,
5450                                                  error);
5451                         if (ret < 0)
5452                                 return ret;
5453                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5454                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5455                         break;
5456                 case RTE_FLOW_ITEM_TYPE_UDP:
5457                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5458                                                           next_protocol,
5459                                                           error);
5460                         if (ret < 0)
5461                                 return ret;
5462                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5463                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5464                         break;
5465                 case RTE_FLOW_ITEM_TYPE_GRE:
5466                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5467                                                           next_protocol, error);
5468                         if (ret < 0)
5469                                 return ret;
5470                         gre_item = items;
5471                         last_item = MLX5_FLOW_LAYER_GRE;
5472                         break;
5473                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5474                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5475                                                             next_protocol,
5476                                                             error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         last_item = MLX5_FLOW_LAYER_NVGRE;
5480                         break;
5481                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5482                         ret = mlx5_flow_validate_item_gre_key
5483                                 (items, item_flags, gre_item, error);
5484                         if (ret < 0)
5485                                 return ret;
5486                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5487                         break;
5488                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5489                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5490                                                             error);
5491                         if (ret < 0)
5492                                 return ret;
5493                         last_item = MLX5_FLOW_LAYER_VXLAN;
5494                         break;
5495                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5496                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5497                                                                 item_flags, dev,
5498                                                                 error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5502                         break;
5503                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5504                         ret = mlx5_flow_validate_item_geneve(items,
5505                                                              item_flags, dev,
5506                                                              error);
5507                         if (ret < 0)
5508                                 return ret;
5509                         last_item = MLX5_FLOW_LAYER_GENEVE;
5510                         break;
5511                 case RTE_FLOW_ITEM_TYPE_MPLS:
5512                         ret = mlx5_flow_validate_item_mpls(dev, items,
5513                                                            item_flags,
5514                                                            last_item, error);
5515                         if (ret < 0)
5516                                 return ret;
5517                         last_item = MLX5_FLOW_LAYER_MPLS;
5518                         break;
5520                 case RTE_FLOW_ITEM_TYPE_MARK:
5521                         ret = flow_dv_validate_item_mark(dev, items, attr,
5522                                                          error);
5523                         if (ret < 0)
5524                                 return ret;
5525                         last_item = MLX5_FLOW_ITEM_MARK;
5526                         break;
5527                 case RTE_FLOW_ITEM_TYPE_META:
5528                         ret = flow_dv_validate_item_meta(dev, items, attr,
5529                                                          error);
5530                         if (ret < 0)
5531                                 return ret;
5532                         last_item = MLX5_FLOW_ITEM_METADATA;
5533                         break;
5534                 case RTE_FLOW_ITEM_TYPE_ICMP:
5535                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5536                                                            next_protocol,
5537                                                            error);
5538                         if (ret < 0)
5539                                 return ret;
5540                         last_item = MLX5_FLOW_LAYER_ICMP;
5541                         break;
5542                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5543                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5544                                                             next_protocol,
5545                                                             error);
5546                         if (ret < 0)
5547                                 return ret;
5548                         item_ipv6_proto = IPPROTO_ICMPV6;
5549                         last_item = MLX5_FLOW_LAYER_ICMP6;
5550                         break;
5551                 case RTE_FLOW_ITEM_TYPE_TAG:
5552                         ret = flow_dv_validate_item_tag(dev, items,
5553                                                         attr, error);
5554                         if (ret < 0)
5555                                 return ret;
5556                         last_item = MLX5_FLOW_ITEM_TAG;
5557                         break;
5558                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5559                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5560                         break;
5561                 case RTE_FLOW_ITEM_TYPE_GTP:
5562                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5563                                                         error);
5564                         if (ret < 0)
5565                                 return ret;
5566                         last_item = MLX5_FLOW_LAYER_GTP;
5567                         break;
5568                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5569                         /* Capacity will be checked in the translate stage. */
5570                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5571                                                             last_item,
5572                                                             ether_type,
5573                                                             &nic_ecpri_mask,
5574                                                             error);
5575                         if (ret < 0)
5576                                 return ret;
5577                         last_item = MLX5_FLOW_LAYER_ECPRI;
5578                         break;
5579                 default:
5580                         return rte_flow_error_set(error, ENOTSUP,
5581                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5582                                                   NULL, "item not supported");
5583                 }
5584                 item_flags |= last_item;
5585         }
5586         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5587                 int type = actions->type;
5588
5589                 if (!mlx5_flow_os_action_supported(type))
5590                         return rte_flow_error_set(error, ENOTSUP,
5591                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5592                                                   actions,
5593                                                   "action not supported");
5594                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5595                         return rte_flow_error_set(error, ENOTSUP,
5596                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5597                                                   actions, "too many actions");
5598                 switch (type) {
5599                 case RTE_FLOW_ACTION_TYPE_VOID:
5600                         break;
5601                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5602                         ret = flow_dv_validate_action_port_id(dev,
5603                                                               action_flags,
5604                                                               actions,
5605                                                               attr,
5606                                                               error);
5607                         if (ret)
5608                                 return ret;
5609                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5610                         ++actions_n;
5611                         break;
5612                 case RTE_FLOW_ACTION_TYPE_FLAG:
5613                         ret = flow_dv_validate_action_flag(dev, action_flags,
5614                                                            attr, error);
5615                         if (ret < 0)
5616                                 return ret;
5617                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5618                                 /* Count all modify-header actions as one. */
5619                                 if (!(action_flags &
5620                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5621                                         ++actions_n;
5622                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5623                                                 MLX5_FLOW_ACTION_MARK_EXT;
5624                         } else {
5625                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5626                                 ++actions_n;
5627                         }
5628                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5629                         break;
5630                 case RTE_FLOW_ACTION_TYPE_MARK:
5631                         ret = flow_dv_validate_action_mark(dev, actions,
5632                                                            action_flags,
5633                                                            attr, error);
5634                         if (ret < 0)
5635                                 return ret;
5636                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5637                                 /* Count all modify-header actions as one. */
5638                                 if (!(action_flags &
5639                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5640                                         ++actions_n;
5641                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5642                                                 MLX5_FLOW_ACTION_MARK_EXT;
5643                         } else {
5644                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5645                                 ++actions_n;
5646                         }
5647                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5648                         break;
5649                 case RTE_FLOW_ACTION_TYPE_SET_META:
5650                         ret = flow_dv_validate_action_set_meta(dev, actions,
5651                                                                action_flags,
5652                                                                attr, error);
5653                         if (ret < 0)
5654                                 return ret;
5655                         /* Count all modify-header actions as one action. */
5656                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5657                                 ++actions_n;
5658                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5659                         rw_act_num += MLX5_ACT_NUM_SET_META;
5660                         break;
5661                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5662                         ret = flow_dv_validate_action_set_tag(dev, actions,
5663                                                               action_flags,
5664                                                               attr, error);
5665                         if (ret < 0)
5666                                 return ret;
5667                         /* Count all modify-header actions as one action. */
5668                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5669                                 ++actions_n;
5670                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5671                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5672                         break;
5673                 case RTE_FLOW_ACTION_TYPE_DROP:
5674                         ret = mlx5_flow_validate_action_drop(action_flags,
5675                                                              attr, error);
5676                         if (ret < 0)
5677                                 return ret;
5678                         action_flags |= MLX5_FLOW_ACTION_DROP;
5679                         ++actions_n;
5680                         break;
5681                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5682                         ret = mlx5_flow_validate_action_queue(actions,
5683                                                               action_flags, dev,
5684                                                               attr, error);
5685                         if (ret < 0)
5686                                 return ret;
5687                         queue_index = ((const struct rte_flow_action_queue *)
5688                                                         (actions->conf))->index;
5689                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5690                         ++actions_n;
5691                         break;
5692                 case RTE_FLOW_ACTION_TYPE_RSS:
5693                         rss = actions->conf;
5694                         ret = mlx5_flow_validate_action_rss(actions,
5695                                                             action_flags, dev,
5696                                                             attr, item_flags,
5697                                                             error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         if (rss != NULL && rss->queue_num)
5701                                 queue_index = rss->queue[0];
5702                         action_flags |= MLX5_FLOW_ACTION_RSS;
5703                         ++actions_n;
5704                         break;
5705                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5706                         ret =
5707                         mlx5_flow_validate_action_default_miss(action_flags,
5708                                         attr, error);
5709                         if (ret < 0)
5710                                 return ret;
5711                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5712                         ++actions_n;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_COUNT:
5715                         ret = flow_dv_validate_action_count(dev, error);
5716                         if (ret < 0)
5717                                 return ret;
5718                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5719                         ++actions_n;
5720                         break;
5721                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5722                         if (flow_dv_validate_action_pop_vlan(dev,
5723                                                              action_flags,
5724                                                              actions,
5725                                                              item_flags, attr,
5726                                                              error))
5727                                 return -rte_errno;
5728                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5729                         ++actions_n;
5730                         break;
5731                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5732                         ret = flow_dv_validate_action_push_vlan(dev,
5733                                                                 action_flags,
5734                                                                 vlan_m,
5735                                                                 actions, attr,
5736                                                                 error);
5737                         if (ret < 0)
5738                                 return ret;
5739                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5740                         ++actions_n;
5741                         break;
5742                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5743                         ret = flow_dv_validate_action_set_vlan_pcp
5744                                                 (action_flags, actions, error);
5745                         if (ret < 0)
5746                                 return ret;
5747                         /* Count PCP with push_vlan command. */
5748                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5749                         break;
5750                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5751                         ret = flow_dv_validate_action_set_vlan_vid
5752                                                 (item_flags, action_flags,
5753                                                  actions, error);
5754                         if (ret < 0)
5755                                 return ret;
5756                         /* Count VID with push_vlan command. */
5757                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5758                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5759                         break;
5760                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5761                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5762                         ret = flow_dv_validate_action_l2_encap(dev,
5763                                                                action_flags,
5764                                                                actions, attr,
5765                                                                error);
5766                         if (ret < 0)
5767                                 return ret;
5768                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5769                         ++actions_n;
5770                         break;
5771                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5772                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5773                         ret = flow_dv_validate_action_decap(dev, action_flags,
5774                                                             attr, error);
5775                         if (ret < 0)
5776                                 return ret;
5777                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5778                         ++actions_n;
5779                         break;
5780                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5781                         ret = flow_dv_validate_action_raw_encap_decap
5782                                 (dev, NULL, actions->conf, attr, &action_flags,
5783                                  &actions_n, error);
5784                         if (ret < 0)
5785                                 return ret;
5786                         break;
5787                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5788                         decap = actions->conf;
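                        /*
                         * Look ahead over VOID actions: a raw_decap
                         * immediately followed by raw_encap is validated
                         * as a single decap/encap pair.
                         */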
5789                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5790                                 ;
5791                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5792                                 encap = NULL;
5793                                 actions--;
5794                         } else {
5795                                 encap = actions->conf;
5796                         }
5797                         ret = flow_dv_validate_action_raw_encap_decap
5798                                            (dev,
5799                                             decap ? decap : &empty_decap, encap,
5800                                             attr, &action_flags, &actions_n,
5801                                             error);
5802                         if (ret < 0)
5803                                 return ret;
5804                         break;
5805                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5806                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5807                         ret = flow_dv_validate_action_modify_mac(action_flags,
5808                                                                  actions,
5809                                                                  item_flags,
5810                                                                  error);
5811                         if (ret < 0)
5812                                 return ret;
5813                         /* Count all modify-header actions as one action. */
5814                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5815                                 ++actions_n;
5816                         action_flags |= actions->type ==
5817                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5818                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5819                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5820                         /*
5821                          * Even though the source and destination MAC
5822                          * addresses overlap in the 4B-aligned header, the
5823                          * convert function handles them separately, so 4
5824                          * SW actions are created in total, and 2 actions
5825                          * are added per set-MAC action no matter how many
5826                          * address bytes are actually rewritten.
5827                          */
5827                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5830                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5831                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5832                                                                   actions,
5833                                                                   item_flags,
5834                                                                   error);
5835                         if (ret < 0)
5836                                 return ret;
5837                         /* Count all modify-header actions as one action. */
5838                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5839                                 ++actions_n;
5840                         action_flags |= actions->type ==
5841                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5842                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5843                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5844                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5845                         break;
5846                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5847                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5848                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5849                                                                   actions,
5850                                                                   item_flags,
5851                                                                   error);
5852                         if (ret < 0)
5853                                 return ret;
5854                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5855                                 return rte_flow_error_set(error, ENOTSUP,
5856                                         RTE_FLOW_ERROR_TYPE_ACTION,
5857                                         actions,
5858                                         "Can't change header "
5859                                         "with ICMPv6 proto");
5860                         /* Count all modify-header actions as one action. */
5861                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5862                                 ++actions_n;
5863                         action_flags |= actions->type ==
5864                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5865                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5866                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5867                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5868                         break;
5869                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5870                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5871                         ret = flow_dv_validate_action_modify_tp(action_flags,
5872                                                                 actions,
5873                                                                 item_flags,
5874                                                                 error);
5875                         if (ret < 0)
5876                                 return ret;
5877                         /* Count all modify-header actions as one action. */
5878                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5879                                 ++actions_n;
5880                         action_flags |= actions->type ==
5881                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5882                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5883                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5884                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5885                         break;
5886                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5887                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5888                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5889                                                                  actions,
5890                                                                  item_flags,
5891                                                                  error);
5892                         if (ret < 0)
5893                                 return ret;
5894                         /* Count all modify-header actions as one action. */
5895                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5896                                 ++actions_n;
5897                         action_flags |= actions->type ==
5898                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5899                                                 MLX5_FLOW_ACTION_SET_TTL :
5900                                                 MLX5_FLOW_ACTION_DEC_TTL;
5901                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5902                         break;
5903                 case RTE_FLOW_ACTION_TYPE_JUMP:
5904                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5905                                                            action_flags,
5906                                                            attr, external,
5907                                                            error);
5908                         if (ret)
5909                                 return ret;
5910                         ++actions_n;
5911                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5912                         break;
5913                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5914                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5915                         ret = flow_dv_validate_action_modify_tcp_seq
5916                                                                 (action_flags,
5917                                                                  actions,
5918                                                                  item_flags,
5919                                                                  error);
5920                         if (ret < 0)
5921                                 return ret;
5922                         /* Count all modify-header actions as one action. */
5923                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5924                                 ++actions_n;
5925                         action_flags |= actions->type ==
5926                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5927                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5928                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5929                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5930                         break;
5931                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5932                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5933                         ret = flow_dv_validate_action_modify_tcp_ack
5934                                                                 (action_flags,
5935                                                                  actions,
5936                                                                  item_flags,
5937                                                                  error);
5938                         if (ret < 0)
5939                                 return ret;
5940                         /* Count all modify-header actions as one action. */
5941                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5942                                 ++actions_n;
5943                         action_flags |= actions->type ==
5944                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5945                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5946                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5947                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5948                         break;
5949                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5950                         break;
5951                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5952                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5953                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5954                         break;
5955                 case RTE_FLOW_ACTION_TYPE_METER:
5956                         ret = mlx5_flow_validate_action_meter(dev,
5957                                                               action_flags,
5958                                                               actions, attr,
5959                                                               error);
5960                         if (ret < 0)
5961                                 return ret;
5962                         action_flags |= MLX5_FLOW_ACTION_METER;
5963                         ++actions_n;
5964                         /* Meter action will add one more TAG action. */
5965                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5966                         break;
5967                 case RTE_FLOW_ACTION_TYPE_AGE:
5968                         ret = flow_dv_validate_action_age(action_flags,
5969                                                           actions, dev,
5970                                                           error);
5971                         if (ret < 0)
5972                                 return ret;
5973                         action_flags |= MLX5_FLOW_ACTION_AGE;
5974                         ++actions_n;
5975                         break;
5976                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5977                         ret = flow_dv_validate_action_modify_ipv4_dscp
5978                                                          (action_flags,
5979                                                           actions,
5980                                                           item_flags,
5981                                                           error);
5982                         if (ret < 0)
5983                                 return ret;
5984                         /* Count all modify-header actions as one action. */
5985                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5986                                 ++actions_n;
5987                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5988                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5989                         break;
5990                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5991                         ret = flow_dv_validate_action_modify_ipv6_dscp
5992                                                                 (action_flags,
5993                                                                  actions,
5994                                                                  item_flags,
5995                                                                  error);
5996                         if (ret < 0)
5997                                 return ret;
5998                         /* Count all modify-header actions as one action. */
5999                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6000                                 ++actions_n;
6001                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6002                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6003                         break;
6004                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6005                         ret = flow_dv_validate_action_sample(action_flags,
6006                                                              actions, dev,
6007                                                              attr, error);
6008                         if (ret < 0)
6009                                 return ret;
6010                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6011                         ++actions_n;
6012                         break;
6013                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6014                         if (actions[0].type != (typeof(actions[0].type))
6015                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6016                                 return rte_flow_error_set
6017                                                 (error, EINVAL,
6018                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6019                                                 NULL, "MLX5 private action "
6020                                                 "must be the first");
6021
6022                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6023                         break;
6024                 default:
6025                         return rte_flow_error_set(error, ENOTSUP,
6026                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6027                                                   actions,
6028                                                   "action not supported");
6029                 }
6030         }
6031         /*
6032          * Validate actions in flow rules
6033          * - Explicit decap action is prohibited by the tunnel offload API.
6034          * - Drop action in tunnel steer rule is prohibited by the API.
6035          * - Application cannot use MARK action because its value can mask
6036          *   the tunnel default miss notification.
6037          * - JUMP in tunnel match rule has no support in current PMD
6038          *   implementation.
6039          * - TAG & META are reserved for future uses.
6040          */
6041         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6042                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6043                                             MLX5_FLOW_ACTION_MARK     |
6044                                             MLX5_FLOW_ACTION_SET_TAG  |
6045                                             MLX5_FLOW_ACTION_SET_META |
6046                                             MLX5_FLOW_ACTION_DROP;
6047
6048                 if (action_flags & bad_actions_mask)
6049                         return rte_flow_error_set
6050                                         (error, EINVAL,
6051                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6052                                         "Invalid RTE action in tunnel "
6053                                         "set decap rule");
6054                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6055                         return rte_flow_error_set
6056                                         (error, EINVAL,
6057                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6058                                         "tunnel set decap rule must terminate "
6059                                         "with JUMP");
6060                 if (!attr->ingress)
6061                         return rte_flow_error_set
6062                                         (error, EINVAL,
6063                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6064                                         "tunnel flows for ingress traffic only");
6065         }
6066         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6067                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6068                                             MLX5_FLOW_ACTION_MARK    |
6069                                             MLX5_FLOW_ACTION_SET_TAG |
6070                                             MLX5_FLOW_ACTION_SET_META;
6071
6072                 if (action_flags & bad_actions_mask)
6073                         return rte_flow_error_set
6074                                         (error, EINVAL,
6075                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6076                                         "Invalid RTE action in tunnel "
6077                                         "set match rule");
6078         }
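        /*
         * Illustrative sketch (hypothetical application code, not part
         * of the driver): a tunnel "set" (steer) rule that passes the
         * checks above must be an ingress rule whose fate action is
         * JUMP, e.g.:
         *
         *   struct rte_flow_attr attr = { .ingress = 1 };
         *   struct rte_flow_action_jump jump = { .group = 1 };
         *   struct rte_flow_action acts[] = {
         *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
         *           { .type = RTE_FLOW_ACTION_TYPE_END },
         *   };
         *
         * Adding DECAP, DROP, MARK, SET_TAG or SET_META here, or to a
         * tunnel "match" rule, would be rejected by the masks above.
         */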
6079         /*
6080          * Validate the drop action mutual exclusion with other actions.
6081          * Drop action is mutually-exclusive with any other action, except for
6082          * Count action.
6083          */
6084         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6085             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6086                 return rte_flow_error_set(error, EINVAL,
6087                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6088                                           "Drop action is mutually-exclusive "
6089                                           "with any other action, except for "
6090                                           "Count action");
6091         /* E-Switch has a few restrictions on using items and actions. */
6092         if (attr->transfer) {
6093                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6094                     action_flags & MLX5_FLOW_ACTION_FLAG)
6095                         return rte_flow_error_set(error, ENOTSUP,
6096                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6097                                                   NULL,
6098                                                   "unsupported action FLAG");
6099                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6100                     action_flags & MLX5_FLOW_ACTION_MARK)
6101                         return rte_flow_error_set(error, ENOTSUP,
6102                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6103                                                   NULL,
6104                                                   "unsupported action MARK");
6105                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6106                         return rte_flow_error_set(error, ENOTSUP,
6107                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6108                                                   NULL,
6109                                                   "unsupported action QUEUE");
6110                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6111                         return rte_flow_error_set(error, ENOTSUP,
6112                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6113                                                   NULL,
6114                                                   "unsupported action RSS");
6115                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6116                         return rte_flow_error_set(error, EINVAL,
6117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6118                                                   actions,
6119                                                   "no fate action is found");
6120         } else {
6121                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6122                         return rte_flow_error_set(error, EINVAL,
6123                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6124                                                   actions,
6125                                                   "no fate action is found");
6126         }
6127         /*
6128          * Continue validation for Xcap and VLAN actions.
6129          * If hairpin works in explicit TX rule mode, there is no action
6130          * splitting and the validation of a hairpin ingress flow is the
6131          * same as for other standard flows.
6132          */
6133         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6134                              MLX5_FLOW_VLAN_ACTIONS)) &&
6135             (queue_index == 0xFFFF ||
6136              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6137              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6138              conf->tx_explicit != 0))) {
6139                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6140                     MLX5_FLOW_XCAP_ACTIONS)
6141                         return rte_flow_error_set(error, ENOTSUP,
6142                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6143                                                   NULL, "encap and decap "
6144                                                   "combination is not supported");
6145                 if (!attr->transfer && attr->ingress) {
6146                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6147                                 return rte_flow_error_set
6148                                                 (error, ENOTSUP,
6149                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6150                                                  NULL, "encap is not supported"
6151                                                  " for ingress traffic");
6152                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6153                                 return rte_flow_error_set
6154                                                 (error, ENOTSUP,
6155                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6156                                                  NULL, "push VLAN action not "
6157                                                  "supported for ingress");
6158                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6159                                         MLX5_FLOW_VLAN_ACTIONS)
6160                                 return rte_flow_error_set
6161                                                 (error, ENOTSUP,
6162                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6163                                                  NULL, "no support for "
6164                                                  "multiple VLAN actions");
6165                 }
6166         }
6167         /*
6168          * Hairpin flow will add one more TAG action in TX implicit mode.
6169          * In TX explicit mode, there will be no hairpin flow ID.
6170          */
6171         if (hairpin > 0)
6172                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6173         /* Extra metadata enabled: one more TAG action will be added. */
6174         if (dev_conf->dv_flow_en &&
6175             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6176             mlx5_flow_ext_mreg_supported(dev))
6177                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6178         if ((uint32_t)rw_act_num >
6179                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6180                 return rte_flow_error_set(error, ENOTSUP,
6181                                           RTE_FLOW_ERROR_TYPE_ACTION,
6182                                           NULL, "too many header modify"
6183                                           " actions to support");
6184         }
6185         return 0;
6186 }
6187
6188 /**
6189  * Internal preparation function. Allocates the DV flow, whose
6190  * size is constant.
6191  *
6192  * @param[in] dev
6193  *   Pointer to the rte_eth_dev structure.
6194  * @param[in] attr
6195  *   Pointer to the flow attributes.
6196  * @param[in] items
6197  *   Pointer to the list of items.
6198  * @param[in] actions
6199  *   Pointer to the list of actions.
6200  * @param[out] error
6201  *   Pointer to the error structure.
6202  *
6203  * @return
6204  *   Pointer to mlx5_flow object on success,
6205  *   otherwise NULL and rte_errno is set.
6206  */
6207 static struct mlx5_flow *
6208 flow_dv_prepare(struct rte_eth_dev *dev,
6209                 const struct rte_flow_attr *attr __rte_unused,
6210                 const struct rte_flow_item items[] __rte_unused,
6211                 const struct rte_flow_action actions[] __rte_unused,
6212                 struct rte_flow_error *error)
6213 {
6214         uint32_t handle_idx = 0;
6215         struct mlx5_flow *dev_flow;
6216         struct mlx5_flow_handle *dev_handle;
6217         struct mlx5_priv *priv = dev->data->dev_private;
6218         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6219
6220         MLX5_ASSERT(wks);
6221         /* Guard against overflowing the per-thread flow array. */
6222         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6223                 rte_flow_error_set(error, ENOSPC,
6224                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6225                                    "no free temporary device flow");
6226                 return NULL;
6227         }
6228         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6229                                    &handle_idx);
6230         if (!dev_handle) {
6231                 rte_flow_error_set(error, ENOMEM,
6232                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6233                                    "not enough memory to create flow handle");
6234                 return NULL;
6235         }
6236         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6237         dev_flow = &wks->flows[wks->flow_idx++];
6238         dev_flow->handle = dev_handle;
6239         dev_flow->handle_idx = handle_idx;
6240         /*
6241          * Some old rdma-core releases check the length of the matching
6242          * parameter before continuing. That check must see the length
6243          * without the misc4 param; if the flow actually uses misc4, the
6244          * length is adjusted accordingly. Each param member is naturally
6245          * aligned on a 64B boundary.
6246          */
6247         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6248                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
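        /*
         * For example, with the PRM layout assumed here (each of the
         * six fte_match_set_* members being 64B) this yields
         * 384 - 64 = 320 bytes; the full size is restored later only
         * if the flow actually matches on misc4 fields (e.g. eCPRI).
         */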
6249         /*
6250          * The matching value must be cleared to 0 before use. In the past
6251          * it was cleared implicitly by the rte_*alloc API; the explicit
6252          * memset costs about the same.
6253          */
6254         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6255         dev_flow->ingress = attr->ingress;
6256         dev_flow->dv.transfer = attr->transfer;
6257         return dev_flow;
6258 }
6259
6260 #ifdef RTE_LIBRTE_MLX5_DEBUG
6261 /**
6262  * Sanity check for match mask and value. Similar to check_valid_spec() in
6263  * the kernel driver. If an unmasked bit is set in the value, it fails.
6264  *
6265  * @param match_mask
6266  *   pointer to match mask buffer.
6267  * @param match_value
6268  *   pointer to match value buffer.
6269  *
6270  * @return
6271  *   0 if valid, -EINVAL otherwise.
6272  */
6273 static int
6274 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6275 {
6276         uint8_t *m = match_mask;
6277         uint8_t *v = match_value;
6278         unsigned int i;
6279
6280         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6281                 if (v[i] & ~m[i]) {
6282                         DRV_LOG(ERR,
6283                                 "match_value differs from match_criteria"
6284                                 " %p[%u] != %p[%u]",
6285                                 match_value, i, match_mask, i);
6286                         return -EINVAL;
6287                 }
6288         }
6289         return 0;
6290 }
6291 #endif
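
/*
 * A minimal usage sketch (hypothetical caller, debug builds only):
 * the translated value buffer is checked against the matcher mask
 * before the rule is handed to the hardware, e.g.:
 *
 *   MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
 *                                         dev_flow->dv.value.buf));
 */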
6292
6293 /**
6294  * Add match of ip_version.
6295  *
6296  * @param[in] group
6297  *   Flow group.
6298  * @param[in] headers_v
6299  *   Values header pointer.
6300  * @param[in] headers_m
6301  *   Masks header pointer.
6302  * @param[in] ip_version
6303  *   The IP version to set.
6304  */
6305 static inline void
6306 flow_dv_set_match_ip_version(uint32_t group,
6307                              void *headers_v,
6308                              void *headers_m,
6309                              uint8_t ip_version)
6310 {
6311         if (group == 0)
6312                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6313         else
6314                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6315                          ip_version);
6316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6317         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6318         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6319 }
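
/*
 * Sketch of the effect: on a non-root table (group != 0),
 * flow_dv_set_match_ip_version(1, v, m, 4) matches ip_version == 4
 * using the version value itself as the mask, while on the root
 * table (group 0) the full 0xf field mask is used, presumably
 * because partial masks are not accepted there. Any prior ethertype
 * match is cleared in favor of ip_version.
 */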
6320
6321 /**
6322  * Add Ethernet item to matcher and to the value.
6323  *
6324  * @param[in, out] matcher
6325  *   Flow matcher.
6326  * @param[in, out] key
6327  *   Flow matcher value.
6328  * @param[in] item
6329  *   Flow pattern to translate.
6330  * @param[in] inner
6331  *   Item is inner pattern.
6332  */
6333 static void
6334 flow_dv_translate_item_eth(void *matcher, void *key,
6335                            const struct rte_flow_item *item, int inner,
6336                            uint32_t group)
6337 {
6338         const struct rte_flow_item_eth *eth_m = item->mask;
6339         const struct rte_flow_item_eth *eth_v = item->spec;
6340         const struct rte_flow_item_eth nic_mask = {
6341                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6342                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6343                 .type = RTE_BE16(0xffff),
6344                 .has_vlan = 0,
6345         };
6346         void *hdrs_m;
6347         void *hdrs_v;
6348         char *l24_v;
6349         unsigned int i;
6350
6351         if (!eth_v)
6352                 return;
6353         if (!eth_m)
6354                 eth_m = &nic_mask;
6355         if (inner) {
6356                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6357                                          inner_headers);
6358                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6359         } else {
6360                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6361                                          outer_headers);
6362                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6363         }
6364         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6365                &eth_m->dst, sizeof(eth_m->dst));
6366         /* The value must be in the range of the mask. */
6367         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6368         for (i = 0; i < sizeof(eth_m->dst); ++i)
6369                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6370         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6371                &eth_m->src, sizeof(eth_m->src));
6372         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6373         /* The value must be in the range of the mask. */
6374         for (i = 0; i < sizeof(eth_m->src); ++i)
6375                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6376         /*
6377          * HW supports match on one Ethertype, the Ethertype following the last
6378          * VLAN tag of the packet (see PRM).
6379          * Set match on ethertype only if ETH header is not followed by VLAN.
6380          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6381          * ethertype, and use ip_version field instead.
6382          * eCPRI over Ether layer will use type value 0xAEFE.
6383          */
6384         if (eth_m->type == 0xFFFF) {
6385                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6386                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6387                 switch (eth_v->type) {
6388                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6389                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6390                         return;
6391                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6392                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6393                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6394                         return;
6395                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6396                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6397                         return;
6398                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6399                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6400                         return;
6401                 default:
6402                         break;
6403                 }
6404         }
6405         if (eth_m->has_vlan) {
6406                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6407                 if (eth_v->has_vlan) {
6408                         /*
6409                          * When the has_more_vlan field in the VLAN item is
6410                          * also not set, only single-tagged packets match.
6411                          */
6412                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6413                         return;
6414                 }
6415         }
6416         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6417                  rte_be_to_cpu_16(eth_m->type));
6418         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6419         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6420 }
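
/*
 * Illustrative sketch (hypothetical pattern, not driver code): a
 * fully masked ETH item with type 0x0800 does not produce an
 * ethertype match; per the comment above it becomes an
 * ip_version == 4 match plus an untagged match (cvlan_tag mask set,
 * value left 0):
 *
 *   struct rte_flow_item_eth spec = { .type = RTE_BE16(0x0800) };
 *   struct rte_flow_item_eth mask = { .type = RTE_BE16(0xffff) };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &spec, .mask = &mask,
 *   };
 *   flow_dv_translate_item_eth(matcher, key, &item, 0, group);
 */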
6421
6422 /**
6423  * Add VLAN item to matcher and to the value.
6424  *
6425  * @param[in, out] dev_flow
6426  *   Flow descriptor.
6427  * @param[in, out] matcher
6428  *   Flow matcher.
6429  * @param[in, out] key
6430  *   Flow matcher value.
6431  * @param[in] item
6432  *   Flow pattern to translate.
6433  * @param[in] inner
6434  *   Item is inner pattern.
6435  */
6436 static void
6437 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6438                             void *matcher, void *key,
6439                             const struct rte_flow_item *item,
6440                             int inner, uint32_t group)
6441 {
6442         const struct rte_flow_item_vlan *vlan_m = item->mask;
6443         const struct rte_flow_item_vlan *vlan_v = item->spec;
6444         void *hdrs_m;
6445         void *hdrs_v;
6446         uint16_t tci_m;
6447         uint16_t tci_v;
6448
6449         if (inner) {
6450                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6451                                          inner_headers);
6452                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6453         } else {
6454                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6455                                          outer_headers);
6456                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6457                 /*
6458                  * This is a workaround: masks are not supported
6459                  * here and have been pre-validated.
6460                  */
6461                 if (vlan_v)
6462                         dev_flow->handle->vf_vlan.tag =
6463                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6464         }
6465         /*
6466          * When a VLAN item exists in the flow, mark the packet as
6467          * tagged, even if the TCI is not specified.
6468          */
6469         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6470                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6471                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6472         }
6473         if (!vlan_v)
6474                 return;
6475         if (!vlan_m)
6476                 vlan_m = &rte_flow_item_vlan_mask;
6477         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6478         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6479         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6480         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6481         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6482         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6483         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6484         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6485         /*
6486          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6487          * ethertype, and use ip_version field instead.
6488          */
6489         if (vlan_m->inner_type == 0xFFFF) {
6490                 switch (vlan_v->inner_type) {
6491                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6492                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6493                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6494                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6495                         return;
6496                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6497                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6498                         return;
6499                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6500                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6501                         return;
6502                 default:
6503                         break;
6504                 }
6505         }
6506         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6507                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6508                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6509                 /* Only one vlan_tag bit can be set. */
6510                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6511                 return;
6512         }
6513         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6514                  rte_be_to_cpu_16(vlan_m->inner_type));
6515         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6516                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6517 }
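
/*
 * Illustrative sketch (hypothetical pattern): a VLAN item with no
 * spec at all still marks the packet as tagged, so the following
 * matches any single-tagged packet:
 *
 *   struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_VLAN };
 *   flow_dv_translate_item_vlan(dev_flow, matcher, key, &item, 0,
 *                               group);
 */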
6518
6519 /**
6520  * Add IPV4 item to matcher and to the value.
6521  *
6522  * @param[in, out] matcher
6523  *   Flow matcher.
6524  * @param[in, out] key
6525  *   Flow matcher value.
6526  * @param[in] item
6527  *   Flow pattern to translate.
6528  * @param[in] inner
6529  *   Item is inner pattern.
6530  * @param[in] group
6531  *   The group to insert the rule.
6532  */
6533 static void
6534 flow_dv_translate_item_ipv4(void *matcher, void *key,
6535                             const struct rte_flow_item *item,
6536                             int inner, uint32_t group)
6537 {
6538         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6539         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6540         const struct rte_flow_item_ipv4 nic_mask = {
6541                 .hdr = {
6542                         .src_addr = RTE_BE32(0xffffffff),
6543                         .dst_addr = RTE_BE32(0xffffffff),
6544                         .type_of_service = 0xff,
6545                         .next_proto_id = 0xff,
6546                         .time_to_live = 0xff,
6547                 },
6548         };
6549         void *headers_m;
6550         void *headers_v;
6551         char *l24_m;
6552         char *l24_v;
6553         uint8_t tos;
6554
6555         if (inner) {
6556                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6557                                          inner_headers);
6558                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6559         } else {
6560                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6561                                          outer_headers);
6562                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6563         }
6564         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6565         if (!ipv4_v)
6566                 return;
6567         if (!ipv4_m)
6568                 ipv4_m = &nic_mask;
6569         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6570                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6571         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6572                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6573         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6574         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6575         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6576                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6577         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6578                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6579         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6580         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6581         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6582         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6583                  ipv4_m->hdr.type_of_service);
6584         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6585         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6586                  ipv4_m->hdr.type_of_service >> 2);
6587         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6588         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6589                  ipv4_m->hdr.next_proto_id);
6590         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6591                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6592         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6593                  ipv4_m->hdr.time_to_live);
6594         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6595                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6596         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6597                  !!(ipv4_m->hdr.fragment_offset));
6598         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6599                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6600 }
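
/*
 * Note on the frag bit above: only the zero/non-zero state of
 * fragment_offset can be matched. A fully masked fragment_offset
 * with a spec of 0 matches non-fragmented packets only, while any
 * non-zero masked spec matches fragments.
 */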
6601
6602 /**
6603  * Add IPV6 item to matcher and to the value.
6604  *
6605  * @param[in, out] matcher
6606  *   Flow matcher.
6607  * @param[in, out] key
6608  *   Flow matcher value.
6609  * @param[in] item
6610  *   Flow pattern to translate.
6611  * @param[in] inner
6612  *   Item is inner pattern.
6613  * @param[in] group
6614  *   The group to insert the rule.
6615  */
6616 static void
6617 flow_dv_translate_item_ipv6(void *matcher, void *key,
6618                             const struct rte_flow_item *item,
6619                             int inner, uint32_t group)
6620 {
6621         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6622         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6623         const struct rte_flow_item_ipv6 nic_mask = {
6624                 .hdr = {
6625                         .src_addr =
6626                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6627                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6628                         .dst_addr =
6629                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6630                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6631                         .vtc_flow = RTE_BE32(0xffffffff),
6632                         .proto = 0xff,
6633                         .hop_limits = 0xff,
6634                 },
6635         };
6636         void *headers_m;
6637         void *headers_v;
6638         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6639         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6640         char *l24_m;
6641         char *l24_v;
6642         uint32_t vtc_m;
6643         uint32_t vtc_v;
6644         int i;
6645         int size;
6646
6647         if (inner) {
6648                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6649                                          inner_headers);
6650                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6651         } else {
6652                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6653                                          outer_headers);
6654                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6655         }
6656         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6657         if (!ipv6_v)
6658                 return;
6659         if (!ipv6_m)
6660                 ipv6_m = &nic_mask;
6661         size = sizeof(ipv6_m->hdr.dst_addr);
6662         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6663                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6664         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6665                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6666         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6667         for (i = 0; i < size; ++i)
6668                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6669         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6670                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6671         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6672                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6673         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6674         for (i = 0; i < size; ++i)
6675                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6676         /* TOS. */
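        /*
         * vtc_flow layout (CPU order): version(31:28), traffic
         * class(27:20), flow label(19:0). ECN is the low 2 TC bits
         * (21:20) and DSCP the high 6 (27:22), hence the shifts below.
         */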
6677         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6678         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6679         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6680         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6681         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6682         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6683         /* Label. */
6684         if (inner) {
6685                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6686                          vtc_m);
6687                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6688                          vtc_v);
6689         } else {
6690                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6691                          vtc_m);
6692                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6693                          vtc_v);
6694         }
6695         /* Protocol. */
6696         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6697                  ipv6_m->hdr.proto);
6698         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6699                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6700         /* Hop limit. */
6701         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6702                  ipv6_m->hdr.hop_limits);
6703         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6704                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6705         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6706                  !!(ipv6_m->has_frag_ext));
6707         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6708                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6709 }
6710
6711 /**
6712  * Add IPV6 fragment extension item to matcher and to the value.
6713  *
6714  * @param[in, out] matcher
6715  *   Flow matcher.
6716  * @param[in, out] key
6717  *   Flow matcher value.
6718  * @param[in] item
6719  *   Flow pattern to translate.
6720  * @param[in] inner
6721  *   Item is inner pattern.
6722  */
6723 static void
6724 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6725                                      const struct rte_flow_item *item,
6726                                      int inner)
6727 {
6728         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6729         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6730         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6731                 .hdr = {
6732                         .next_header = 0xff,
6733                         .frag_data = RTE_BE16(0xffff),
6734                 },
6735         };
6736         void *headers_m;
6737         void *headers_v;
6738
6739         if (inner) {
6740                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6741                                          inner_headers);
6742                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6743         } else {
6744                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6745                                          outer_headers);
6746                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6747         }
6748         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6749         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6750         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6751         if (!ipv6_frag_ext_v)
6752                 return;
6753         if (!ipv6_frag_ext_m)
6754                 ipv6_frag_ext_m = &nic_mask;
6755         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6756                  ipv6_frag_ext_m->hdr.next_header);
6757         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6758                  ipv6_frag_ext_v->hdr.next_header &
6759                  ipv6_frag_ext_m->hdr.next_header);
6760 }
6761
6762 /**
6763  * Add TCP item to matcher and to the value.
6764  *
6765  * @param[in, out] matcher
6766  *   Flow matcher.
6767  * @param[in, out] key
6768  *   Flow matcher value.
6769  * @param[in] item
6770  *   Flow pattern to translate.
6771  * @param[in] inner
6772  *   Item is inner pattern.
6773  */
6774 static void
6775 flow_dv_translate_item_tcp(void *matcher, void *key,
6776                            const struct rte_flow_item *item,
6777                            int inner)
6778 {
6779         const struct rte_flow_item_tcp *tcp_m = item->mask;
6780         const struct rte_flow_item_tcp *tcp_v = item->spec;
6781         void *headers_m;
6782         void *headers_v;
6783
6784         if (inner) {
6785                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6786                                          inner_headers);
6787                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6788         } else {
6789                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6790                                          outer_headers);
6791                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6792         }
6793         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6794         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6795         if (!tcp_v)
6796                 return;
6797         if (!tcp_m)
6798                 tcp_m = &rte_flow_item_tcp_mask;
6799         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6800                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6801         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6802                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6803         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6804                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6805         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6806                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6807         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6808                  tcp_m->hdr.tcp_flags);
6809         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6810                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6811 }
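
/*
 * Illustrative sketch (hypothetical pattern): matching TCP SYN-only
 * segments by masking all flag bits and specifying just SYN:
 *
 *   struct rte_flow_item_tcp spec = {
 *           .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
 *   };
 *   struct rte_flow_item_tcp mask = { .hdr = { .tcp_flags = 0xff } };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_TCP,
 *           .spec = &spec, .mask = &mask,
 *   };
 *   flow_dv_translate_item_tcp(matcher, key, &item, 0);
 */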
6812
6813 /**
6814  * Add UDP item to matcher and to the value.
6815  *
6816  * @param[in, out] matcher
6817  *   Flow matcher.
6818  * @param[in, out] key
6819  *   Flow matcher value.
6820  * @param[in] item
6821  *   Flow pattern to translate.
6822  * @param[in] inner
6823  *   Item is inner pattern.
6824  */
6825 static void
6826 flow_dv_translate_item_udp(void *matcher, void *key,
6827                            const struct rte_flow_item *item,
6828                            int inner)
6829 {
6830         const struct rte_flow_item_udp *udp_m = item->mask;
6831         const struct rte_flow_item_udp *udp_v = item->spec;
6832         void *headers_m;
6833         void *headers_v;
6834
6835         if (inner) {
6836                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6837                                          inner_headers);
6838                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6839         } else {
6840                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6841                                          outer_headers);
6842                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6843         }
6844         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6845         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6846         if (!udp_v)
6847                 return;
6848         if (!udp_m)
6849                 udp_m = &rte_flow_item_udp_mask;
6850         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6851                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6852         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6853                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6854         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6855                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6856         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6857                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6858 }
6859
6860 /**
6861  * Add GRE optional Key item to matcher and to the value.
6862  *
6863  * @param[in, out] matcher
6864  *   Flow matcher.
6865  * @param[in, out] key
6866  *   Flow matcher value.
6867  * @param[in] item
6868  *   Flow pattern to translate.
6871  */
6872 static void
6873 flow_dv_translate_item_gre_key(void *matcher, void *key,
6874                                    const struct rte_flow_item *item)
6875 {
6876         const rte_be32_t *key_m = item->mask;
6877         const rte_be32_t *key_v = item->spec;
6878         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6879         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6880         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6881
6882         /* GRE K bit must be on and should already be validated */
6883         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6884         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6885         if (!key_v)
6886                 return;
6887         if (!key_m)
6888                 key_m = &gre_key_default_mask;
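        /*
         * The 32-bit GRE key is split in the PRM: the high 24 bits go
         * to gre_key_h and the low 8 bits to gre_key_l, hence the
         * >> 8 and & 0xFF below.
         */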
6889         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6890                  rte_be_to_cpu_32(*key_m) >> 8);
6891         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6892                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6893         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6894                  rte_be_to_cpu_32(*key_m) & 0xFF);
6895         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6896                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6897 }
6898
6899 /**
6900  * Add GRE item to matcher and to the value.
6901  *
6902  * @param[in, out] matcher
6903  *   Flow matcher.
6904  * @param[in, out] key
6905  *   Flow matcher value.
6906  * @param[in] item
6907  *   Flow pattern to translate.
6908  * @param[in] inner
6909  *   Item is inner pattern.
6910  */
6911 static void
6912 flow_dv_translate_item_gre(void *matcher, void *key,
6913                            const struct rte_flow_item *item,
6914                            int inner)
6915 {
6916         const struct rte_flow_item_gre *gre_m = item->mask;
6917         const struct rte_flow_item_gre *gre_v = item->spec;
6918         void *headers_m;
6919         void *headers_v;
6920         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6921         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6922         struct {
6923                 union {
6924                         __extension__
6925                         struct {
6926                                 uint16_t version:3;
6927                                 uint16_t rsvd0:9;
6928                                 uint16_t s_present:1;
6929                                 uint16_t k_present:1;
6930                                 uint16_t rsvd_bit1:1;
6931                                 uint16_t c_present:1;
6932                         };
6933                         uint16_t value;
6934                 };
6935         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6936
6937         if (inner) {
6938                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6939                                          inner_headers);
6940                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6941         } else {
6942                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6943                                          outer_headers);
6944                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6945         }
6946         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6947         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6948         if (!gre_v)
6949                 return;
6950         if (!gre_m)
6951                 gre_m = &rte_flow_item_gre_mask;
6952         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6953                  rte_be_to_cpu_16(gre_m->protocol));
6954         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6955                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6956         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6957         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6958         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6959                  gre_crks_rsvd0_ver_m.c_present);
6960         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6961                  gre_crks_rsvd0_ver_v.c_present &
6962                  gre_crks_rsvd0_ver_m.c_present);
6963         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6964                  gre_crks_rsvd0_ver_m.k_present);
6965         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6966                  gre_crks_rsvd0_ver_v.k_present &
6967                  gre_crks_rsvd0_ver_m.k_present);
6968         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6969                  gre_crks_rsvd0_ver_m.s_present);
6970         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6971                  gre_crks_rsvd0_ver_v.s_present &
6972                  gre_crks_rsvd0_ver_m.s_present);
6973 }
6974
6975 /**
6976  * Add NVGRE item to matcher and to the value.
6977  *
6978  * @param[in, out] matcher
6979  *   Flow matcher.
6980  * @param[in, out] key
6981  *   Flow matcher value.
6982  * @param[in] item
6983  *   Flow pattern to translate.
6984  * @param[in] inner
6985  *   Item is inner pattern.
6986  */
6987 static void
6988 flow_dv_translate_item_nvgre(void *matcher, void *key,
6989                              const struct rte_flow_item *item,
6990                              int inner)
6991 {
6992         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6993         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6994         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6995         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6996         const char *tni_flow_id_m;
6997         const char *tni_flow_id_v;
6998         char *gre_key_m;
6999         char *gre_key_v;
7000         int size;
7001         int i;
7002
7003         /* For NVGRE, GRE header fields must be set with defined values. */
7004         const struct rte_flow_item_gre gre_spec = {
7005                 .c_rsvd0_ver = RTE_BE16(0x2000),
7006                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7007         };
7008         const struct rte_flow_item_gre gre_mask = {
7009                 .c_rsvd0_ver = RTE_BE16(0xB000),
7010                 .protocol = RTE_BE16(UINT16_MAX),
7011         };
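        /*
         * 0x2000 sets only the K (key present) bit; the 0xB000 mask
         * covers the C (0x8000), K (0x2000) and S (0x1000) bits, so
         * C and S must be clear while K must be set.
         */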
7012         const struct rte_flow_item gre_item = {
7013                 .spec = &gre_spec,
7014                 .mask = &gre_mask,
7015                 .last = NULL,
7016         };
7017         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7018         if (!nvgre_v)
7019                 return;
7020         if (!nvgre_m)
7021                 nvgre_m = &rte_flow_item_nvgre_mask;
7022         tni_flow_id_m = (const char *)nvgre_m->tni;
7023         tni_flow_id_v = (const char *)nvgre_v->tni;
7024         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7025         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7026         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7027         memcpy(gre_key_m, tni_flow_id_m, size);
7028         for (i = 0; i < size; ++i)
7029                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7030 }
7031
7032 /**
7033  * Add VXLAN item to matcher and to the value.
7034  *
7035  * @param[in, out] matcher
7036  *   Flow matcher.
7037  * @param[in, out] key
7038  *   Flow matcher value.
7039  * @param[in] item
7040  *   Flow pattern to translate.
7041  * @param[in] inner
7042  *   Item is inner pattern.
7043  */
7044 static void
7045 flow_dv_translate_item_vxlan(void *matcher, void *key,
7046                              const struct rte_flow_item *item,
7047                              int inner)
7048 {
7049         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7050         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7051         void *headers_m;
7052         void *headers_v;
7053         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7054         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7055         char *vni_m;
7056         char *vni_v;
7057         uint16_t dport;
7058         int size;
7059         int i;
7060
7061         if (inner) {
7062                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7063                                          inner_headers);
7064                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7065         } else {
7066                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7067                                          outer_headers);
7068                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7069         }
7070         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7071                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7072         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7073                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7074                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7075         }
7076         if (!vxlan_v)
7077                 return;
7078         if (!vxlan_m)
7079                 vxlan_m = &rte_flow_item_vxlan_mask;
7080         size = sizeof(vxlan_m->vni);
7081         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7082         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7083         memcpy(vni_m, vxlan_m->vni, size);
7084         for (i = 0; i < size; ++i)
7085                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7086 }
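
/*
 * Illustrative sketch (hypothetical pattern): the VNI is matched
 * byte-wise in network order, so VNI 0x123456 is expressed as:
 *
 *   struct rte_flow_item_vxlan spec = { .vni = { 0x12, 0x34, 0x56 } };
 *   struct rte_flow_item_vxlan mask = { .vni = { 0xff, 0xff, 0xff } };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *           .spec = &spec, .mask = &mask,
 *   };
 */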
7087
7088 /**
7089  * Add VXLAN-GPE item to matcher and to the value.
7090  *
7091  * @param[in, out] matcher
7092  *   Flow matcher.
7093  * @param[in, out] key
7094  *   Flow matcher value.
7095  * @param[in] item
7096  *   Flow pattern to translate.
7097  * @param[in] inner
7098  *   Item is inner pattern.
7099  */
7100
7101 static void
7102 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7103                                  const struct rte_flow_item *item, int inner)
7104 {
7105         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7106         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7107         void *headers_m;
7108         void *headers_v;
7109         void *misc_m =
7110                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7111         void *misc_v =
7112                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7113         char *vni_m;
7114         char *vni_v;
7115         uint16_t dport;
7116         int size;
7117         int i;
7118         uint8_t flags_m = 0xff;
7119         uint8_t flags_v = 0xc;
7120
7121         if (inner) {
7122                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7123                                          inner_headers);
7124                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7125         } else {
7126                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7127                                          outer_headers);
7128                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7129         }
7130         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7131                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7132         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7133                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7134                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7135         }
7136         if (!vxlan_v)
7137                 return;
7138         if (!vxlan_m)
7139                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7140         size = sizeof(vxlan_m->vni);
7141         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7142         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7143         memcpy(vni_m, vxlan_m->vni, size);
7144         for (i = 0; i < size; ++i)
7145                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7146         if (vxlan_m->flags) {
7147                 flags_m = vxlan_m->flags;
7148                 flags_v = vxlan_v->flags;
7149         }
7150         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7151         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7152         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7153                  vxlan_m->protocol);
7154         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7155                  vxlan_v->protocol);
7156 }
7157
7158 /**
7159  * Add Geneve item to matcher and to the value.
7160  *
7161  * @param[in, out] matcher
7162  *   Flow matcher.
7163  * @param[in, out] key
7164  *   Flow matcher value.
7165  * @param[in] item
7166  *   Flow pattern to translate.
7167  * @param[in] inner
7168  *   Item is inner pattern.
7169  */
7170
7171 static void
7172 flow_dv_translate_item_geneve(void *matcher, void *key,
7173                               const struct rte_flow_item *item, int inner)
7174 {
7175         const struct rte_flow_item_geneve *geneve_m = item->mask;
7176         const struct rte_flow_item_geneve *geneve_v = item->spec;
7177         void *headers_m;
7178         void *headers_v;
7179         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7180         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7181         uint16_t dport;
7182         uint16_t gbhdr_m;
7183         uint16_t gbhdr_v;
7184         char *vni_m;
7185         char *vni_v;
7186         size_t size, i;
7187
7188         if (inner) {
7189                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7190                                          inner_headers);
7191                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7192         } else {
7193                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7194                                          outer_headers);
7195                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7196         }
7197         dport = MLX5_UDP_PORT_GENEVE;
7198         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7199                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7200                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7201         }
7202         if (!geneve_v)
7203                 return;
7204         if (!geneve_m)
7205                 geneve_m = &rte_flow_item_geneve_mask;
7206         size = sizeof(geneve_m->vni);
7207         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7208         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7209         memcpy(vni_m, geneve_m->vni, size);
7210         for (i = 0; i < size; ++i)
7211                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7212         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7213                  rte_be_to_cpu_16(geneve_m->protocol));
7214         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7215                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7216         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7217         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7218         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7219                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7220         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7221                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7222         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7223                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7224         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7225                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7226                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7227 }
7228
7229 /**
7230  * Add MPLS item to matcher and to the value.
7231  *
7232  * @param[in, out] matcher
7233  *   Flow matcher.
7234  * @param[in, out] key
7235  *   Flow matcher value.
7236  * @param[in] item
7237  *   Flow pattern to translate.
7238  * @param[in] prev_layer
7239  *   The protocol layer indicated in previous item.
7240  * @param[in] inner
7241  *   Item is inner pattern.
7242  */
7243 static void
7244 flow_dv_translate_item_mpls(void *matcher, void *key,
7245                             const struct rte_flow_item *item,
7246                             uint64_t prev_layer,
7247                             int inner)
7248 {
7249         const uint32_t *in_mpls_m = item->mask;
7250         const uint32_t *in_mpls_v = item->spec;
7251         uint32_t *out_mpls_m = NULL;
7252         uint32_t *out_mpls_v = NULL;
7253         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7254         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7255         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7256                                      misc_parameters_2);
7257         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7258         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7259         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7260
7261         switch (prev_layer) {
7262         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7263                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7264                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7265                          MLX5_UDP_PORT_MPLS);
7266                 break;
7267         case MLX5_FLOW_LAYER_GRE:
7268                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7269                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7270                          RTE_ETHER_TYPE_MPLS);
7271                 break;
7272         default:
7273                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7274                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7275                          IPPROTO_MPLS);
7276                 break;
7277         }
7278         if (!in_mpls_v)
7279                 return;
7280         if (!in_mpls_m)
7281                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7282         switch (prev_layer) {
7283         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7284                 out_mpls_m =
7285                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7286                                                  outer_first_mpls_over_udp);
7287                 out_mpls_v =
7288                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7289                                                  outer_first_mpls_over_udp);
7290                 break;
7291         case MLX5_FLOW_LAYER_GRE:
7292                 out_mpls_m =
7293                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7294                                                  outer_first_mpls_over_gre);
7295                 out_mpls_v =
7296                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7297                                                  outer_first_mpls_over_gre);
7298                 break;
7299         default:
7300                 /* Inner MPLS not over GRE is not supported. */
7301                 if (!inner) {
7302                         out_mpls_m =
7303                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7304                                                          misc2_m,
7305                                                          outer_first_mpls);
7306                         out_mpls_v =
7307                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7308                                                          misc2_v,
7309                                                          outer_first_mpls);
7310                 }
7311                 break;
7312         }
7313         if (out_mpls_m && out_mpls_v) {
7314                 *out_mpls_m = *in_mpls_m;
7315                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7316         }
7317 }
7318
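/*
 * Usage sketch (illustrative, not part of the driver): the same MPLS
 * item lands in a different misc2 field depending on prev_layer. For
 * the GRE case an application could build the pattern below; the label
 * value (16, with the bottom-of-stack bit set) is an assumption for
 * the example.
 *
 *	struct rte_flow_item_mpls mpls_spec = {
 *		.label_tc_s = { 0x00, 0x01, 0x01 },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_GRE },
 *		{ .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
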
7319 /**
7320  * Add metadata register item to matcher
7321  *
7322  * @param[in, out] matcher
7323  *   Flow matcher.
7324  * @param[in, out] key
7325  *   Flow matcher value.
7326  * @param[in] reg_type
7327  *   Type of device metadata register
7328  * @param[in] value
7329  *   Register value
7330  * @param[in] mask
7331  *   Register mask
7332  */
7333 static void
7334 flow_dv_match_meta_reg(void *matcher, void *key,
7335                        enum modify_reg reg_type,
7336                        uint32_t data, uint32_t mask)
7337 {
7338         void *misc2_m =
7339                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7340         void *misc2_v =
7341                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7342         uint32_t temp;
7343
7344         data &= mask;
7345         switch (reg_type) {
7346         case REG_A:
7347                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7348                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7349                 break;
7350         case REG_B:
7351                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7352                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7353                 break;
7354         case REG_C_0:
7355                 /*
7356                  * The metadata register C0 field might be divided into
7357                  * source vport index and META item value; set this field
7358                  * according to the specified mask, not as a whole.
7359                  */
7360                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7361                 temp |= mask;
7362                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7363                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7364                 temp &= ~mask;
7365                 temp |= data;
7366                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7367                 break;
7368         case REG_C_1:
7369                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7370                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7371                 break;
7372         case REG_C_2:
7373                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7374                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7375                 break;
7376         case REG_C_3:
7377                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7378                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7379                 break;
7380         case REG_C_4:
7381                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7382                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7383                 break;
7384         case REG_C_5:
7385                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7386                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7387                 break;
7388         case REG_C_6:
7389                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7390                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7391                 break;
7392         case REG_C_7:
7393                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7394                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7395                 break;
7396         default:
7397                 MLX5_ASSERT(false);
7398                 break;
7399         }
7400 }
7401
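/*
 * Worked example for the REG_C_0 read-modify-write above (the values
 * are assumptions): if the vport match already programmed mask
 * 0x0000ffff and a META/MARK match adds data 0x00050000 under mask
 * 0xffff0000, the merged matcher mask becomes 0xffffffff while the
 * previously written vport bits of the value are left intact.
 */
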
7402 /**
7403  * Add MARK item to matcher
7404  *
7405  * @param[in] dev
7406  *   The device to configure through.
7407  * @param[in, out] matcher
7408  *   Flow matcher.
7409  * @param[in, out] key
7410  *   Flow matcher value.
7411  * @param[in] item
7412  *   Flow pattern to translate.
7413  */
7414 static void
7415 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7416                             void *matcher, void *key,
7417                             const struct rte_flow_item *item)
7418 {
7419         struct mlx5_priv *priv = dev->data->dev_private;
7420         const struct rte_flow_item_mark *mark;
7421         uint32_t value;
7422         uint32_t mask;
7423
7424         mark = item->mask ? (const void *)item->mask :
7425                             &rte_flow_item_mark_mask;
7426         mask = mark->id & priv->sh->dv_mark_mask;
7427         mark = (const void *)item->spec;
7428         MLX5_ASSERT(mark);
7429         value = mark->id & priv->sh->dv_mark_mask & mask;
7430         if (mask) {
7431                 enum modify_reg reg;
7432
7433                 /* Get the metadata register index for the mark. */
7434                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7435                 MLX5_ASSERT(reg > 0);
7436                 if (reg == REG_C_0) {
7438                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7439                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7440
7441                         mask &= msk_c0;
7442                         mask <<= shl_c0;
7443                         value <<= shl_c0;
7444                 }
7445                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7446         }
7447 }
7448
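/*
 * Usage sketch (illustrative, not part of the driver): matching packets
 * marked by an earlier flow rule; the id is an assumption for the
 * example.
 *
 *	struct rte_flow_item_mark mark_spec = { .id = 0x5 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_MARK,
 *		.spec = &mark_spec,
 *		.mask = &rte_flow_item_mark_mask,
 *	};
 */
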
7449 /**
7450  * Add META item to matcher
7451  *
7452  * @param[in] dev
7453  *   The device to configure through.
7454  * @param[in, out] matcher
7455  *   Flow matcher.
7456  * @param[in, out] key
7457  *   Flow matcher value.
7458  * @param[in] attr
7459  *   Attributes of flow that includes this item.
7460  * @param[in] item
7461  *   Flow pattern to translate.
7462  */
7463 static void
7464 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7465                             void *matcher, void *key,
7466                             const struct rte_flow_attr *attr,
7467                             const struct rte_flow_item *item)
7468 {
7469         const struct rte_flow_item_meta *meta_m;
7470         const struct rte_flow_item_meta *meta_v;
7471
7472         meta_m = (const void *)item->mask;
7473         if (!meta_m)
7474                 meta_m = &rte_flow_item_meta_mask;
7475         meta_v = (const void *)item->spec;
7476         if (meta_v) {
7477                 int reg;
7478                 uint32_t value = meta_v->data;
7479                 uint32_t mask = meta_m->data;
7480
7481                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7482                 if (reg < 0)
7483                         return;
7484                 /*
7485                  * In datapath code there are no endianness
7486                  * conversions for performance reasons, all
7487                  * pattern conversions are done in rte_flow.
7488                  */
7489                 value = rte_cpu_to_be_32(value);
7490                 mask = rte_cpu_to_be_32(mask);
7491                 if (reg == REG_C_0) {
7492                         struct mlx5_priv *priv = dev->data->dev_private;
7493                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7494                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7495 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7496                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7497
7498                         value >>= shr_c0;
7499                         mask >>= shr_c0;
7500 #endif
7501                         value <<= shl_c0;
7502                         mask <<= shl_c0;
7503                         MLX5_ASSERT(msk_c0);
7504                         MLX5_ASSERT(!(~msk_c0 & mask));
7505                 }
7506                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7507         }
7508 }
7509
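/*
 * Usage sketch (illustrative, not part of the driver): matching 32-bit
 * metadata set by a SET_META action or by the Tx datapath; the value is
 * an assumption for the example.
 *
 *	struct rte_flow_item_meta meta_spec = { .data = 0x1234 };
 *	struct rte_flow_item_meta meta_mask = { .data = UINT32_MAX };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_META,
 *		.spec = &meta_spec,
 *		.mask = &meta_mask,
 *	};
 */
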
7510 /**
7511  * Add vport metadata Reg C0 item to matcher
7512  *
7513  * @param[in, out] matcher
7514  *   Flow matcher.
7515  * @param[in, out] key
7516  *   Flow matcher value.
7517  * @param[in] value
7518  *   Register value to match.
 * @param[in] mask
 *   Register mask.
7519  */
7520 static void
7521 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7522                                   uint32_t value, uint32_t mask)
7523 {
7524         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7525 }
7526
7527 /**
7528  * Add tag item to matcher
7529  *
7530  * @param[in] dev
7531  *   The device to configure through.
7532  * @param[in, out] matcher
7533  *   Flow matcher.
7534  * @param[in, out] key
7535  *   Flow matcher value.
7536  * @param[in] item
7537  *   Flow pattern to translate.
7538  */
7539 static void
7540 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7541                                 void *matcher, void *key,
7542                                 const struct rte_flow_item *item)
7543 {
7544         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7545         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7546         uint32_t mask, value;
7547
7548         MLX5_ASSERT(tag_v);
7549         value = tag_v->data;
7550         mask = tag_m ? tag_m->data : UINT32_MAX;
7551         if (tag_v->id == REG_C_0) {
7552                 struct mlx5_priv *priv = dev->data->dev_private;
7553                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7554                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7555
7556                 mask &= msk_c0;
7557                 mask <<= shl_c0;
7558                 value <<= shl_c0;
7559         }
7560         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7561 }
7562
7563 /**
7564  * Add TAG item to matcher
7565  *
7566  * @param[in] dev
7567  *   The device to configure through.
7568  * @param[in, out] matcher
7569  *   Flow matcher.
7570  * @param[in, out] key
7571  *   Flow matcher value.
7572  * @param[in] item
7573  *   Flow pattern to translate.
7574  */
7575 static void
7576 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7577                            void *matcher, void *key,
7578                            const struct rte_flow_item *item)
7579 {
7580         const struct rte_flow_item_tag *tag_v = item->spec;
7581         const struct rte_flow_item_tag *tag_m = item->mask;
7582         enum modify_reg reg;
7583
7584         MLX5_ASSERT(tag_v);
7585         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7586         /* Get the metadata register index for the tag. */
7587         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7588         MLX5_ASSERT(reg > 0);
7589         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7590 }
7591
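/*
 * Usage sketch (illustrative, not part of the driver): matching
 * application TAG register 0; the data value is an assumption for the
 * example.
 *
 *	struct rte_flow_item_tag tag_spec = { .data = 0xbeef, .index = 0 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &tag_spec,
 *		.mask = &rte_flow_item_tag_mask,
 *	};
 */
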
7592 /**
7593  * Add source vport match to the specified matcher.
7594  *
7595  * @param[in, out] matcher
7596  *   Flow matcher.
7597  * @param[in, out] key
7598  *   Flow matcher value.
7599  * @param[in] port
7600  *   Source vport value to match.
7601  * @param[in] mask
7602  *   Mask to apply on the source vport value.
7603  */
7604 static void
7605 flow_dv_translate_item_source_vport(void *matcher, void *key,
7606                                     int16_t port, uint16_t mask)
7607 {
7608         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7609         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7610
7611         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7612         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7613 }
7614
7615 /**
7616  * Translate port-id item to E-Switch match on port-id.
7617  *
7618  * @param[in] dev
7619  *   The device to configure through.
7620  * @param[in, out] matcher
7621  *   Flow matcher.
7622  * @param[in, out] key
7623  *   Flow matcher value.
7624  * @param[in] item
7625  *   Flow pattern to translate.
7626  *
7627  * @return
7628  *   0 on success, a negative errno value otherwise.
7629  */
7630 static int
7631 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7632                                void *key, const struct rte_flow_item *item)
7633 {
7634         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7635         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7636         struct mlx5_priv *priv;
7637         uint16_t mask, id;
7638
7639         mask = pid_m ? pid_m->id : 0xffff;
7640         id = pid_v ? pid_v->id : dev->data->port_id;
7641         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7642         if (!priv)
7643                 return -rte_errno;
7644         /* Translate to vport field or to metadata, depending on mode. */
7645         if (priv->vport_meta_mask)
7646                 flow_dv_translate_item_meta_vport(matcher, key,
7647                                                   priv->vport_meta_tag,
7648                                                   priv->vport_meta_mask);
7649         else
7650                 flow_dv_translate_item_source_vport(matcher, key,
7651                                                     priv->vport_id, mask);
7652         return 0;
7653 }
7654
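/*
 * Usage sketch (illustrative, not part of the driver): an E-Switch rule
 * matching traffic received from DPDK port 1 (the id is an assumption
 * for the example); depending on the mode it becomes either a source
 * vport or a REG_C_0 metadata match.
 *
 *	struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid_spec,
 *		.mask = &rte_flow_item_port_id_mask,
 *	};
 */
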
7655 /**
7656  * Add ICMP6 item to matcher and to the value.
7657  *
7658  * @param[in, out] matcher
7659  *   Flow matcher.
7660  * @param[in, out] key
7661  *   Flow matcher value.
7662  * @param[in] item
7663  *   Flow pattern to translate.
7664  * @param[in] inner
7665  *   Item is inner pattern.
7666  */
7667 static void
7668 flow_dv_translate_item_icmp6(void *matcher, void *key,
7669                               const struct rte_flow_item *item,
7670                               int inner)
7671 {
7672         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7673         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7674         void *headers_m;
7675         void *headers_v;
7676         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7677                                      misc_parameters_3);
7678         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7679         if (inner) {
7680                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7681                                          inner_headers);
7682                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7683         } else {
7684                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7685                                          outer_headers);
7686                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7687         }
7688         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7689         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7690         if (!icmp6_v)
7691                 return;
7692         if (!icmp6_m)
7693                 icmp6_m = &rte_flow_item_icmp6_mask;
7694         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7695         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7696                  icmp6_v->type & icmp6_m->type);
7697         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7698         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7699                  icmp6_v->code & icmp6_m->code);
7700 }
7701
7702 /**
7703  * Add ICMP item to matcher and to the value.
7704  *
7705  * @param[in, out] matcher
7706  *   Flow matcher.
7707  * @param[in, out] key
7708  *   Flow matcher value.
7709  * @param[in] item
7710  *   Flow pattern to translate.
7711  * @param[in] inner
7712  *   Item is inner pattern.
7713  */
7714 static void
7715 flow_dv_translate_item_icmp(void *matcher, void *key,
7716                             const struct rte_flow_item *item,
7717                             int inner)
7718 {
7719         const struct rte_flow_item_icmp *icmp_m = item->mask;
7720         const struct rte_flow_item_icmp *icmp_v = item->spec;
7721         uint32_t icmp_header_data_m = 0;
7722         uint32_t icmp_header_data_v = 0;
7723         void *headers_m;
7724         void *headers_v;
7725         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7726                                      misc_parameters_3);
7727         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7728         if (inner) {
7729                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7730                                          inner_headers);
7731                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7732         } else {
7733                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7734                                          outer_headers);
7735                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7736         }
7737         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7738         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7739         if (!icmp_v)
7740                 return;
7741         if (!icmp_m)
7742                 icmp_m = &rte_flow_item_icmp_mask;
7743         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7744                  icmp_m->hdr.icmp_type);
7745         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7746                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7747         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7748                  icmp_m->hdr.icmp_code);
7749         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7750                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7751         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7752         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7753         if (icmp_header_data_m) {
7754                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7755                 icmp_header_data_v |=
7756                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7757                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7758                          icmp_header_data_m);
7759                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7760                          icmp_header_data_v & icmp_header_data_m);
7761         }
7762 }
7763
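/*
 * Usage sketch (illustrative, not part of the driver): matching ICMP
 * echo requests with a given identifier (the ident value is an
 * assumption for the example); identifier and sequence number are
 * folded into the single icmp_header_data field above.
 *
 *	struct rte_flow_item_icmp icmp_spec = {
 *		.hdr = {
 *			.icmp_type = RTE_IP_ICMP_ECHO_REQUEST,
 *			.icmp_ident = RTE_BE16(0x1234),
 *		},
 *	};
 *	struct rte_flow_item_icmp icmp_mask = {
 *		.hdr = {
 *			.icmp_type = 0xff,
 *			.icmp_ident = RTE_BE16(0xffff),
 *		},
 *	};
 */
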
7764 /**
7765  * Add GTP item to matcher and to the value.
7766  *
7767  * @param[in, out] matcher
7768  *   Flow matcher.
7769  * @param[in, out] key
7770  *   Flow matcher value.
7771  * @param[in] item
7772  *   Flow pattern to translate.
7773  * @param[in] inner
7774  *   Item is inner pattern.
7775  */
7776 static void
7777 flow_dv_translate_item_gtp(void *matcher, void *key,
7778                            const struct rte_flow_item *item, int inner)
7779 {
7780         const struct rte_flow_item_gtp *gtp_m = item->mask;
7781         const struct rte_flow_item_gtp *gtp_v = item->spec;
7782         void *headers_m;
7783         void *headers_v;
7784         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7785                                      misc_parameters_3);
7786         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7787         uint16_t dport = RTE_GTPU_UDP_PORT;
7788
7789         if (inner) {
7790                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7791                                          inner_headers);
7792                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7793         } else {
7794                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7795                                          outer_headers);
7796                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7797         }
7798         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7799                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7800                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7801         }
7802         if (!gtp_v)
7803                 return;
7804         if (!gtp_m)
7805                 gtp_m = &rte_flow_item_gtp_mask;
7806         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7807                  gtp_m->v_pt_rsv_flags);
7808         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7809                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7810         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7811         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7812                  gtp_v->msg_type & gtp_m->msg_type);
7813         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7814                  rte_be_to_cpu_32(gtp_m->teid));
7815         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7816                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7817 }
7818
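/*
 * Usage sketch (illustrative, not part of the driver): matching GTP-U
 * G-PDU packets (message type 0xff) with a given TEID; the TEID value
 * is an assumption for the example.
 *
 *	struct rte_flow_item_gtp gtp_spec = {
 *		.msg_type = 0xff,
 *		.teid = RTE_BE32(1234),
 *	};
 *	struct rte_flow_item_gtp gtp_mask = {
 *		.msg_type = 0xff,
 *		.teid = RTE_BE32(0xffffffff),
 *	};
 */
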
7819 /**
7820  * Add eCPRI item to matcher and to the value.
7821  *
7822  * @param[in] dev
7823  *   The device to configure through.
7824  * @param[in, out] matcher
7825  *   Flow matcher.
7826  * @param[in, out] key
7827  *   Flow matcher value.
7828  * @param[in] item
7829  *   Flow pattern to translate.
7832  */
7833 static void
7834 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7835                              void *key, const struct rte_flow_item *item)
7836 {
7837         struct mlx5_priv *priv = dev->data->dev_private;
7838         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7839         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7840         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7841                                      misc_parameters_4);
7842         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7843         uint32_t *samples;
7844         void *dw_m;
7845         void *dw_v;
7846
7847         if (!ecpri_v)
7848                 return;
7849         if (!ecpri_m)
7850                 ecpri_m = &rte_flow_item_ecpri_mask;
7851         /*
7852          * At most four DW samples are supported in a single matching now.
7853          * Two are currently used for eCPRI matching:
7854          * 1. Type: one byte, the mask should be 0x00ff0000 in network order.
7855          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7856          *    if any.
7857          */
7858         if (!ecpri_m->hdr.common.u32)
7859                 return;
7860         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7861         /* Need to take the whole DW as the mask to fill the entry. */
7862         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7863                             prog_sample_field_value_0);
7864         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7865                             prog_sample_field_value_0);
7866         /* Already big endian (network order) in the header. */
7867         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7868         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7869         /* Sample#0, used for matching type, offset 0. */
7870         MLX5_SET(fte_match_set_misc4, misc4_m,
7871                  prog_sample_field_id_0, samples[0]);
7872         /* It makes no sense to set the sample ID in the mask field. */
7873         MLX5_SET(fte_match_set_misc4, misc4_v,
7874                  prog_sample_field_id_0, samples[0]);
7875         /*
7876          * Checking if message body part needs to be matched.
7877          * Some wildcard rules only matching type field should be supported.
7878          */
7879         if (ecpri_m->hdr.dummy[0]) {
7880                 switch (ecpri_v->hdr.common.type) {
7881                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7882                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7883                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7884                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7885                                             prog_sample_field_value_1);
7886                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7887                                             prog_sample_field_value_1);
7888                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7889                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7890                         /* Sample#1, to match message body, offset 4. */
7891                         MLX5_SET(fte_match_set_misc4, misc4_m,
7892                                  prog_sample_field_id_1, samples[1]);
7893                         MLX5_SET(fte_match_set_misc4, misc4_v,
7894                                  prog_sample_field_id_1, samples[1]);
7895                         break;
7896                 default:
7897                         /* Others, do not match any sample ID. */
7898                         break;
7899                 }
7900         }
7901 }
7902
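/*
 * Usage sketch (illustrative, not part of the driver): a type-only
 * wildcard match on eCPRI real-time control messages, following the
 * network-order mask layout described above; the header is stored in
 * big endian, hence the explicit conversion.
 *
 *	struct rte_flow_item_ecpri ecpri_spec = {
 *		.hdr.common.u32 =
 *			rte_cpu_to_be_32(RTE_ECPRI_MSG_TYPE_RTC_CTRL << 16),
 *	};
 *	struct rte_flow_item_ecpri ecpri_mask = {
 *		.hdr.common.u32 = rte_cpu_to_be_32(0x00ff0000),
 *	};
 */
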
7903 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7904
7905 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7906         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7907                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7908
7909 /**
7910  * Calculate flow matcher enable bitmap.
7911  *
7912  * @param match_criteria
7913  *   Pointer to flow matcher criteria.
7914  *
7915  * @return
7916  *   Bitmap of enabled fields.
7917  */
7918 static uint8_t
7919 flow_dv_matcher_enable(uint32_t *match_criteria)
7920 {
7921         uint8_t match_criteria_enable;
7922
7923         match_criteria_enable =
7924                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7925                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7926         match_criteria_enable |=
7927                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7928                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7929         match_criteria_enable |=
7930                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7931                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7932         match_criteria_enable |=
7933                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7934                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7935         match_criteria_enable |=
7936                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7937                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7938         match_criteria_enable |=
7939                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7940                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7941         return match_criteria_enable;
7942 }
7943
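/*
 * Worked example: a matcher whose criteria touch only outer_headers and
 * misc_parameters_2 gets
 * match_criteria_enable = (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *                         (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
 * all-zero header blocks contribute nothing since they compare equal to
 * matcher_zero.
 */
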
7944 struct mlx5_hlist_entry *
7945 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7946 {
7947         struct mlx5_dev_ctx_shared *sh = list->ctx;
7948         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7949         struct rte_eth_dev *dev = ctx->dev;
7950         struct mlx5_flow_tbl_data_entry *tbl_data;
7951         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7952         struct rte_flow_error *error = ctx->error;
7953         union mlx5_flow_tbl_key key = { .v64 = key64 };
7954         struct mlx5_flow_tbl_resource *tbl;
7955         void *domain;
7956         uint32_t idx = 0;
7957         int ret;
7958
7959         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7960         if (!tbl_data) {
7961                 rte_flow_error_set(error, ENOMEM,
7962                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7963                                    NULL,
7964                                    "cannot allocate flow table data entry");
7965                 return NULL;
7966         }
7967         tbl_data->idx = idx;
7968         tbl_data->tunnel = tt_prm->tunnel;
7969         tbl_data->group_id = tt_prm->group_id;
7970         tbl_data->external = tt_prm->external;
7971         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7972         tbl_data->is_egress = !!key.direction;
7973         tbl = &tbl_data->tbl;
7974         if (key.dummy)
7975                 return &tbl_data->entry;
7976         if (key.domain)
7977                 domain = sh->fdb_domain;
7978         else if (key.direction)
7979                 domain = sh->tx_domain;
7980         else
7981                 domain = sh->rx_domain;
7982         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
7983         if (ret) {
7984                 rte_flow_error_set(error, ENOMEM,
7985                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7986                                    NULL, "cannot create flow table object");
7987                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7988                 return NULL;
7989         }
7990         if (key.table_id) {
7991                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
7992                                         (tbl->obj, &tbl_data->jump.action);
7993                 if (ret) {
7994                         rte_flow_error_set(error, ENOMEM,
7995                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7996                                            NULL,
7997                                            "cannot create flow jump action");
7998                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7999                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8000                         return NULL;
8001                 }
8002         }
8003         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8004               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8005               key.table_id);
8006         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8007                              flow_dv_matcher_create_cb,
8008                              flow_dv_matcher_match_cb,
8009                              flow_dv_matcher_remove_cb);
8010         return &tbl_data->entry;
8011 }
8012
8013 /**
8014  * Get a flow table.
8015  *
8016  * @param[in, out] dev
8017  *   Pointer to rte_eth_dev structure.
8018  * @param[in] table_id
8019  *   Table id to use.
8020  * @param[in] egress
8021  *   Direction of the table.
8022  * @param[in] transfer
8023  *   E-Switch or NIC flow.
 * @param[in] external
 *   True if the table is used by user-created flows.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if none.
 * @param[in] group_id
 *   Group ID to be used with tunnel offload.
8024  * @param[in] dummy
8025  *   Dummy entry for dv API.
8026  * @param[out] error
8027  *   pointer to error structure.
8028  *
8029  * @return
8030  *   Returns the table resource based on the index, NULL in case of failure.
8031  */
8032 struct mlx5_flow_tbl_resource *
8033 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8034                          uint32_t table_id, uint8_t egress,
8035                          uint8_t transfer,
8036                          bool external,
8037                          const struct mlx5_flow_tunnel *tunnel,
8038                          uint32_t group_id, uint8_t dummy,
8039                          struct rte_flow_error *error)
8040 {
8041         struct mlx5_priv *priv = dev->data->dev_private;
8042         union mlx5_flow_tbl_key table_key = {
8043                 {
8044                         .table_id = table_id,
8045                         .dummy = dummy,
8046                         .domain = !!transfer,
8047                         .direction = !!egress,
8048                 }
8049         };
8050         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8051                 .tunnel = tunnel,
8052                 .group_id = group_id,
8053                 .external = external,
8054         };
8055         struct mlx5_flow_cb_ctx ctx = {
8056                 .dev = dev,
8057                 .error = error,
8058                 .data = &tt_prm,
8059         };
8060         struct mlx5_hlist_entry *entry;
8061         struct mlx5_flow_tbl_data_entry *tbl_data;
8062
8063         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8064         if (!entry) {
8065                 rte_flow_error_set(error, ENOMEM,
8066                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8067                                    "cannot get table");
8068                 return NULL;
8069         }
8070         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8071         return &tbl_data->tbl;
8072 }
8073
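/*
 * Usage sketch (illustrative, not part of the driver): acquiring a NIC
 * Rx table for table id 1; the argument values are assumptions for the
 * example and follow the parameter list documented above.
 *
 *	struct rte_flow_error error;
 *	struct mlx5_flow_tbl_resource *tbl =
 *		flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL,
 *					 1, 0, &error);
 *	if (!tbl)
 *		return -rte_errno;
 */
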
8074 void
8075 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8076                       struct mlx5_hlist_entry *entry)
8077 {
8078         struct mlx5_dev_ctx_shared *sh = list->ctx;
8079         struct mlx5_flow_tbl_data_entry *tbl_data =
8080                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8081
8082         MLX5_ASSERT(entry && sh);
8083         if (tbl_data->jump.action)
8084                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8085         if (tbl_data->tbl.obj)
8086                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8087         if (tbl_data->tunnel_offload && tbl_data->external) {
8088                 struct mlx5_hlist_entry *he;
8089                 struct mlx5_hlist *tunnel_grp_hash;
8090                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8091                 union tunnel_tbl_key tunnel_key = {
8092                         .tunnel_id = tbl_data->tunnel ?
8093                                         tbl_data->tunnel->tunnel_id : 0,
8094                         .group = tbl_data->group_id
8095                 };
8096                 union mlx5_flow_tbl_key table_key = {
8097                         .v64 = entry->key
8098                 };
8099                 uint32_t table_id = table_key.table_id;
8100
8101                 tunnel_grp_hash = tbl_data->tunnel ?
8102                                         tbl_data->tunnel->groups :
8103                                         thub->groups;
8104                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8105                 if (he) {
8106                         struct tunnel_tbl_entry *tte;
8107                         tte = container_of(he, typeof(*tte), hash);
8108                         MLX5_ASSERT(tte->flow_table == table_id);
8109                         mlx5_hlist_remove(tunnel_grp_hash, he);
8110                         mlx5_free(tte);
8111                 }
8112                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
8113                                 tunnel_flow_tbl_to_id(table_id));
8114                 DRV_LOG(DEBUG,
8115                         "Table_id %#x tunnel %u group %u released.",
8116                         table_id,
8117                         tbl_data->tunnel ?
8118                         tbl_data->tunnel->tunnel_id : 0,
8119                         tbl_data->group_id);
8120         }
8121         mlx5_cache_list_destroy(&tbl_data->matchers);
8122         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8123 }
8124
8125 /**
8126  * Release a flow table.
8127  *
8128  * @param[in] sh
8129  *   Pointer to device shared structure.
8130  * @param[in] tbl
8131  *   Table resource to be released.
8132  *
8133  * @return
8134  *   Returns 0 if the table was released, 1 otherwise.
8135  */
8136 static int
8137 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8138                              struct mlx5_flow_tbl_resource *tbl)
8139 {
8140         struct mlx5_flow_tbl_data_entry *tbl_data =
8141                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8142
8143         if (!tbl)
8144                 return 0;
8145         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8146 }
8147
8148 int
8149 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8150                          struct mlx5_cache_entry *entry, void *cb_ctx)
8151 {
8152         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8153         struct mlx5_flow_dv_matcher *ref = ctx->data;
8154         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8155                                                         entry);
8156
8157         return cur->crc != ref->crc ||
8158                cur->priority != ref->priority ||
8159                memcmp((const void *)cur->mask.buf,
8160                       (const void *)ref->mask.buf, ref->mask.size);
8161 }
8162
8163 struct mlx5_cache_entry *
8164 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8165                           struct mlx5_cache_entry *entry __rte_unused,
8166                           void *cb_ctx)
8167 {
8168         struct mlx5_dev_ctx_shared *sh = list->ctx;
8169         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8170         struct mlx5_flow_dv_matcher *ref = ctx->data;
8171         struct mlx5_flow_dv_matcher *cache;
8172         struct mlx5dv_flow_matcher_attr dv_attr = {
8173                 .type = IBV_FLOW_ATTR_NORMAL,
8174                 .match_mask = (void *)&ref->mask,
8175         };
8176         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8177                                                             typeof(*tbl), tbl);
8178         int ret;
8179
8180         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8181         if (!cache) {
8182                 rte_flow_error_set(ctx->error, ENOMEM,
8183                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8184                                    "cannot create matcher");
8185                 return NULL;
8186         }
8187         *cache = *ref;
8188         dv_attr.match_criteria_enable =
8189                 flow_dv_matcher_enable(cache->mask.buf);
8190         dv_attr.priority = ref->priority;
8191         if (tbl->is_egress)
8192                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8193         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8194                                                &cache->matcher_object);
8195         if (ret) {
8196                 mlx5_free(cache);
8197                 rte_flow_error_set(ctx->error, ENOMEM,
8198                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8199                                    "cannot create matcher");
8200                 return NULL;
8201         }
8202         return &cache->entry;
8203 }
8204
8205 /**
8206  * Register the flow matcher.
8207  *
8208  * @param[in, out] dev
8209  *   Pointer to rte_eth_dev structure.
8210  * @param[in, out] ref
8211  *   Pointer to flow matcher.
8212  * @param[in, out] key
8213  *   Pointer to flow table key.
8214  * @param[in, out] dev_flow
8215  *   Pointer to the dev_flow.
8216  * @param[out] error
8217  *   pointer to error structure.
8218  *
8219  * @return
8220  *   0 on success, otherwise -errno and rte_errno is set.
8221  */
8222 static int
8223 flow_dv_matcher_register(struct rte_eth_dev *dev,
8224                          struct mlx5_flow_dv_matcher *ref,
8225                          union mlx5_flow_tbl_key *key,
8226                          struct mlx5_flow *dev_flow,
8227                          struct rte_flow_error *error)
8228 {
8229         struct mlx5_cache_entry *entry;
8230         struct mlx5_flow_dv_matcher *cache;
8231         struct mlx5_flow_tbl_resource *tbl;
8232         struct mlx5_flow_tbl_data_entry *tbl_data;
8233         struct mlx5_flow_cb_ctx ctx = {
8234                 .error = error,
8235                 .data = ref,
8236         };
8237
8238         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8239                                        key->domain, false, NULL, 0, 0, error);
8240         if (!tbl)
8241                 return -rte_errno;      /* No need to refill the error info */
8242         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8243         ref->tbl = tbl;
8244         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8245         if (!entry) {
8246                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8247                 return rte_flow_error_set(error, ENOMEM,
8248                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8249                                           "cannot allocate ref memory");
8250         }
8251         cache = container_of(entry, typeof(*cache), entry);
8252         dev_flow->handle->dvh.matcher = cache;
8253         return 0;
8254 }
8255
8256 struct mlx5_hlist_entry *
8257 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8258 {
8259         struct mlx5_dev_ctx_shared *sh = list->ctx;
8260         struct rte_flow_error *error = ctx;
8261         struct mlx5_flow_dv_tag_resource *entry;
8262         uint32_t idx = 0;
8263         int ret;
8264
8265         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8266         if (!entry) {
8267                 rte_flow_error_set(error, ENOMEM,
8268                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8269                                    "cannot allocate resource memory");
8270                 return NULL;
8271         }
8272         entry->idx = idx;
8273         ret = mlx5_flow_os_create_flow_action_tag(key,
8274                                                   &entry->action);
8275         if (ret) {
8276                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8277                 rte_flow_error_set(error, ENOMEM,
8278                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8279                                    NULL, "cannot create action");
8280                 return NULL;
8281         }
8282         return &entry->entry;
8283 }
8284
8285 /**
8286  * Find existing tag resource or create and register a new one.
8287  *
8288  * @param[in, out] dev
8289  *   Pointer to rte_eth_dev structure.
8290  * @param[in, out] tag_be24
8291  *   Tag value in big endian, then right-shifted by 8 bits.
8292  * @param[in, out] dev_flow
8293  *   Pointer to the dev_flow.
8294  * @param[out] error
8295  *   pointer to error structure.
8296  *
8297  * @return
8298  *   0 on success, otherwise -errno and rte_errno is set.
8299  */
8300 static int
8301 flow_dv_tag_resource_register
8302                         (struct rte_eth_dev *dev,
8303                          uint32_t tag_be24,
8304                          struct mlx5_flow *dev_flow,
8305                          struct rte_flow_error *error)
8306 {
8307         struct mlx5_priv *priv = dev->data->dev_private;
8308         struct mlx5_flow_dv_tag_resource *cache_resource;
8309         struct mlx5_hlist_entry *entry;
8310
8311         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8312         if (entry) {
8313                 cache_resource = container_of
8314                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8315                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8316                 dev_flow->dv.tag_resource = cache_resource;
8317                 return 0;
8318         }
8319         return -rte_errno;
8320 }
8321
8322 void
8323 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8324                       struct mlx5_hlist_entry *entry)
8325 {
8326         struct mlx5_dev_ctx_shared *sh = list->ctx;
8327         struct mlx5_flow_dv_tag_resource *tag =
8328                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8329
8330         MLX5_ASSERT(tag && sh && tag->action);
8331         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8332         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8333         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8334 }
8335
8336 /**
8337  * Release the tag.
8338  *
8339  * @param dev
8340  *   Pointer to Ethernet device.
8341  * @param tag_idx
8342  *   Tag index.
8343  *
8344  * @return
8345  *   1 while a reference on it exists, 0 when freed.
8346  */
8347 static int
8348 flow_dv_tag_release(struct rte_eth_dev *dev,
8349                     uint32_t tag_idx)
8350 {
8351         struct mlx5_priv *priv = dev->data->dev_private;
8352         struct mlx5_flow_dv_tag_resource *tag;
8353
8354         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8355         if (!tag)
8356                 return 0;
8357         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8358                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8359         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8360 }
8361
8362 /**
8363  * Translate port ID action to vport.
8364  *
8365  * @param[in] dev
8366  *   Pointer to rte_eth_dev structure.
8367  * @param[in] action
8368  *   Pointer to the port ID action.
8369  * @param[out] dst_port_id
8370  *   The target port ID.
8371  * @param[out] error
8372  *   Pointer to the error structure.
8373  *
8374  * @return
8375  *   0 on success, a negative errno value otherwise and rte_errno is set.
8376  */
8377 static int
8378 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8379                                  const struct rte_flow_action *action,
8380                                  uint32_t *dst_port_id,
8381                                  struct rte_flow_error *error)
8382 {
8383         uint32_t port;
8384         struct mlx5_priv *priv;
8385         const struct rte_flow_action_port_id *conf =
8386                         (const struct rte_flow_action_port_id *)action->conf;
8387
8388         port = conf->original ? dev->data->port_id : conf->id;
8389         priv = mlx5_port_to_eswitch_info(port, false);
8390         if (!priv)
8391                 return rte_flow_error_set(error, rte_errno,
8392                                           RTE_FLOW_ERROR_TYPE_ACTION,
8393                                           NULL,
8394                                           "No eswitch info was found for port");
8395 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8396         /*
8397          * This parameter is transferred to
8398          * mlx5dv_dr_action_create_dest_ib_port().
8399          */
8400         *dst_port_id = priv->dev_port;
8401 #else
8402         /*
8403          * Legacy mode, no LAG configuration is supported.
8404          * This parameter is transferred to
8405          * mlx5dv_dr_action_create_dest_vport().
8406          */
8407         *dst_port_id = priv->vport_id;
8408 #endif
8409         return 0;
8410 }
8411
8412 /**
8413  * Create a counter with aging configuration.
8414  *
8415  * @param[in] dev
8416  *   Pointer to rte_eth_dev structure.
8417  * @param[out] count
8418  *   Pointer to the counter action configuration.
8419  * @param[in] age
8420  *   Pointer to the aging action configuration.
8421  *
8422  * @return
8423  *   Index to flow counter on success, 0 otherwise.
8424  */
8425 static uint32_t
8426 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8427                                 struct mlx5_flow *dev_flow,
8428                                 const struct rte_flow_action_count *count,
8429                                 const struct rte_flow_action_age *age)
8430 {
8431         uint32_t counter;
8432         struct mlx5_age_param *age_param;
8433
8434         if (count && count->shared)
8435                 counter = flow_dv_counter_get_shared(dev, count->id);
8436         else
8437                 counter = flow_dv_counter_alloc(dev, !!age);
8438         if (!counter || !age)
8439                 return counter;
8440         age_param = flow_dv_counter_idx_get_age(dev, counter);
8441         age_param->context = age->context ? age->context :
8442                 (void *)(uintptr_t)(dev_flow->flow_idx);
8443         age_param->timeout = age->timeout;
8444         age_param->port_id = dev->data->port_id;
8445         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8446         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8447         return counter;
8448 }
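
/*
 * Usage sketch (illustrative, not part of the driver): the action
 * configurations an application could pass so that the helper above
 * allocates an age-enabled counter; the values are assumptions for the
 * example and the timeout is in seconds.
 *
 *	struct rte_flow_action_count count_conf = { .id = 0 };
 *	struct rte_flow_action_age age_conf = {
 *		.timeout = 10,
 *		.context = NULL,
 *	};
 */
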
8449 /**
8450  * Add Tx queue matcher
8451  *
8452  * @param[in] dev
8453  *   Pointer to the dev struct.
8454  * @param[in, out] matcher
8455  *   Flow matcher.
8456  * @param[in, out] key
8457  *   Flow matcher value.
8458  * @param[in] item
8459  *   Flow pattern to translate.
8460  * @param[in] inner
8461  *   Item is inner pattern.
8462  */
8463 static void
8464 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8465                                 void *matcher, void *key,
8466                                 const struct rte_flow_item *item)
8467 {
8468         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8469         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8470         void *misc_m =
8471                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8472         void *misc_v =
8473                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8474         struct mlx5_txq_ctrl *txq;
8475         uint32_t queue;
8476
8478         queue_m = (const void *)item->mask;
8479         if (!queue_m)
8480                 return;
8481         queue_v = (const void *)item->spec;
8482         if (!queue_v)
8483                 return;
8484         txq = mlx5_txq_get(dev, queue_v->queue);
8485         if (!txq)
8486                 return;
8487         queue = txq->obj->sq->id;
8488         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8489         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8490                  queue & queue_m->queue);
8491         mlx5_txq_release(dev, queue_v->queue);
8492 }
8493
8494 /**
8495  * Set the hash fields according to the @p flow information.
8496  *
8497  * @param[in] dev_flow
8498  *   Pointer to the mlx5_flow.
8499  * @param[in] rss_desc
8500  *   Pointer to the mlx5_flow_rss_desc.
8501  */
8502 static void
8503 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8504                        struct mlx5_flow_rss_desc *rss_desc)
8505 {
8506         uint64_t items = dev_flow->handle->layers;
8507         int rss_inner = 0;
8508         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8509
8510         dev_flow->hash_fields = 0;
8511 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8512         if (rss_desc->level >= 2) {
8513                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8514                 rss_inner = 1;
8515         }
8516 #endif
8517         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8518             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8519                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8520                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8521                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8522                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8523                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8524                         else
8525                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8526                 }
8527         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8528                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8529                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8530                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8531                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8532                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8533                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8534                         else
8535                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8536                 }
8537         }
8538         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8539             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8540                 if (rss_types & ETH_RSS_UDP) {
8541                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8542                                 dev_flow->hash_fields |=
8543                                                 IBV_RX_HASH_SRC_PORT_UDP;
8544                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8545                                 dev_flow->hash_fields |=
8546                                                 IBV_RX_HASH_DST_PORT_UDP;
8547                         else
8548                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8549                 }
8550         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8551                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8552                 if (rss_types & ETH_RSS_TCP) {
8553                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8554                                 dev_flow->hash_fields |=
8555                                                 IBV_RX_HASH_SRC_PORT_TCP;
8556                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8557                                 dev_flow->hash_fields |=
8558                                                 IBV_RX_HASH_DST_PORT_TCP;
8559                         else
8560                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8561                 }
8562         }
8563 }
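
/*
 * Illustrative outcome (a sketch, not driver code): for an outer
 * IPv4/UDP flow with rss_desc->types = ETH_RSS_IP | ETH_RSS_UDP and
 * neither SRC_ONLY nor DST_ONLY requested, the function above yields:
 *
 *     dev_flow->hash_fields = MLX5_IPV4_IBV_RX_HASH |
 *                             MLX5_UDP_IBV_RX_HASH;
 *
 * i.e. hashing on source/destination IPv4 addresses and UDP ports.
 * With rss_desc->level >= 2 the same selection applies to the inner
 * headers and IBV_RX_HASH_INNER is OR-ed in.
 */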
8564
8565 /**
8566  * Prepare an Rx Hash queue.
8567  *
8568  * @param dev
8569  *   Pointer to Ethernet device.
8570  * @param[in] dev_flow
8571  *   Pointer to the mlx5_flow.
8572  * @param[in] rss_desc
8573  *   Pointer to the mlx5_flow_rss_desc.
8574  * @param[out] hrxq_idx
8575  *   Hash Rx queue index.
8576  *
8577  * @return
8578  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8579  */
8580 static struct mlx5_hrxq *
8581 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8582                      struct mlx5_flow *dev_flow,
8583                      struct mlx5_flow_rss_desc *rss_desc,
8584                      uint32_t *hrxq_idx)
8585 {
8586         struct mlx5_priv *priv = dev->data->dev_private;
8587         struct mlx5_flow_handle *dh = dev_flow->handle;
8588         struct mlx5_hrxq *hrxq;
8589
8590         MLX5_ASSERT(rss_desc->queue_num);
8591         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8592         rss_desc->hash_fields = dev_flow->hash_fields;
8593         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8594         rss_desc->standalone = false;
8595         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8596         if (!*hrxq_idx)
8597                 return NULL;
8598         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8599                               *hrxq_idx);
8600         return hrxq;
8601 }
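
/*
 * Illustrative call pattern (a sketch mirroring the QUEUE sub-action
 * translation below): steer to a single queue by filling the RSS
 * descriptor and resolving the hash Rx queue object.
 *
 *     uint32_t hrxq_idx;
 *     struct mlx5_hrxq *hrxq;
 *
 *     rss_desc->queue_num = 1;
 *     rss_desc->queue[0] = queue->index;
 *     hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc, &hrxq_idx);
 *     if (!hrxq)
 *             return rte_flow_error_set(error, rte_errno,
 *                                       RTE_FLOW_ERROR_TYPE_ACTION,
 *                                       NULL, "cannot create fate queue");
 */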
8602
8603 /**
8604  * Find existing sample resource or create and register a new one.
8605  *
8606  * @param[in, out] dev
8607  *   Pointer to rte_eth_dev structure.
8608  * @param[in] resource
8609  *   Pointer to sample resource.
8610  * @param[in, out] dev_flow
8611  *   Pointer to the dev_flow.
8612  * @param[in, out] sample_dv_actions
8613  *   Pointer to sample actions list.
8614  * @param[out] error
8615  *   Pointer to the error structure.
8616  *
8617  * @return
8618  *   0 on success, a negative errno value otherwise and rte_errno is set.
8619  */
8620 static int
8621 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8622                          struct mlx5_flow_dv_sample_resource *resource,
8623                          struct mlx5_flow *dev_flow,
8624                          void **sample_dv_actions,
8625                          struct rte_flow_error *error)
8626 {
8627         struct mlx5_flow_dv_sample_resource *cache_resource;
8628         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8629         struct mlx5_priv *priv = dev->data->dev_private;
8630         struct mlx5_dev_ctx_shared *sh = priv->sh;
8631         struct mlx5_flow_tbl_resource *tbl;
8632         uint32_t idx = 0;
8633         const uint32_t next_ft_step = 1;
8634         uint32_t next_ft_id = resource->ft_id + next_ft_step;
8635         uint8_t is_egress = 0;
8636         uint8_t is_transfer = 0;
8637
8638         /* Lookup a matching resource from cache. */
8639         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_SAMPLE], sh->sample_action_list,
8640                       idx, cache_resource, next) {
8641                 if (resource->ratio == cache_resource->ratio &&
8642                     resource->ft_type == cache_resource->ft_type &&
8643                     resource->ft_id == cache_resource->ft_id &&
8644                     resource->set_action == cache_resource->set_action &&
8645                     !memcmp((void *)&resource->sample_act,
8646                             (void *)&cache_resource->sample_act,
8647                             sizeof(struct mlx5_flow_sub_actions_list))) {
8648                         DRV_LOG(DEBUG, "sample resource %p: refcnt %d++",
8649                                 (void *)cache_resource,
8650                                 __atomic_load_n(&cache_resource->refcnt,
8651                                                 __ATOMIC_RELAXED));
8652                         __atomic_fetch_add(&cache_resource->refcnt, 1,
8653                                            __ATOMIC_RELAXED);
8654                         dev_flow->handle->dvh.rix_sample = idx;
8655                         dev_flow->dv.sample_res = cache_resource;
8656                         return 0;
8657                 }
8658         }
8659         /* Register new sample resource. */
8660         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE],
8661                                        &dev_flow->handle->dvh.rix_sample);
8662         if (!cache_resource)
8663                 return rte_flow_error_set(error, ENOMEM,
8664                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8665                                           NULL,
8666                                           "cannot allocate resource memory");
8667         *cache_resource = *resource;
8668         /* Create normal path table level */
8669         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8670                 is_transfer = 1;
8671         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
8672                 is_egress = 1;
8673         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8674                                         is_egress, is_transfer,
8675                                         dev_flow->external, NULL, 0, 0, error);
8676         if (!tbl) {
8677                 rte_flow_error_set(error, ENOMEM,
8678                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8679                                           NULL,
8680                                           "failed to create normal path "
8681                                           "table for sample");
8682                 goto error;
8683         }
8684         cache_resource->normal_path_tbl = tbl;
8685         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8686                 cache_resource->default_miss =
8687                                 mlx5_glue->dr_create_flow_action_default_miss();
8688                 if (!cache_resource->default_miss) {
8689                         rte_flow_error_set(error, ENOMEM,
8690                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8691                                                 NULL,
8692                                                 "cannot create default miss "
8693                                                 "action");
8694                         goto error;
8695                 }
8696                 sample_dv_actions[resource->sample_act.actions_num++] =
8697                                                 cache_resource->default_miss;
8698         }
8699         /* Create a DR sample action */
8700         sampler_attr.sample_ratio = cache_resource->ratio;
8701         sampler_attr.default_next_table = tbl->obj;
8702         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8703         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8704                                                         &sample_dv_actions[0];
8705         sampler_attr.action = cache_resource->set_action;
8706         cache_resource->verbs_action =
8707                 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8708         if (!cache_resource->verbs_action) {
8709                 rte_flow_error_set(error, ENOMEM,
8710                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8711                                         NULL, "cannot create sample action");
8712                 goto error;
8713         }
8714         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8715         ILIST_INSERT(sh->ipool[MLX5_IPOOL_SAMPLE], &sh->sample_action_list,
8716                      dev_flow->handle->dvh.rix_sample, cache_resource,
8717                      next);
8718         dev_flow->dv.sample_res = cache_resource;
8719         DRV_LOG(DEBUG, "new sample resource %p: refcnt %d++",
8720                 (void *)cache_resource,
8721                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8722         return 0;
8723 error:
8724         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8725                 if (cache_resource->default_miss)
8726                         claim_zero(mlx5_glue->destroy_flow_action
8727                                 (cache_resource->default_miss));
8728         } else {
8729                 if (cache_resource->sample_idx.rix_hrxq &&
8730                     !mlx5_hrxq_release(dev,
8731                                 cache_resource->sample_idx.rix_hrxq))
8732                         cache_resource->sample_idx.rix_hrxq = 0;
8733                 if (cache_resource->sample_idx.rix_tag &&
8734                     !flow_dv_tag_release(dev,
8735                                 cache_resource->sample_idx.rix_tag))
8736                         cache_resource->sample_idx.rix_tag = 0;
8737                 if (cache_resource->sample_idx.cnt) {
8738                         flow_dv_counter_release(dev,
8739                                 cache_resource->sample_idx.cnt);
8740                         cache_resource->sample_idx.cnt = 0;
8741                 }
8742         }
8743         if (cache_resource->normal_path_tbl)
8744                 flow_dv_tbl_resource_release(MLX5_SH(dev),
8745                                 cache_resource->normal_path_tbl);
8746         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE],
8747                                 dev_flow->handle->dvh.rix_sample);
8748         dev_flow->handle->dvh.rix_sample = 0;
8749         return -rte_errno;
8750 }
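
/*
 * Resource sharing sketch (illustrative numbers): flows whose
 * (ratio, ft_type, ft_id, set_action, sample_act) tuples match share
 * one sampler object through the cache lookup above.
 *
 *     flow 1: ratio 2, group 1 -> new sampler, refcnt = 1
 *     flow 2: ratio 2, group 1 -> cache hit,   refcnt = 2
 *     flow 3: ratio 4, group 1 -> new sampler, refcnt = 1
 */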
8751
8752 /**
8753  * Find existing destination array resource or create and register a new one.
8754  *
8755  * @param[in, out] dev
8756  *   Pointer to rte_eth_dev structure.
8757  * @param[in] resource
8758  *   Pointer to destination array resource.
8759  * @param[in, out] dev_flow
8760  *   Pointer to the dev_flow.
8761  * @param[out] error
8762  *   Pointer to the error structure.
8763  *
8764  * @return
8765  *   0 on success, a negative errno value otherwise and rte_errno is set.
8766  */
8767 static int
8768 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8769                          struct mlx5_flow_dv_dest_array_resource *resource,
8770                          struct mlx5_flow *dev_flow,
8771                          struct rte_flow_error *error)
8772 {
8773         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8774         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8775         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8776         struct mlx5_priv *priv = dev->data->dev_private;
8777         struct mlx5_dev_ctx_shared *sh = priv->sh;
8778         struct mlx5_flow_sub_actions_list *sample_act;
8779         struct mlx5dv_dr_domain *domain;
8780         uint32_t idx = 0;
8781
8782         /* Lookup a matching resource from cache. */
8783         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8784                       sh->dest_array_list,
8785                       idx, cache_resource, next) {
8786                 if (resource->num_of_dest == cache_resource->num_of_dest &&
8787                     resource->ft_type == cache_resource->ft_type &&
8788                     !memcmp((void *)cache_resource->sample_act,
8789                             (void *)resource->sample_act,
8790                            (resource->num_of_dest *
8791                            sizeof(struct mlx5_flow_sub_actions_list)))) {
8792                         DRV_LOG(DEBUG, "dest array resource %p: refcnt %d++",
8793                                 (void *)cache_resource,
8794                                 __atomic_load_n(&cache_resource->refcnt,
8795                                                 __ATOMIC_RELAXED));
8796                         __atomic_fetch_add(&cache_resource->refcnt, 1,
8797                                            __ATOMIC_RELAXED);
8798                         dev_flow->handle->dvh.rix_dest_array = idx;
8799                         dev_flow->dv.dest_array_res = cache_resource;
8800                         return 0;
8801                 }
8802         }
8803         /* Register new destination array resource. */
8804         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8805                                        &dev_flow->handle->dvh.rix_dest_array);
8806         if (!cache_resource)
8807                 return rte_flow_error_set(error, ENOMEM,
8808                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8809                                           NULL,
8810                                           "cannot allocate resource memory");
8811         *cache_resource = *resource;
8812         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8813                 domain = sh->fdb_domain;
8814         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
8815                 domain = sh->rx_domain;
8816         else
8817                 domain = sh->tx_domain;
8818         for (idx = 0; idx < resource->num_of_dest; idx++) {
8819                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8820                                  mlx5_malloc(MLX5_MEM_ZERO,
8821                                  sizeof(struct mlx5dv_dr_action_dest_attr),
8822                                  0, SOCKET_ID_ANY);
8823                 if (!dest_attr[idx]) {
8824                         rte_flow_error_set(error, ENOMEM,
8825                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8826                                            NULL,
8827                                            "cannot allocate resource memory");
8828                         goto error;
8829                 }
8830                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8831                 sample_act = &resource->sample_act[idx];
8832                 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8833                         dest_attr[idx]->dest = sample_act->dr_queue_action;
8834                 } else if (sample_act->action_flags ==
8835                           (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
8836                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8837                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8838                         dest_attr[idx]->dest_reformat->reformat =
8839                                         sample_act->dr_encap_action;
8840                         dest_attr[idx]->dest_reformat->dest =
8841                                         sample_act->dr_port_id_action;
8842                 } else if (sample_act->action_flags ==
8843                            MLX5_FLOW_ACTION_PORT_ID) {
8844                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
8845                 }
8846         }
8847         /* Create a dest array action. */
8848         cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8849                                                 (domain,
8850                                                  cache_resource->num_of_dest,
8851                                                  dest_attr);
8852         if (!cache_resource->action) {
8853                 rte_flow_error_set(error, ENOMEM,
8854                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8855                                    NULL,
8856                                    "cannot create destination array action");
8857                 goto error;
8858         }
8859         __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
8860         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8861                      &sh->dest_array_list,
8862                      dev_flow->handle->dvh.rix_dest_array, cache_resource,
8863                      next);
8864         dev_flow->dv.dest_array_res = cache_resource;
8865         DRV_LOG(DEBUG, "new destination array resource %p: refcnt %d++",
8866                 (void *)cache_resource,
8867                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
8868         for (idx = 0; idx < resource->num_of_dest; idx++)
8869                 mlx5_free(dest_attr[idx]);
8870         return 0;
8871 error:
8872         for (idx = 0; idx < resource->num_of_dest; idx++) {
8873                 struct mlx5_flow_sub_actions_idx *act_res =
8874                                         &cache_resource->sample_idx[idx];
8875                 if (act_res->rix_hrxq &&
8876                     !mlx5_hrxq_release(dev,
8877                                 act_res->rix_hrxq))
8878                         act_res->rix_hrxq = 0;
8879                 if (act_res->rix_encap_decap &&
8880                         !flow_dv_encap_decap_resource_release(dev,
8881                                 act_res->rix_encap_decap))
8882                         act_res->rix_encap_decap = 0;
8883                 if (act_res->rix_port_id_action &&
8884                         !flow_dv_port_id_action_resource_release(dev,
8885                                 act_res->rix_port_id_action))
8886                         act_res->rix_port_id_action = 0;
8887                 if (dest_attr[idx])
8888                         mlx5_free(dest_attr[idx]);
8889         }
8890
8891         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8892                                 dev_flow->handle->dvh.rix_dest_array);
8893         dev_flow->handle->dvh.rix_dest_array = 0;
8894         return -rte_errno;
8895 }
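
/*
 * Destination attribute layout sketch: each sub-action list above is
 * mapped to one mlx5dv_dr_action_dest_attr entry as follows.
 *
 *     QUEUE           -> MLX5DV_DR_ACTION_DEST (dr_queue_action)
 *     PORT_ID         -> MLX5DV_DR_ACTION_DEST (dr_port_id_action)
 *     PORT_ID | ENCAP -> MLX5DV_DR_ACTION_DEST_REFORMAT
 *                        (dr_encap_action + dr_port_id_action)
 */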
8896
8897 /**
8898  * Convert Sample action to DV specification.
8899  *
8900  * @param[in] dev
8901  *   Pointer to rte_eth_dev structure.
8902  * @param[in] action
8903  *   Pointer to action structure.
8904  * @param[in, out] dev_flow
8905  *   Pointer to the mlx5_flow.
8906  * @param[in] attr
8907  *   Pointer to the flow attributes.
8908  * @param[in, out] num_of_dest
8909  *   Pointer to the num of destination.
8910  * @param[in, out] sample_actions
8911  *   Pointer to sample actions list.
8912  * @param[in, out] res
8913  *   Pointer to sample resource.
8914  * @param[out] error
8915  *   Pointer to the error structure.
8916  *
8917  * @return
8918  *   0 on success, a negative errno value otherwise and rte_errno is set.
8919  */
8920 static int
8921 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8922                                 const struct rte_flow_action *action,
8923                                 struct mlx5_flow *dev_flow,
8924                                 const struct rte_flow_attr *attr,
8925                                 uint32_t *num_of_dest,
8926                                 void **sample_actions,
8927                                 struct mlx5_flow_dv_sample_resource *res,
8928                                 struct rte_flow_error *error)
8929 {
8930         struct mlx5_priv *priv = dev->data->dev_private;
8931         const struct rte_flow_action_sample *sample_action;
8932         const struct rte_flow_action *sub_actions;
8933         const struct rte_flow_action_queue *queue;
8934         struct mlx5_flow_sub_actions_list *sample_act;
8935         struct mlx5_flow_sub_actions_idx *sample_idx;
8936         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8937         struct mlx5_flow_rss_desc *rss_desc;
8938         uint64_t action_flags = 0;
8939
8940         MLX5_ASSERT(wks);
8941         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
8942         sample_act = &res->sample_act;
8943         sample_idx = &res->sample_idx;
8944         sample_action = (const struct rte_flow_action_sample *)action->conf;
8945         res->ratio = sample_action->ratio;
8946         sub_actions = sample_action->actions;
8947         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
8948                 int type = sub_actions->type;
8949                 uint32_t pre_rix = 0;
8950                 void *pre_r;
8951                 switch (type) {
8952                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8953                 {
8954                         struct mlx5_hrxq *hrxq;
8955                         uint32_t hrxq_idx;
8956
8957                         queue = sub_actions->conf;
8958                         rss_desc->queue_num = 1;
8959                         rss_desc->queue[0] = queue->index;
8960                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
8961                                                     rss_desc, &hrxq_idx);
8962                         if (!hrxq)
8963                                 return rte_flow_error_set
8964                                         (error, rte_errno,
8965                                          RTE_FLOW_ERROR_TYPE_ACTION,
8966                                          NULL,
8967                                          "cannot create fate queue");
8968                         sample_act->dr_queue_action = hrxq->action;
8969                         sample_idx->rix_hrxq = hrxq_idx;
8970                         sample_actions[sample_act->actions_num++] =
8971                                                 hrxq->action;
8972                         (*num_of_dest)++;
8973                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8974                         if (action_flags & MLX5_FLOW_ACTION_MARK)
8975                                 dev_flow->handle->rix_hrxq = hrxq_idx;
8976                         dev_flow->handle->fate_action =
8977                                         MLX5_FLOW_FATE_QUEUE;
8978                         break;
8979                 }
8980                 case RTE_FLOW_ACTION_TYPE_MARK:
8981                 {
8982                         uint32_t tag_be = mlx5_flow_mark_set
8983                                 (((const struct rte_flow_action_mark *)
8984                                 (sub_actions->conf))->id);
8985
8986                         dev_flow->handle->mark = 1;
8987                         pre_rix = dev_flow->handle->dvh.rix_tag;
8988                         /* Save the mark resource before sample */
8989                         pre_r = dev_flow->dv.tag_resource;
8990                         if (flow_dv_tag_resource_register(dev, tag_be,
8991                                                   dev_flow, error))
8992                                 return -rte_errno;
8993                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8994                         sample_act->dr_tag_action =
8995                                 dev_flow->dv.tag_resource->action;
8996                         sample_idx->rix_tag =
8997                                 dev_flow->handle->dvh.rix_tag;
8998                         sample_actions[sample_act->actions_num++] =
8999                                                 sample_act->dr_tag_action;
9000                         /* Recover the mark resource after sample */
9001                         dev_flow->dv.tag_resource = pre_r;
9002                         dev_flow->handle->dvh.rix_tag = pre_rix;
9003                         action_flags |= MLX5_FLOW_ACTION_MARK;
9004                         break;
9005                 }
9006                 case RTE_FLOW_ACTION_TYPE_COUNT:
9007                 {
9008                         uint32_t counter;
9009
9010                         counter = flow_dv_translate_create_counter(dev,
9011                                         dev_flow, sub_actions->conf, 0);
9012                         if (!counter)
9013                                 return rte_flow_error_set
9014                                                 (error, rte_errno,
9015                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9016                                                  NULL,
9017                                                  "cannot create counter"
9018                                                  " object.");
9019                         sample_idx->cnt = counter;
9020                         sample_act->dr_cnt_action =
9021                                   (flow_dv_counter_get_by_idx(dev,
9022                                   counter, NULL))->action;
9023                         sample_actions[sample_act->actions_num++] =
9024                                                 sample_act->dr_cnt_action;
9025                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9026                         break;
9027                 }
9028                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9029                 {
9030                         struct mlx5_flow_dv_port_id_action_resource
9031                                         port_id_resource;
9032                         uint32_t port_id = 0;
9033
9034                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9035                         /* Save the port id resource before sample */
9036                         pre_rix = dev_flow->handle->rix_port_id_action;
9037                         pre_r = dev_flow->dv.port_id_action;
9038                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9039                                                              &port_id, error))
9040                                 return -rte_errno;
9041                         port_id_resource.port_id = port_id;
9042                         if (flow_dv_port_id_action_resource_register
9043                             (dev, &port_id_resource, dev_flow, error))
9044                                 return -rte_errno;
9045                         sample_act->dr_port_id_action =
9046                                 dev_flow->dv.port_id_action->action;
9047                         sample_idx->rix_port_id_action =
9048                                 dev_flow->handle->rix_port_id_action;
9049                         sample_actions[sample_act->actions_num++] =
9050                                                 sample_act->dr_port_id_action;
9051                         /* Recover the port id resource after sample */
9052                         dev_flow->dv.port_id_action = pre_r;
9053                         dev_flow->handle->rix_port_id_action = pre_rix;
9054                         (*num_of_dest)++;
9055                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9056                         break;
9057                 }
9058                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9059                         /* Save the encap resource before sample */
9060                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9061                         pre_r = dev_flow->dv.encap_decap;
9062                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9063                                                            dev_flow,
9064                                                            attr->transfer,
9065                                                            error))
9066                                 return -rte_errno;
9067                         sample_act->dr_encap_action =
9068                                 dev_flow->dv.encap_decap->action;
9069                         sample_idx->rix_encap_decap =
9070                                 dev_flow->handle->dvh.rix_encap_decap;
9071                         sample_actions[sample_act->actions_num++] =
9072                                                 sample_act->dr_encap_action;
9073                         /* Recover the encap resource after sample */
9074                         dev_flow->dv.encap_decap = pre_r;
9075                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9076                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9077                         break;
9078                 default:
9079                         return rte_flow_error_set(error, EINVAL,
9080                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9081                                 NULL,
9082                                 "unsupported action in sample");
9083                 }
9084         }
9085         sample_act->action_flags = action_flags;
9086         res->ft_id = dev_flow->dv.group;
9087         if (attr->transfer) {
9088                 union {
9089                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9090                         uint64_t set_action;
9091                 } action_ctx = { .set_action = 0 };
9092
9093                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9094                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9095                          MLX5_MODIFICATION_TYPE_SET);
9096                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9097                          MLX5_MODI_META_REG_C_0);
9098                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9099                          priv->vport_meta_tag);
9100                 res->set_action = action_ctx.set_action;
9101         } else if (attr->ingress) {
9102                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9103         } else {
9104                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9105         }
9106         return 0;
9107 }
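
/*
 * Illustrative input (a sketch of hypothetical application code, not
 * taken from this file): a sample action mirroring one of every two
 * packets to queue 0, as this function would receive it.
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,
 *             .actions = sub_acts,
 *     };
 */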
9108
9109 /**
9110  * Create the Sample action DV resources (sampler or destination array).
9111  *
9112  * @param[in] dev
9113  *   Pointer to rte_eth_dev structure.
9114  * @param[in, out] dev_flow
9115  *   Pointer to the mlx5_flow.
9116  * @param[in] num_of_dest
9117  *   The num of destination.
9118  * @param[in, out] res
9119  *   Pointer to sample resource.
9120  * @param[in, out] mdest_res
9121  *   Pointer to destination array resource.
9122  * @param[in] sample_actions
9123  *   Pointer to sample path actions list.
9124  * @param[in] action_flags
9125  *   Holds the actions detected until now.
9126  * @param[out] error
9127  *   Pointer to the error structure.
9128  *
9129  * @return
9130  *   0 on success, a negative errno value otherwise and rte_errno is set.
9131  */
9132 static int
9133 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9134                              struct mlx5_flow *dev_flow,
9135                              uint32_t num_of_dest,
9136                              struct mlx5_flow_dv_sample_resource *res,
9137                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9138                              void **sample_actions,
9139                              uint64_t action_flags,
9140                              struct rte_flow_error *error)
9141 {
9142         /* Update normal path action resource at the last array index. */
9143         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9144         struct mlx5_flow_sub_actions_list *sample_act =
9145                                         &mdest_res->sample_act[dest_index];
9146         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9147         struct mlx5_flow_rss_desc *rss_desc;
9148         uint32_t normal_idx = 0;
9149         struct mlx5_hrxq *hrxq;
9150         uint32_t hrxq_idx;
9151
9152         MLX5_ASSERT(wks);
9153         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9154         if (num_of_dest > 1) {
9155                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9156                         /* Handle QP action for mirroring */
9157                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9158                                                     rss_desc, &hrxq_idx);
9159                         if (!hrxq)
9160                                 return rte_flow_error_set
9161                                      (error, rte_errno,
9162                                       RTE_FLOW_ERROR_TYPE_ACTION,
9163                                       NULL,
9164                                       "cannot create rx queue");
9165                         normal_idx++;
9166                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9167                         sample_act->dr_queue_action = hrxq->action;
9168                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9169                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9170                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9171                 }
9172                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9173                         normal_idx++;
9174                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9175                                 dev_flow->handle->dvh.rix_encap_decap;
9176                         sample_act->dr_encap_action =
9177                                 dev_flow->dv.encap_decap->action;
9178                 }
9179                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9180                         normal_idx++;
9181                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9182                                 dev_flow->handle->rix_port_id_action;
9183                         sample_act->dr_port_id_action =
9184                                 dev_flow->dv.port_id_action->action;
9185                 }
9186                 sample_act->actions_num = normal_idx;
9187                 /* Update sample action resource at the first array index. */
9188                 mdest_res->ft_type = res->ft_type;
9189                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9190                                 sizeof(struct mlx5_flow_sub_actions_idx));
9191                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9192                                 sizeof(struct mlx5_flow_sub_actions_list));
9193                 mdest_res->num_of_dest = num_of_dest;
9194                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9195                                                          dev_flow, error))
9196                         return rte_flow_error_set(error, EINVAL,
9197                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9198                                                   NULL, "can't create dest "
9199                                                   "array action");
9200         } else {
9201                 if (flow_dv_sample_resource_register(dev, res, dev_flow,
9202                                                      sample_actions, error))
9203                         return rte_flow_error_set(error, EINVAL,
9204                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9205                                                   NULL,
9206                                                   "can't create sample action");
9207         }
9208         return 0;
9209 }
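
/*
 * Fate selection sketch for the function above:
 *
 *     num_of_dest == 1 -> flow_dv_sample_resource_register()
 *                         (plain sampler object)
 *     num_of_dest  > 1 -> flow_dv_dest_array_resource_register()
 *                         (mirroring: sample path packed into index 0,
 *                          normal path fate into the last index)
 */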
9210
9211 /**
9212  * Fill the flow with DV spec. This function takes no locks;
9213  * the caller must hold the required mutex.
9214  *
9215  * @param[in] dev
9216  *   Pointer to rte_eth_dev structure.
9217  * @param[in, out] dev_flow
9218  *   Pointer to the sub flow.
9219  * @param[in] attr
9220  *   Pointer to the flow attributes.
9221  * @param[in] items
9222  *   Pointer to the list of items.
9223  * @param[in] actions
9224  *   Pointer to the list of actions.
9225  * @param[out] error
9226  *   Pointer to the error structure.
9227  *
9228  * @return
9229  *   0 on success, a negative errno value otherwise and rte_errno is set.
9230  */
9231 static int
9232 __flow_dv_translate(struct rte_eth_dev *dev,
9233                     struct mlx5_flow *dev_flow,
9234                     const struct rte_flow_attr *attr,
9235                     const struct rte_flow_item items[],
9236                     const struct rte_flow_action actions[],
9237                     struct rte_flow_error *error)
9238 {
9239         struct mlx5_priv *priv = dev->data->dev_private;
9240         struct mlx5_dev_config *dev_conf = &priv->config;
9241         struct rte_flow *flow = dev_flow->flow;
9242         struct mlx5_flow_handle *handle = dev_flow->handle;
9243         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9244         struct mlx5_flow_rss_desc *rss_desc;
9245         uint64_t item_flags = 0;
9246         uint64_t last_item = 0;
9247         uint64_t action_flags = 0;
9248         uint64_t priority = attr->priority;
9249         struct mlx5_flow_dv_matcher matcher = {
9250                 .mask = {
9251                         .size = sizeof(matcher.mask.buf) -
9252                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9253                 },
9254         };
9255         int actions_n = 0;
9256         bool actions_end = false;
9257         union {
9258                 struct mlx5_flow_dv_modify_hdr_resource res;
9259                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9260                             sizeof(struct mlx5_modification_cmd) *
9261                             (MLX5_MAX_MODIFY_NUM + 1)];
9262         } mhdr_dummy;
9263         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9264         const struct rte_flow_action_count *count = NULL;
9265         const struct rte_flow_action_age *age = NULL;
9266         union flow_dv_attr flow_attr = { .attr = 0 };
9267         uint32_t tag_be;
9268         union mlx5_flow_tbl_key tbl_key;
9269         uint32_t modify_action_position = UINT32_MAX;
9270         void *match_mask = matcher.mask.buf;
9271         void *match_value = dev_flow->dv.value.buf;
9272         uint8_t next_protocol = 0xff;
9273         struct rte_vlan_hdr vlan = { 0 };
9274         struct mlx5_flow_dv_dest_array_resource mdest_res;
9275         struct mlx5_flow_dv_sample_resource sample_res;
9276         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9277         struct mlx5_flow_sub_actions_list *sample_act;
9278         uint32_t sample_act_pos = UINT32_MAX;
9279         uint32_t num_of_dest = 0;
9280         int tmp_actions_n = 0;
9281         uint32_t table;
9282         int ret = 0;
9283         const struct mlx5_flow_tunnel *tunnel;
9284         struct flow_grp_info grp_info = {
9285                 .external = !!dev_flow->external,
9286                 .transfer = !!attr->transfer,
9287                 .fdb_def_rule = !!priv->fdb_def_rule,
9288         };
9289
9290         MLX5_ASSERT(wks);
9291         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9292         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9293         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9294         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9295                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9296         /* Update normal path action resource at the last array index. */
9297         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9298         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9299                  flow_items_to_tunnel(items) :
9300                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9301                  flow_actions_to_tunnel(actions) :
9302                  dev_flow->tunnel;
9305         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9306                                 (dev, tunnel, attr, items, actions);
9307         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9308                                        grp_info, error);
9309         if (ret)
9310                 return ret;
9311         dev_flow->dv.group = table;
9312         if (attr->transfer)
9313                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9314         if (priority == MLX5_FLOW_PRIO_RSVD)
9315                 priority = dev_conf->flow_prio - 1;
9316         /* Number of actions must be set to 0 in case of a dirty stack. */
9317         mhdr_res->actions_num = 0;
9318         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9319                 /*
9320                  * Do not add a decap action if the match rule drops the
9321                  * packet: HW rejects rules with both decap and drop.
9322                  */
9323                 bool add_decap = true;
9324                 const struct rte_flow_action *ptr = actions;
9325                 struct mlx5_flow_tbl_resource *tbl;
9326
9327                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9328                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9329                                 add_decap = false;
9330                                 break;
9331                         }
9332                 }
9333                 if (add_decap) {
9334                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9335                                                            attr->transfer,
9336                                                            error))
9337                                 return -rte_errno;
9338                         dev_flow->dv.actions[actions_n++] =
9339                                         dev_flow->dv.encap_decap->action;
9340                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9341                 }
9342                 /*
9343                  * Bind table_id with <group, table> for the tunnel match rule.
9344                  * The tunnel set rule establishes that binding in the JUMP
9345                  * action handler. Required when the application creates the
9346                  * tunnel match rule before the tunnel set rule.
9347                  */
9348                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9349                                                attr->transfer,
9350                                                !!dev_flow->external, tunnel,
9351                                                attr->group, 0, error);
9352                 if (!tbl)
9353                         return rte_flow_error_set
9354                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9355                                actions, "cannot register tunnel group");
9356         }
9357         for (; !actions_end ; actions++) {
9358                 const struct rte_flow_action_queue *queue;
9359                 const struct rte_flow_action_rss *rss;
9360                 const struct rte_flow_action *action = actions;
9361                 const uint8_t *rss_key;
9362                 const struct rte_flow_action_meter *mtr;
9363                 struct mlx5_flow_tbl_resource *tbl;
9364                 uint32_t port_id = 0;
9365                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9366                 int action_type = actions->type;
9367                 const struct rte_flow_action *found_action = NULL;
9368                 struct mlx5_flow_meter *fm = NULL;
9369                 uint32_t jump_group = 0;
9370
9371                 if (!mlx5_flow_os_action_supported(action_type))
9372                         return rte_flow_error_set(error, ENOTSUP,
9373                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9374                                                   actions,
9375                                                   "action not supported");
9376                 switch (action_type) {
9377                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9378                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9379                         break;
9380                 case RTE_FLOW_ACTION_TYPE_VOID:
9381                         break;
9382                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9383                         if (flow_dv_translate_action_port_id(dev, action,
9384                                                              &port_id, error))
9385                                 return -rte_errno;
9386                         port_id_resource.port_id = port_id;
9387                         MLX5_ASSERT(!handle->rix_port_id_action);
9388                         if (flow_dv_port_id_action_resource_register
9389                             (dev, &port_id_resource, dev_flow, error))
9390                                 return -rte_errno;
9391                         dev_flow->dv.actions[actions_n++] =
9392                                         dev_flow->dv.port_id_action->action;
9393                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9394                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9395                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9396                         num_of_dest++;
9397                         break;
9398                 case RTE_FLOW_ACTION_TYPE_FLAG:
9399                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9400                         dev_flow->handle->mark = 1;
9401                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9402                                 struct rte_flow_action_mark mark = {
9403                                         .id = MLX5_FLOW_MARK_DEFAULT,
9404                                 };
9405
9406                                 if (flow_dv_convert_action_mark(dev, &mark,
9407                                                                 mhdr_res,
9408                                                                 error))
9409                                         return -rte_errno;
9410                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9411                                 break;
9412                         }
9413                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9414                         /*
9415                          * Only one FLAG or MARK is supported per device flow
9416                          * right now. So the pointer to the tag resource must be
9417                          * zero before the register process.
9418                          */
9419                         MLX5_ASSERT(!handle->dvh.rix_tag);
9420                         if (flow_dv_tag_resource_register(dev, tag_be,
9421                                                           dev_flow, error))
9422                                 return -rte_errno;
9423                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9424                         dev_flow->dv.actions[actions_n++] =
9425                                         dev_flow->dv.tag_resource->action;
9426                         break;
9427                 case RTE_FLOW_ACTION_TYPE_MARK:
9428                         action_flags |= MLX5_FLOW_ACTION_MARK;
9429                         dev_flow->handle->mark = 1;
9430                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9431                                 const struct rte_flow_action_mark *mark =
9432                                         (const struct rte_flow_action_mark *)
9433                                                 actions->conf;
9434
9435                                 if (flow_dv_convert_action_mark(dev, mark,
9436                                                                 mhdr_res,
9437                                                                 error))
9438                                         return -rte_errno;
9439                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9440                                 break;
9441                         }
9442                         /* Fall-through */
9443                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9444                         /* Legacy (non-extensive) MARK action. */
9445                         tag_be = mlx5_flow_mark_set
9446                               (((const struct rte_flow_action_mark *)
9447                                (actions->conf))->id);
9448                         MLX5_ASSERT(!handle->dvh.rix_tag);
9449                         if (flow_dv_tag_resource_register(dev, tag_be,
9450                                                           dev_flow, error))
9451                                 return -rte_errno;
9452                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9453                         dev_flow->dv.actions[actions_n++] =
9454                                         dev_flow->dv.tag_resource->action;
9455                         break;
9456                 case RTE_FLOW_ACTION_TYPE_SET_META:
9457                         if (flow_dv_convert_action_set_meta
9458                                 (dev, mhdr_res, attr,
9459                                  (const struct rte_flow_action_set_meta *)
9460                                   actions->conf, error))
9461                                 return -rte_errno;
9462                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9463                         break;
9464                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9465                         if (flow_dv_convert_action_set_tag
9466                                 (dev, mhdr_res,
9467                                  (const struct rte_flow_action_set_tag *)
9468                                   actions->conf, error))
9469                                 return -rte_errno;
9470                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9471                         break;
9472                 case RTE_FLOW_ACTION_TYPE_DROP:
9473                         action_flags |= MLX5_FLOW_ACTION_DROP;
9474                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9475                         break;
9476                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9477                         queue = actions->conf;
9478                         rss_desc->queue_num = 1;
9479                         rss_desc->queue[0] = queue->index;
9480                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9481                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9482                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9483                         num_of_dest++;
9484                         break;
9485                 case RTE_FLOW_ACTION_TYPE_RSS:
9486                         rss = actions->conf;
9487                         memcpy(rss_desc->queue, rss->queue,
9488                                rss->queue_num * sizeof(uint16_t));
9489                         rss_desc->queue_num = rss->queue_num;
9490                         /* NULL RSS key indicates default RSS key. */
9491                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9492                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9493                         /*
9494                          * rss->level and rss->types should be set in advance
9495                          * when expanding items for RSS.
9496                          */
9497                         action_flags |= MLX5_FLOW_ACTION_RSS;
9498                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9499                         break;
9500                 case RTE_FLOW_ACTION_TYPE_AGE:
9501                 case RTE_FLOW_ACTION_TYPE_COUNT:
9502                         if (!dev_conf->devx) {
9503                                 return rte_flow_error_set
9504                                               (error, ENOTSUP,
9505                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9506                                                NULL,
9507                                                "count action not supported");
9508                         }
9509                         /* Save information first, will apply later. */
9510                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9511                                 count = action->conf;
9512                         else
9513                                 age = action->conf;
9514                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9515                         break;
9516                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9517                         dev_flow->dv.actions[actions_n++] =
9518                                                 priv->sh->pop_vlan_action;
9519                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9520                         break;
9521                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9522                         if (!(action_flags &
9523                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9524                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9525                         vlan.eth_proto = rte_be_to_cpu_16
9526                              ((((const struct rte_flow_action_of_push_vlan *)
9527                                                    actions->conf)->ethertype));
9528                         found_action = mlx5_flow_find_action
9529                                         (actions + 1,
9530                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9531                         if (found_action)
9532                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9533                         found_action = mlx5_flow_find_action
9534                                         (actions + 1,
9535                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9536                         if (found_action)
9537                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9538                         if (flow_dv_create_action_push_vlan
9539                                             (dev, attr, &vlan, dev_flow, error))
9540                                 return -rte_errno;
9541                         dev_flow->dv.actions[actions_n++] =
9542                                         dev_flow->dv.push_vlan_res->action;
9543                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9544                         break;
9545                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9546                         /* Handled by the preceding of_push_vlan action. */
9547                         MLX5_ASSERT(action_flags &
9548                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9549                         break;
9550                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9551                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9552                                 break;
9553                         flow_dev_get_vlan_info_from_items(items, &vlan);
9554                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9555                         /* If no VLAN push - this is a modify header action */
9556                         if (flow_dv_convert_action_modify_vlan_vid
9557                                                 (mhdr_res, actions, error))
9558                                 return -rte_errno;
9559                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9560                         break;
9561                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9562                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9563                         if (flow_dv_create_action_l2_encap(dev, actions,
9564                                                            dev_flow,
9565                                                            attr->transfer,
9566                                                            error))
9567                                 return -rte_errno;
9568                         dev_flow->dv.actions[actions_n++] =
9569                                         dev_flow->dv.encap_decap->action;
9570                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9571                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9572                                 sample_act->action_flags |=
9573                                                         MLX5_FLOW_ACTION_ENCAP;
9574                         break;
9575                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9576                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9577                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9578                                                            attr->transfer,
9579                                                            error))
9580                                 return -rte_errno;
9581                         dev_flow->dv.actions[actions_n++] =
9582                                         dev_flow->dv.encap_decap->action;
9583                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9584                         break;
9585                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9586                         /* Handle encap with preceding decap. */
9587                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9588                                 if (flow_dv_create_action_raw_encap
9589                                         (dev, actions, dev_flow, attr, error))
9590                                         return -rte_errno;
9591                                 dev_flow->dv.actions[actions_n++] =
9592                                         dev_flow->dv.encap_decap->action;
9593                         } else {
9594                                 /* Handle encap without preceding decap. */
9595                                 if (flow_dv_create_action_l2_encap
9596                                     (dev, actions, dev_flow, attr->transfer,
9597                                      error))
9598                                         return -rte_errno;
9599                                 dev_flow->dv.actions[actions_n++] =
9600                                         dev_flow->dv.encap_decap->action;
9601                         }
9602                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9603                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9604                                 sample_act->action_flags |=
9605                                                         MLX5_FLOW_ACTION_ENCAP;
9606                         break;
9607                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9608                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9609                                 ;
9610                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9611                                 if (flow_dv_create_action_l2_decap
9612                                     (dev, dev_flow, attr->transfer, error))
9613                                         return -rte_errno;
9614                                 dev_flow->dv.actions[actions_n++] =
9615                                         dev_flow->dv.encap_decap->action;
9616                         }
9617                         /* If decap is followed by encap, handle it at encap. */
9618                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9619                         break;
9620                 case RTE_FLOW_ACTION_TYPE_JUMP:
9621                         jump_group = ((const struct rte_flow_action_jump *)
9622                                                         action->conf)->group;
9623                         grp_info.std_tbl_fix = 0;
9624                         ret = mlx5_flow_group_to_table(dev, tunnel,
9625                                                        jump_group,
9626                                                        &table,
9627                                                        grp_info, error);
9628                         if (ret)
9629                                 return ret;
9630                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9631                                                        attr->transfer,
9632                                                        !!dev_flow->external,
9633                                                        tunnel, jump_group, 0,
9634                                                        error);
9635                         if (!tbl)
9636                                 return rte_flow_error_set
9637                                                 (error, errno,
9638                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9639                                                  NULL,
9640                                                  "cannot create jump action.");
9641                         if (flow_dv_jump_tbl_resource_register
9642                             (dev, tbl, dev_flow, error)) {
9643                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9644                                 return rte_flow_error_set
9645                                                 (error, errno,
9646                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9647                                                  NULL,
9648                                                  "cannot create jump action.");
9649                         }
9650                         dev_flow->dv.actions[actions_n++] =
9651                                         dev_flow->dv.jump->action;
9652                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9653                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9654                         break;
9655                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9656                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9657                         if (flow_dv_convert_action_modify_mac
9658                                         (mhdr_res, actions, error))
9659                                 return -rte_errno;
9660                         action_flags |= actions->type ==
9661                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9662                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9663                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9664                         break;
9665                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9666                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9667                         if (flow_dv_convert_action_modify_ipv4
9668                                         (mhdr_res, actions, error))
9669                                 return -rte_errno;
9670                         action_flags |= actions->type ==
9671                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9672                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9673                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9674                         break;
9675                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9676                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9677                         if (flow_dv_convert_action_modify_ipv6
9678                                         (mhdr_res, actions, error))
9679                                 return -rte_errno;
9680                         action_flags |= actions->type ==
9681                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9682                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9683                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9684                         break;
9685                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9686                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9687                         if (flow_dv_convert_action_modify_tp
9688                                         (mhdr_res, actions, items,
9689                                          &flow_attr, dev_flow, !!(action_flags &
9690                                          MLX5_FLOW_ACTION_DECAP), error))
9691                                 return -rte_errno;
9692                         action_flags |= actions->type ==
9693                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9694                                         MLX5_FLOW_ACTION_SET_TP_SRC :
9695                                         MLX5_FLOW_ACTION_SET_TP_DST;
9696                         break;
9697                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9698                         if (flow_dv_convert_action_modify_dec_ttl
9699                                         (mhdr_res, items, &flow_attr, dev_flow,
9700                                          !!(action_flags &
9701                                          MLX5_FLOW_ACTION_DECAP), error))
9702                                 return -rte_errno;
9703                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9704                         break;
9705                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9706                         if (flow_dv_convert_action_modify_ttl
9707                                         (mhdr_res, actions, items, &flow_attr,
9708                                          dev_flow, !!(action_flags &
9709                                          MLX5_FLOW_ACTION_DECAP), error))
9710                                 return -rte_errno;
9711                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9712                         break;
9713                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9714                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9715                         if (flow_dv_convert_action_modify_tcp_seq
9716                                         (mhdr_res, actions, error))
9717                                 return -rte_errno;
9718                         action_flags |= actions->type ==
9719                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9720                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
9721                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9722                         break;
9723
9724                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9725                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9726                         if (flow_dv_convert_action_modify_tcp_ack
9727                                         (mhdr_res, actions, error))
9728                                 return -rte_errno;
9729                         action_flags |= actions->type ==
9730                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9731                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
9732                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
9733                         break;
9734                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9735                         if (flow_dv_convert_action_set_reg
9736                                         (mhdr_res, actions, error))
9737                                 return -rte_errno;
9738                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9739                         break;
9740                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9741                         if (flow_dv_convert_action_copy_mreg
9742                                         (dev, mhdr_res, actions, error))
9743                                 return -rte_errno;
9744                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9745                         break;
9746                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9747                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9748                         dev_flow->handle->fate_action =
9749                                         MLX5_FLOW_FATE_DEFAULT_MISS;
9750                         break;
9751                 case RTE_FLOW_ACTION_TYPE_METER:
9752                         mtr = actions->conf;
9753                         if (!flow->meter) {
9754                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9755                                                             attr, error);
9756                                 if (!fm)
9757                                         return rte_flow_error_set(error,
9758                                                 rte_errno,
9759                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9760                                                 NULL,
9761                                                 "meter not found "
9762                                                 "or invalid parameters");
9763                                 flow->meter = fm->idx;
9764                         }
9765                         /* Set the meter action. */
9766                         if (!fm) {
9767                                 fm = mlx5_ipool_get(priv->sh->ipool
9768                                                 [MLX5_IPOOL_MTR], flow->meter);
9769                                 if (!fm)
9770                                         return rte_flow_error_set(error,
9771                                                 rte_errno,
9772                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9773                                                 NULL,
9774                                                 "meter not found "
9775                                                 "or invalid parameters");
9776                         }
9777                         dev_flow->dv.actions[actions_n++] =
9778                                 fm->mfts->meter_action;
9779                         action_flags |= MLX5_FLOW_ACTION_METER;
9780                         break;
9781                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9782                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9783                                                               actions, error))
9784                                 return -rte_errno;
9785                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9786                         break;
9787                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9788                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9789                                                               actions, error))
9790                                 return -rte_errno;
9791                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9792                         break;
9793                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9794                         sample_act_pos = actions_n;
9795                         ret = flow_dv_translate_action_sample(dev,
9796                                                               actions,
9797                                                               dev_flow, attr,
9798                                                               &num_of_dest,
9799                                                               sample_actions,
9800                                                               &sample_res,
9801                                                               error);
9802                         if (ret < 0)
9803                                 return ret;
9804                         actions_n++;
9805                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9806                         /* Put encap action into group if used with port_id. */
9807                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9808                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9809                                 sample_act->action_flags |=
9810                                                         MLX5_FLOW_ACTION_ENCAP;
9811                         break;
9812                 case RTE_FLOW_ACTION_TYPE_END:
9813                         actions_end = true;
9814                         if (mhdr_res->actions_num) {
9815                                 /* create modify action if needed. */
9816                                 if (flow_dv_modify_hdr_resource_register
9817                                         (dev, mhdr_res, dev_flow, error))
9818                                         return -rte_errno;
9819                                 dev_flow->dv.actions[modify_action_position] =
9820                                         handle->dvh.modify_hdr->action;
9821                         }
9822                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9823                                 flow->counter =
9824                                         flow_dv_translate_create_counter(dev,
9825                                                 dev_flow, count, age);
9826
9827                                 if (!flow->counter)
9828                                         return rte_flow_error_set
9829                                                 (error, rte_errno,
9830                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9831                                                 NULL,
9832                                                 "cannot create counter"
9833                                                 " object.");
9834                                 dev_flow->dv.actions[actions_n] =
9835                                           (flow_dv_counter_get_by_idx(dev,
9836                                           flow->counter, NULL))->action;
9837                                 actions_n++;
9838                         }
9839                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9840                                 ret = flow_dv_create_action_sample(dev,
9841                                                           dev_flow,
9842                                                           num_of_dest,
9843                                                           &sample_res,
9844                                                           &mdest_res,
9845                                                           sample_actions,
9846                                                           action_flags,
9847                                                           error);
9848                                 if (ret < 0)
9849                                         return rte_flow_error_set
9850                                                 (error, rte_errno,
9851                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9852                                                 NULL,
9853                                                 "cannot create sample action");
9854                                 if (num_of_dest > 1) {
9855                                         dev_flow->dv.actions[sample_act_pos] =
9856                                         dev_flow->dv.dest_array_res->action;
9857                                 } else {
9858                                         dev_flow->dv.actions[sample_act_pos] =
9859                                         dev_flow->dv.sample_res->verbs_action;
9860                                 }
9861                         }
9862                         break;
9863                 default:
9864                         break;
9865                 }
9866                 if (mhdr_res->actions_num &&
9867                     modify_action_position == UINT32_MAX)
9868                         modify_action_position = actions_n++;
9869         }
9870         /*
9871          * For multiple destinations (sample action with ratio=1), the encap
9872          * action and the port_id action are combined into a group action.
9873          * Therefore remove those original actions from the flow and use
9874          * only the sample action instead.
9875          */
9876         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9877                 int i;
9878                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9879
9880                 for (i = 0; i < actions_n; i++) {
9881                         if ((sample_act->dr_encap_action &&
9882                                 sample_act->dr_encap_action ==
9883                                 dev_flow->dv.actions[i]) ||
9884                                 (sample_act->dr_port_id_action &&
9885                                 sample_act->dr_port_id_action ==
9886                                 dev_flow->dv.actions[i]))
9887                                 continue;
9888                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9889                 }
9890                 memcpy((void *)dev_flow->dv.actions,
9891                                 (void *)temp_actions,
9892                                 tmp_actions_n * sizeof(void *));
9893                 actions_n = tmp_actions_n;
9894         }
9895         dev_flow->dv.actions_n = actions_n;
9896         dev_flow->act_flags = action_flags;
9897         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9898                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9899                 int item_type = items->type;
9900
9901                 if (!mlx5_flow_os_item_supported(item_type))
9902                         return rte_flow_error_set(error, ENOTSUP,
9903                                                   RTE_FLOW_ERROR_TYPE_ITEM,
9904                                                   NULL, "item not supported");
9905                 switch (item_type) {
9906                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9907                         flow_dv_translate_item_port_id(dev, match_mask,
9908                                                        match_value, items);
9909                         last_item = MLX5_FLOW_ITEM_PORT_ID;
9910                         break;
9911                 case RTE_FLOW_ITEM_TYPE_ETH:
9912                         flow_dv_translate_item_eth(match_mask, match_value,
9913                                                    items, tunnel,
9914                                                    dev_flow->dv.group);
9915                         matcher.priority = action_flags &
9916                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
9917                                         !dev_flow->external ?
9918                                         MLX5_PRIORITY_MAP_L3 :
9919                                         MLX5_PRIORITY_MAP_L2;
9920                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9921                                              MLX5_FLOW_LAYER_OUTER_L2;
9922                         break;
9923                 case RTE_FLOW_ITEM_TYPE_VLAN:
9924                         flow_dv_translate_item_vlan(dev_flow,
9925                                                     match_mask, match_value,
9926                                                     items, tunnel,
9927                                                     dev_flow->dv.group);
9928                         matcher.priority = MLX5_PRIORITY_MAP_L2;
9929                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9930                                               MLX5_FLOW_LAYER_INNER_VLAN) :
9931                                              (MLX5_FLOW_LAYER_OUTER_L2 |
9932                                               MLX5_FLOW_LAYER_OUTER_VLAN);
9933                         break;
9934                 case RTE_FLOW_ITEM_TYPE_IPV4:
9935                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9936                                                   &item_flags, &tunnel);
9937                         flow_dv_translate_item_ipv4(match_mask, match_value,
9938                                                     items, tunnel,
9939                                                     dev_flow->dv.group);
9940                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9941                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
9942                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
9943                         if (items->mask != NULL &&
9944                             ((const struct rte_flow_item_ipv4 *)
9945                              items->mask)->hdr.next_proto_id) {
9946                                 next_protocol =
9947                                         ((const struct rte_flow_item_ipv4 *)
9948                                          (items->spec))->hdr.next_proto_id;
9949                                 next_protocol &=
9950                                         ((const struct rte_flow_item_ipv4 *)
9951                                          (items->mask))->hdr.next_proto_id;
9952                         } else {
9953                                 /* Reset for inner layer. */
9954                                 next_protocol = 0xff;
9955                         }
9956                         break;
9957                 case RTE_FLOW_ITEM_TYPE_IPV6:
9958                         mlx5_flow_tunnel_ip_check(items, next_protocol,
9959                                                   &item_flags, &tunnel);
9960                         flow_dv_translate_item_ipv6(match_mask, match_value,
9961                                                     items, tunnel,
9962                                                     dev_flow->dv.group);
9963                         matcher.priority = MLX5_PRIORITY_MAP_L3;
9964                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
9965                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
9966                         if (items->mask != NULL &&
9967                             ((const struct rte_flow_item_ipv6 *)
9968                              items->mask)->hdr.proto) {
9969                                 next_protocol =
9970                                         ((const struct rte_flow_item_ipv6 *)
9971                                          items->spec)->hdr.proto;
9972                                 next_protocol &=
9973                                         ((const struct rte_flow_item_ipv6 *)
9974                                          items->mask)->hdr.proto;
9975                         } else {
9976                                 /* Reset for inner layer. */
9977                                 next_protocol = 0xff;
9978                         }
9979                         break;
9980                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
9981                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
9982                                                              match_value,
9983                                                              items, tunnel);
9984                         last_item = tunnel ?
9985                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
9986                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
9987                         if (items->mask != NULL &&
9988                             ((const struct rte_flow_item_ipv6_frag_ext *)
9989                              items->mask)->hdr.next_header) {
9990                                 next_protocol =
9991                                 ((const struct rte_flow_item_ipv6_frag_ext *)
9992                                  items->spec)->hdr.next_header;
9993                                 next_protocol &=
9994                                 ((const struct rte_flow_item_ipv6_frag_ext *)
9995                                  items->mask)->hdr.next_header;
9996                         } else {
9997                                 /* Reset for inner layer. */
9998                                 next_protocol = 0xff;
9999                         }
10000                         break;
10001                 case RTE_FLOW_ITEM_TYPE_TCP:
10002                         flow_dv_translate_item_tcp(match_mask, match_value,
10003                                                    items, tunnel);
10004                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10005                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10006                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10007                         break;
10008                 case RTE_FLOW_ITEM_TYPE_UDP:
10009                         flow_dv_translate_item_udp(match_mask, match_value,
10010                                                    items, tunnel);
10011                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10012                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10013                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10014                         break;
10015                 case RTE_FLOW_ITEM_TYPE_GRE:
10016                         flow_dv_translate_item_gre(match_mask, match_value,
10017                                                    items, tunnel);
10018                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10019                         last_item = MLX5_FLOW_LAYER_GRE;
10020                         break;
10021                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10022                         flow_dv_translate_item_gre_key(match_mask,
10023                                                        match_value, items);
10024                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10025                         break;
10026                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10027                         flow_dv_translate_item_nvgre(match_mask, match_value,
10028                                                      items, tunnel);
10029                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10030                         last_item = MLX5_FLOW_LAYER_GRE;
10031                         break;
10032                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10033                         flow_dv_translate_item_vxlan(match_mask, match_value,
10034                                                      items, tunnel);
10035                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10036                         last_item = MLX5_FLOW_LAYER_VXLAN;
10037                         break;
10038                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10039                         flow_dv_translate_item_vxlan_gpe(match_mask,
10040                                                          match_value, items,
10041                                                          tunnel);
10042                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10043                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10044                         break;
10045                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10046                         flow_dv_translate_item_geneve(match_mask, match_value,
10047                                                       items, tunnel);
10048                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10049                         last_item = MLX5_FLOW_LAYER_GENEVE;
10050                         break;
10051                 case RTE_FLOW_ITEM_TYPE_MPLS:
10052                         flow_dv_translate_item_mpls(match_mask, match_value,
10053                                                     items, last_item, tunnel);
10054                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10055                         last_item = MLX5_FLOW_LAYER_MPLS;
10056                         break;
10057                 case RTE_FLOW_ITEM_TYPE_MARK:
10058                         flow_dv_translate_item_mark(dev, match_mask,
10059                                                     match_value, items);
10060                         last_item = MLX5_FLOW_ITEM_MARK;
10061                         break;
10062                 case RTE_FLOW_ITEM_TYPE_META:
10063                         flow_dv_translate_item_meta(dev, match_mask,
10064                                                     match_value, attr, items);
10065                         last_item = MLX5_FLOW_ITEM_METADATA;
10066                         break;
10067                 case RTE_FLOW_ITEM_TYPE_ICMP:
10068                         flow_dv_translate_item_icmp(match_mask, match_value,
10069                                                     items, tunnel);
10070                         last_item = MLX5_FLOW_LAYER_ICMP;
10071                         break;
10072                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10073                         flow_dv_translate_item_icmp6(match_mask, match_value,
10074                                                       items, tunnel);
10075                         last_item = MLX5_FLOW_LAYER_ICMP6;
10076                         break;
10077                 case RTE_FLOW_ITEM_TYPE_TAG:
10078                         flow_dv_translate_item_tag(dev, match_mask,
10079                                                    match_value, items);
10080                         last_item = MLX5_FLOW_ITEM_TAG;
10081                         break;
10082                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10083                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10084                                                         match_value, items);
10085                         last_item = MLX5_FLOW_ITEM_TAG;
10086                         break;
10087                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10088                         flow_dv_translate_item_tx_queue(dev, match_mask,
10089                                                         match_value,
10090                                                         items);
10091                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10092                         break;
10093                 case RTE_FLOW_ITEM_TYPE_GTP:
10094                         flow_dv_translate_item_gtp(match_mask, match_value,
10095                                                    items, tunnel);
10096                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10097                         last_item = MLX5_FLOW_LAYER_GTP;
10098                         break;
10099                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10100                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10101                                 /* Create it only the first time it is used. */
10102                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10103                                 if (ret)
10104                                         return rte_flow_error_set
10105                                                 (error, -ret,
10106                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10107                                                 NULL,
10108                                                 "cannot create eCPRI parser");
10109                         }
10110                         /* Adjust the matcher mask and device flow value sizes. */
10111                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10112                         dev_flow->dv.value.size =
10113                                         MLX5_ST_SZ_BYTES(fte_match_param);
10114                         flow_dv_translate_item_ecpri(dev, match_mask,
10115                                                      match_value, items);
10116                         /* No other protocol should follow eCPRI layer. */
10117                         last_item = MLX5_FLOW_LAYER_ECPRI;
10118                         break;
10119                 default:
10120                         break;
10121                 }
10122                 item_flags |= last_item;
10123         }
10124         /*
10125          * When E-Switch mode is enabled, we have two cases where we need to
10126          * set the source port manually.
10127          * The first is a NIC steering rule, and the second is an E-Switch
10128          * rule where no port_id item was found. In both cases the source
10129          * port is set according to the current port in use.
10130          */
10131         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10132             (priv->representor || priv->master)) {
10133                 if (flow_dv_translate_item_port_id(dev, match_mask,
10134                                                    match_value, NULL))
10135                         return -rte_errno;
10136         }
10137 #ifdef RTE_LIBRTE_MLX5_DEBUG
10138         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10139                                               dev_flow->dv.value.buf));
10140 #endif
10141         /*
10142          * Layers may already be initialized from the prefix flow if this
10143          * dev_flow is the suffix flow.
10144          */
10145         handle->layers |= item_flags;
10146         if (action_flags & MLX5_FLOW_ACTION_RSS)
10147                 flow_dv_hashfields_set(dev_flow, rss_desc);
10148         /* Register matcher. */
10149         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10150                                     matcher.mask.size);
10151         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10152                                                      matcher.priority);
10153         /* The reserved field does not need to be set to 0 here. */
10154         tbl_key.domain = attr->transfer;
10155         tbl_key.direction = attr->egress;
10156         tbl_key.table_id = dev_flow->dv.group;
10157         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10158                 return -rte_errno;
10159         return 0;
10160 }
10161
10162 /**
10163  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10164  * and tunnel.
10165  *
10166  * @param[in, out] action
10167  *   Shared RSS action holding hash RX queue objects.
10168  * @param[in] hash_fields
10169  *   Defines combination of packet fields to participate in RX hash.
10170  * @param[in] tunnel
10171  *   Tunnel type.
10172  * @param[in] hrxq_idx
10173  *   Hash RX queue index to set.
10174  *
10175  * @return
10176  *   0 on success, otherwise negative errno value.
10177  */
10178 static int
10179 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10180                               const uint64_t hash_fields,
10181                               const int tunnel,
10182                               uint32_t hrxq_idx)
10183 {
10184         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10185
10186         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10187         case MLX5_RSS_HASH_IPV4:
10188                 hrxqs[0] = hrxq_idx;
10189                 return 0;
10190         case MLX5_RSS_HASH_IPV4_TCP:
10191                 hrxqs[1] = hrxq_idx;
10192                 return 0;
10193         case MLX5_RSS_HASH_IPV4_UDP:
10194                 hrxqs[2] = hrxq_idx;
10195                 return 0;
10196         case MLX5_RSS_HASH_IPV6:
10197                 hrxqs[3] = hrxq_idx;
10198                 return 0;
10199         case MLX5_RSS_HASH_IPV6_TCP:
10200                 hrxqs[4] = hrxq_idx;
10201                 return 0;
10202         case MLX5_RSS_HASH_IPV6_UDP:
10203                 hrxqs[5] = hrxq_idx;
10204                 return 0;
10205         case MLX5_RSS_HASH_NONE:
10206                 hrxqs[6] = hrxq_idx;
10207                 return 0;
10208         default:
10209                 return -1;
10210         }
10211 }
10212
10213 /**
10214  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10215  * and tunnel.
10216  *
10217  * @param[in] action
10218  *   Shared RSS action holding hash RX queue objects.
10219  * @param[in] hash_fields
10220  *   Defines combination of packet fields to participate in RX hash.
10221  * @param[in] tunnel
10222  *   Tunnel type.
10223  *
10224  * @return
10225  *   Valid hash RX queue index, otherwise 0.
10226  */
10227 static uint32_t
10228 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10229                                  const uint64_t hash_fields,
10230                                  const int tunnel)
10231 {
10232         const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10233
10234         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10235         case MLX5_RSS_HASH_IPV4:
10236                 return hrxqs[0];
10237         case MLX5_RSS_HASH_IPV4_TCP:
10238                 return hrxqs[1];
10239         case MLX5_RSS_HASH_IPV4_UDP:
10240                 return hrxqs[2];
10241         case MLX5_RSS_HASH_IPV6:
10242                 return hrxqs[3];
10243         case MLX5_RSS_HASH_IPV6_TCP:
10244                 return hrxqs[4];
10245         case MLX5_RSS_HASH_IPV6_UDP:
10246                 return hrxqs[5];
10247         case MLX5_RSS_HASH_NONE:
10248                 return hrxqs[6];
10249         default:
10250                 return 0;
10251         }
10252 }
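
/*
 * Usage sketch for the two helpers above (illustrative only; "shared"
 * and "idx" are hypothetical local variables, with "idx" being a valid
 * hash RX queue index):
 *
 *     __flow_dv_action_rss_hrxq_set(shared, MLX5_RSS_HASH_IPV4_TCP, 0, idx);
 *     ...
 *     idx = __flow_dv_action_rss_hrxq_lookup(shared,
 *                                            MLX5_RSS_HASH_IPV4_TCP, 0);
 *
 * The same tunnel flag must be used for set and lookup so that the index
 * is stored to and read from the same slot.
 */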
10253
10254 /**
10255  * Retrieve the hash RX queue suitable for the *flow*.
10256  * If a shared action is configured for the *flow*, the suitable hash RX
10257  * queue will be retrieved from the attached shared action.
10258  *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
10259  * @param[in] flow
10260  *   Pointer to the flow structure.
10261  * @param[in] dev_flow
10262  *   Pointer to the sub flow.
10263  * @param[out] hrxq
10264  *   Pointer to retrieved hash RX queue object.
10265  *
10266  * @return
10267  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10268  */
10269 static uint32_t
10270 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10271                            struct mlx5_flow *dev_flow,
10272                            struct mlx5_hrxq **hrxq)
10273 {
10274         struct mlx5_priv *priv = dev->data->dev_private;
10275         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10276         uint32_t hrxq_idx;
10277
10278         if (flow->shared_rss) {
10279                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10280                                 (flow->shared_rss, dev_flow->hash_fields,
10281                                  !!(dev_flow->handle->layers &
10282                                     MLX5_FLOW_LAYER_TUNNEL));
10283                 if (hrxq_idx) {
10284                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10285                                                hrxq_idx);
10286                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10287                                            __ATOMIC_RELAXED);
10288                 }
10289         } else {
10290                 struct mlx5_flow_rss_desc *rss_desc =
10291                                 &wks->rss_desc[!!wks->flow_nested_idx];
10292
10293                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10294                                              &hrxq_idx);
10295         }
10296         return hrxq_idx;
10297 }
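
/*
 * Note: on the shared-action path above a new reference is taken on the
 * looked-up hash RX queue; the caller is expected to drop it with
 * mlx5_hrxq_release(), as done on the error path of __flow_dv_apply()
 * below.
 */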
10298
10299 /**
10300  * Apply the flow to the NIC, lock free,
10301  * Apply the flow to the NIC, lock free
10302  * (the mutex should be acquired by the caller).
10303  * @param[in] dev
10304  *   Pointer to the Ethernet device structure.
10305  * @param[in, out] flow
10306  *   Pointer to flow structure.
10307  * @param[out] error
10308  *   Pointer to error structure.
10309  *
10310  * @return
10311  *   0 on success, a negative errno value otherwise and rte_errno is set.
10312  */
10313 static int
10314 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10315                 struct rte_flow_error *error)
10316 {
10317         struct mlx5_flow_dv_workspace *dv;
10318         struct mlx5_flow_handle *dh;
10319         struct mlx5_flow_handle_dv *dv_h;
10320         struct mlx5_flow *dev_flow;
10321         struct mlx5_priv *priv = dev->data->dev_private;
10322         uint32_t handle_idx;
10323         int n;
10324         int err;
10325         int idx;
10326         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10327
10328         MLX5_ASSERT(wks);
10329         for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
10330                 dev_flow = &wks->flows[idx];
10331                 dv = &dev_flow->dv;
10332                 dh = dev_flow->handle;
10333                 dv_h = &dh->dvh;
10334                 n = dv->actions_n;
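                /* Append the fate action after the translated actions. */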
10335                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10336                         if (dv->transfer) {
10337                                 dv->actions[n++] = priv->sh->esw_drop_action;
10338                         } else {
10339                                 MLX5_ASSERT(priv->drop_queue.hrxq);
10340                                 dv->actions[n++] =
10341                                                 priv->drop_queue.hrxq->action;
10342                         }
10343                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10344                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
10345                         struct mlx5_hrxq *hrxq = NULL;
10346                         uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10347                                                 (dev, flow, dev_flow, &hrxq);
10348                         if (!hrxq) {
10349                                 rte_flow_error_set
10350                                         (error, rte_errno,
10351                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10352                                          "cannot get hash queue");
10353                                 goto error;
10354                         }
10355                         dh->rix_hrxq = hrxq_idx;
10356                         dv->actions[n++] = hrxq->action;
10357                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10358                         if (!priv->sh->default_miss_action) {
10359                                 rte_flow_error_set
10360                                         (error, rte_errno,
10361                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10362                                          "default miss action not created.");
10363                                 goto error;
10364                         }
10365                         dv->actions[n++] = priv->sh->default_miss_action;
10366                 }
10367                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10368                                                (void *)&dv->value, n,
10369                                                dv->actions, &dh->drv_flow);
10370                 if (err) {
10371                         rte_flow_error_set(error, errno,
10372                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10373                                            NULL,
10374                                            "hardware refuses to create flow");
10375                         goto error;
10376                 }
10377                 if (priv->vmwa_context &&
10378                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
10379                         /*
10380                          * The rule contains the VLAN pattern.
10381                          * For VF we are going to create VLAN
10382                          * interface to make hypervisor set correct
10383                          * e-Switch vport context.
10384                          */
10385                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10386                 }
10387         }
10388         return 0;
10389 error:
10390         err = rte_errno; /* Save rte_errno before cleanup. */
10391         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10392                        handle_idx, dh, next) {
10393                 /* hrxq is a union; don't clear it if the flag is not set. */
10394                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10395                         mlx5_hrxq_release(dev, dh->rix_hrxq);
10396                         dh->rix_hrxq = 0;
10397                 }
10398                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10399                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10400         }
10401         rte_errno = err; /* Restore rte_errno. */
10402         return -rte_errno;
10403 }
10404
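/**
 * Remove callback for the matcher cache list: destroy the flow matcher
 * object and free the cache entry.
 *
 * @param list
 *   Pointer to the cache list (unused).
 * @param entry
 *   Pointer to the matcher cache entry.
 */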
10405 void
10406 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10407                           struct mlx5_cache_entry *entry)
10408 {
10409         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10410                                                           entry);
10411
10412         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10413         mlx5_free(cache);
10414 }
10415
10416 /**
10417  * Release the flow matcher.
10418  *
10419  * @param dev
10420  *   Pointer to Ethernet device.
10421  * @param handle
10422  *   Pointer to mlx5_flow_handle.
10423  *
10424  * @return
10425  *   1 while a reference on it exists, 0 when freed.
10426  */
10427 static int
10428 flow_dv_matcher_release(struct rte_eth_dev *dev,
10429                         struct mlx5_flow_handle *handle)
10430 {
10431         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10432         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10433                                                             typeof(*tbl), tbl);
10434         int ret;
10435
10436         MLX5_ASSERT(matcher->matcher_object);
10437         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10438         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10439         return ret;
10440 }
10441
10442 /**
10443  * Release encap_decap resource.
10444  *
10445  * @param list
10446  *   Pointer to the hash list.
10447  * @param entry
10448  *   Pointer to the existing resource entry object.
10449  */
10450 void
10451 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10452                               struct mlx5_hlist_entry *entry)
10453 {
10454         struct mlx5_dev_ctx_shared *sh = list->ctx;
10455         struct mlx5_flow_dv_encap_decap_resource *res =
10456                 container_of(entry, typeof(*res), entry);
10457
10458         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10459         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10460 }
10461
10462 /**
10463  * Release an encap/decap resource.
10464  *
10465  * @param dev
10466  *   Pointer to Ethernet device.
10467  * @param encap_decap_idx
10468  *   Index of encap decap resource.
10469  *
10470  * @return
10471  *   1 while a reference on it exists, 0 when freed.
10472  */
10473 static int
10474 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10475                                      uint32_t encap_decap_idx)
10476 {
10477         struct mlx5_priv *priv = dev->data->dev_private;
10478         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10479
10480         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10481                                         encap_decap_idx);
10482         if (!cache_resource)
10483                 return 0;
10484         MLX5_ASSERT(cache_resource->action);
10485         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10486                                      &cache_resource->entry);
10487 }
10488
10489 /**
10490  * Release a jump to table action resource.
10491  *
10492  * @param dev
10493  *   Pointer to Ethernet device.
10494  * @param handle
10495  *   Pointer to mlx5_flow_handle.
10496  *
10497  * @return
10498  *   1 while a reference on it exists, 0 when freed.
10499  */
10500 static int
10501 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10502                                   struct mlx5_flow_handle *handle)
10503 {
10504         struct mlx5_priv *priv = dev->data->dev_private;
10505         struct mlx5_flow_tbl_data_entry *tbl_data;
10506
10507         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10508                              handle->rix_jump);
10509         if (!tbl_data)
10510                 return 0;
10511         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10512 }
10513
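/**
 * Remove callback for the modify-header hash list: destroy the modify
 * header action and free the entry.
 *
 * @param list
 *   Pointer to the hash list (unused).
 * @param entry
 *   Pointer to the existing resource entry object.
 */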
10514 void
10515 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10516                          struct mlx5_hlist_entry *entry)
10517 {
10518         struct mlx5_flow_dv_modify_hdr_resource *res =
10519                 container_of(entry, typeof(*res), entry);
10520
10521         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10522         mlx5_free(entry);
10523 }
10524
10525 /**
10526  * Release a modify-header resource.
10527  *
10528  * @param dev
10529  *   Pointer to Ethernet device.
10530  * @param handle
10531  *   Pointer to mlx5_flow_handle.
10532  *
10533  * @return
10534  *   1 while a reference on it exists, 0 when freed.
10535  */
10536 static int
10537 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10538                                     struct mlx5_flow_handle *handle)
10539 {
10540         struct mlx5_priv *priv = dev->data->dev_private;
10541         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10542
10543         MLX5_ASSERT(entry->action);
10544         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10545 }
10546
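/**
 * Remove callback for the port ID action cache list: destroy the action
 * and return the entry to the indexed pool.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Pointer to the existing cache entry object.
 */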
10547 void
10548 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10549                           struct mlx5_cache_entry *entry)
10550 {
10551         struct mlx5_dev_ctx_shared *sh = list->ctx;
10552         struct mlx5_flow_dv_port_id_action_resource *cache =
10553                         container_of(entry, typeof(*cache), entry);
10554
10555         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10556         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10557 }
10558
10559 /**
10560  * Release port ID action resource.
10561  *
10562  * @param dev
10563  *   Pointer to Ethernet device.
10564  * @param port_id
10565  *   Index to port ID action resource.
10566  *
10567  * @return
10568  *   1 while a reference on it exists, 0 when freed.
10569  */
10570 static int
10571 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10572                                         uint32_t port_id)
10573 {
10574         struct mlx5_priv *priv = dev->data->dev_private;
10575         struct mlx5_flow_dv_port_id_action_resource *cache;
10576
10577         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10578         if (!cache)
10579                 return 0;
10580         MLX5_ASSERT(cache->action);
10581         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10582                                      &cache->entry);
10583 }
10584
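/**
 * Remove callback for the push VLAN action cache list: destroy the
 * action and return the entry to the indexed pool.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Pointer to the existing cache entry object.
 */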
10585 void
10586 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10587                             struct mlx5_cache_entry *entry)
10588 {
10589         struct mlx5_dev_ctx_shared *sh = list->ctx;
10590         struct mlx5_flow_dv_push_vlan_action_resource *cache =
10591                         container_of(entry, typeof(*cache), entry);
10592
10593         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10594         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10595 }
10596
10597 /**
10598  * Release push VLAN action resource.
10599  *
10600  * @param dev
10601  *   Pointer to Ethernet device.
10602  * @param handle
10603  *   Pointer to mlx5_flow_handle.
10604  *
10605  * @return
10606  *   1 while a reference on it exists, 0 when freed.
10607  */
10608 static int
10609 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10610                                           struct mlx5_flow_handle *handle)
10611 {
10612         struct mlx5_priv *priv = dev->data->dev_private;
10613         struct mlx5_flow_dv_push_vlan_action_resource *cache;
10614         uint32_t idx = handle->dvh.rix_push_vlan;
10615
10616         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10617         if (!cache)
10618                 return 0;
10619         MLX5_ASSERT(cache->action);
10620         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10621                                      &cache->entry);
10622 }
10623
10624 /**
10625  * Release the fate resource.
10626  *
10627  * @param dev
10628  *   Pointer to Ethernet device.
10629  * @param handle
10630  *   Pointer to mlx5_flow_handle.
10631  */
10632 static void
10633 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10634                                struct mlx5_flow_handle *handle)
10635 {
10636         if (!handle->rix_fate)
10637                 return;
10638         switch (handle->fate_action) {
10639         case MLX5_FLOW_FATE_QUEUE:
10640                 mlx5_hrxq_release(dev, handle->rix_hrxq);
10641                 break;
10642         case MLX5_FLOW_FATE_JUMP:
10643                 flow_dv_jump_tbl_resource_release(dev, handle);
10644                 break;
10645         case MLX5_FLOW_FATE_PORT_ID:
10646                 flow_dv_port_id_action_resource_release(dev,
10647                                 handle->rix_port_id_action);
10648                 break;
10649         default:
10650                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
10651                 break;
10652         }
10653         handle->rix_fate = 0;
10654 }
10655
10656 /**
10657  * Release a sample resource.
10658  *
10659  * @param dev
10660  *   Pointer to Ethernet device.
10661  * @param handle
10662  *   Pointer to mlx5_flow_handle.
10663  *
10664  * @return
10665  *   1 while a reference on it exists, 0 when freed.
10666  */
10667 static int
10668 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10669                                      struct mlx5_flow_handle *handle)
10670 {
10671         struct mlx5_priv *priv = dev->data->dev_private;
10672         uint32_t idx = handle->dvh.rix_sample;
10673         struct mlx5_flow_dv_sample_resource *cache_resource;
10674
10675         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10676                          idx);
10677         if (!cache_resource)
10678                 return 0;
10679         MLX5_ASSERT(cache_resource->verbs_action);
10680         DRV_LOG(DEBUG, "sample resource %p: refcnt %d--",
10681                 (void *)cache_resource,
10682                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10683         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10684                                __ATOMIC_RELAXED) == 0) {
10685                 if (cache_resource->verbs_action)
10686                         claim_zero(mlx5_glue->destroy_flow_action
10687                                         (cache_resource->verbs_action));
10688                 if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10689                         if (cache_resource->default_miss)
10690                                 claim_zero(mlx5_glue->destroy_flow_action
10691                                   (cache_resource->default_miss));
10692                 }
10693                 if (cache_resource->normal_path_tbl)
10694                         flow_dv_tbl_resource_release(MLX5_SH(dev),
10695                                 cache_resource->normal_path_tbl);
10696         }
10697         if (cache_resource->sample_idx.rix_hrxq &&
10698                 !mlx5_hrxq_release(dev,
10699                         cache_resource->sample_idx.rix_hrxq))
10700                 cache_resource->sample_idx.rix_hrxq = 0;
10701         if (cache_resource->sample_idx.rix_tag &&
10702                 !flow_dv_tag_release(dev,
10703                         cache_resource->sample_idx.rix_tag))
10704                 cache_resource->sample_idx.rix_tag = 0;
10705         if (cache_resource->sample_idx.cnt) {
10706                 flow_dv_counter_release(dev,
10707                         cache_resource->sample_idx.cnt);
10708                 cache_resource->sample_idx.cnt = 0;
10709         }
10710         if (!__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)) {
10711                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10712                              &priv->sh->sample_action_list, idx,
10713                              cache_resource, next);
10714                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE], idx);
10715                 DRV_LOG(DEBUG, "sample resource %p: removed",
10716                         (void *)cache_resource);
10717                 return 0;
10718         }
10719         return 1;
10720 }
10721
10722 /**
10723  * Release a destination array resource.
10724  *
10725  * @param dev
10726  *   Pointer to Ethernet device.
10727  * @param handle
10728  *   Pointer to mlx5_flow_handle.
10729  *
10730  * @return
10731  *   1 while a reference on it exists, 0 when freed.
10732  */
10733 static int
10734 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10735                                      struct mlx5_flow_handle *handle)
10736 {
10737         struct mlx5_priv *priv = dev->data->dev_private;
10738         struct mlx5_flow_dv_dest_array_resource *cache_resource;
10739         struct mlx5_flow_sub_actions_idx *mdest_act_res;
10740         uint32_t idx = handle->dvh.rix_dest_array;
10741         uint32_t i = 0;
10742
10743         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10744                          idx);
10745         if (!cache_resource)
10746                 return 0;
10747         MLX5_ASSERT(cache_resource->action);
10748         DRV_LOG(DEBUG, "destination array resource %p: refcnt %d--",
10749                 (void *)cache_resource,
10750                 __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
10751         if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
10752                                __ATOMIC_RELAXED) == 0) {
10753                 if (cache_resource->action)
10754                         claim_zero(mlx5_glue->destroy_flow_action
10755                                                 (cache_resource->action));
10756                 for (; i < cache_resource->num_of_dest; i++) {
10757                         mdest_act_res = &cache_resource->sample_idx[i];
10758                         if (mdest_act_res->rix_hrxq) {
10759                                 mlx5_hrxq_release(dev,
10760                                         mdest_act_res->rix_hrxq);
10761                                 mdest_act_res->rix_hrxq = 0;
10762                         }
10763                         if (mdest_act_res->rix_encap_decap) {
10764                                 flow_dv_encap_decap_resource_release(dev,
10765                                         mdest_act_res->rix_encap_decap);
10766                                 mdest_act_res->rix_encap_decap = 0;
10767                         }
10768                         if (mdest_act_res->rix_port_id_action) {
10769                                 flow_dv_port_id_action_resource_release(dev,
10770                                         mdest_act_res->rix_port_id_action);
10771                                 mdest_act_res->rix_port_id_action = 0;
10772                         }
10773                         if (mdest_act_res->rix_tag) {
10774                                 flow_dv_tag_release(dev,
10775                                         mdest_act_res->rix_tag);
10776                                 mdest_act_res->rix_tag = 0;
10777                         }
10778                 }
10779                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10780                              &priv->sh->dest_array_list, idx,
10781                              cache_resource, next);
10782                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY], idx);
10783                 DRV_LOG(DEBUG, "destination array resource %p: removed",
10784                         (void *)cache_resource);
10785                 return 0;
10786         }
10787         return 1;
10788 }
10789
10790 /**
10791  * Remove the flow from the NIC but keep it in memory.
10792  * Lock free, (mutex should be acquired by caller).
10793  *
10794  * @param[in] dev
10795  *   Pointer to Ethernet device.
10796  * @param[in, out] flow
10797  *   Pointer to flow structure.
10798  */
10799 static void
10800 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10801 {
10802         struct mlx5_flow_handle *dh;
10803         uint32_t handle_idx;
10804         struct mlx5_priv *priv = dev->data->dev_private;
10805
10806         if (!flow)
10807                 return;
10808         handle_idx = flow->dev_handles;
10809         while (handle_idx) {
10810                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10811                                     handle_idx);
10812                 if (!dh)
10813                         return;
10814                 if (dh->drv_flow) {
10815                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10816                         dh->drv_flow = NULL;
10817                 }
10818                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10819                         flow_dv_fate_resource_release(dev, dh);
10820                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10821                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10822                 handle_idx = dh->next.next;
10823         }
10824 }
10825
10826 /**
10827  * Remove the flow from the NIC and the memory.
10828  * Lock free, (mutex should be acquired by caller).
10829  *
10830  * @param[in] dev
10831  *   Pointer to the Ethernet device structure.
10832  * @param[in, out] flow
10833  *   Pointer to flow structure.
10834  */
10835 static void
10836 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10837 {
10838         struct rte_flow_shared_action *shared;
10839         struct mlx5_flow_handle *dev_handle;
10840         struct mlx5_priv *priv = dev->data->dev_private;
10841
10842         if (!flow)
10843                 return;
10844         __flow_dv_remove(dev, flow);
10845         shared = mlx5_flow_get_shared_rss(flow);
10846         if (shared)
10847                 __atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
10848         if (flow->counter) {
10849                 flow_dv_counter_release(dev, flow->counter);
10850                 flow->counter = 0;
10851         }
10852         if (flow->meter) {
10853                 struct mlx5_flow_meter *fm;
10854
10855                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
10856                                     flow->meter);
10857                 if (fm)
10858                         mlx5_flow_meter_detach(fm);
10859                 flow->meter = 0;
10860         }
10861         while (flow->dev_handles) {
10862                 uint32_t tmp_idx = flow->dev_handles;
10863
10864                 dev_handle = mlx5_ipool_get(priv->sh->ipool
10865                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
10866                 if (!dev_handle)
10867                         return;
10868                 flow->dev_handles = dev_handle->next.next;
10869                 if (dev_handle->dvh.matcher)
10870                         flow_dv_matcher_release(dev, dev_handle);
10871                 if (dev_handle->dvh.rix_sample)
10872                         flow_dv_sample_resource_release(dev, dev_handle);
10873                 if (dev_handle->dvh.rix_dest_array)
10874                         flow_dv_dest_array_resource_release(dev, dev_handle);
10875                 if (dev_handle->dvh.rix_encap_decap)
10876                         flow_dv_encap_decap_resource_release(dev,
10877                                 dev_handle->dvh.rix_encap_decap);
10878                 if (dev_handle->dvh.modify_hdr)
10879                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
10880                 if (dev_handle->dvh.rix_push_vlan)
10881                         flow_dv_push_vlan_action_resource_release(dev,
10882                                                                   dev_handle);
10883                 if (dev_handle->dvh.rix_tag)
10884                         flow_dv_tag_release(dev,
10885                                             dev_handle->dvh.rix_tag);
10886                 flow_dv_fate_resource_release(dev, dev_handle);
10887                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10888                            tmp_idx);
10889         }
10890 }
10891
10892 /**
10893  * Release array of hash RX queue objects.
10894  * Helper function.
10895  *
10896  * @param[in] dev
10897  *   Pointer to the Ethernet device structure.
10898  * @param[in, out] hrxqs
10899  *   Array of hash RX queue objects.
10900  *
10901  * @return
10902  *   Total number of references to hash RX queue objects in *hrxqs* array
10903  *   after this operation.
10904  */
10905 static int
10906 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10907                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10908 {
10909         size_t i;
10910         int remaining = 0;
10911
10912         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10913                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10914
10915                 if (!ret)
10916                         (*hrxqs)[i] = 0;
10917                 remaining += ret;
10918         }
10919         return remaining;
10920 }
10921
10922 /**
10923  * Release all hash RX queue objects representing shared RSS action.
10924  *
10925  * @param[in] dev
10926  *   Pointer to the Ethernet device structure.
10927  * @param[in, out] action
10928  *   Shared RSS action to remove hash RX queue objects from.
10929  *
10930  * @return
10931  *   Total number of references to hash RX queue objects stored in *action*
10932  *   after this operation.
10933  *   Expected to be 0 if no external references held.
10934  */
10935 static int
10936 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10937                                  struct mlx5_shared_action_rss *action)
10938 {
10939         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10940                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10941 }
10942
10943 /**
10944  * Setup shared RSS action.
10945  * Prepare set of hash RX queue objects sufficient to handle all valid
10946  * hash_fields combinations (see enum ibv_rx_hash_fields).
10947  *
10948  * @param[in] dev
10949  *   Pointer to the Ethernet device structure.
10950  * @param[in, out] action
10951  *   Partially initialized shared RSS action.
10952  * @param[out] error
10953  *   Perform verbose error reporting if not NULL. Initialized in case of
10954  *   error only.
10955  *
10956  * @return
10957  *   0 on success, otherwise negative errno value.
10958  */
10959 static int
10960 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
10961                         struct mlx5_shared_action_rss *action,
10962                         struct rte_flow_error *error)
10963 {
10964         struct mlx5_flow_rss_desc rss_desc = { 0 };
10965         size_t i;
10966         int err;
10967
10968         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
10969         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
10970         rss_desc.const_q = action->origin.queue;
10971         rss_desc.queue_num = action->origin.queue_num;
10972         rss_desc.standalone = true;
10973         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
10974                 uint32_t hrxq_idx;
10975                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
10976                 int tunnel;
10977
10978                 for (tunnel = 0; tunnel < 2; tunnel++) {
10979                         rss_desc.tunnel = tunnel;
10980                         rss_desc.hash_fields = hash_fields;
10981                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
10982                         if (!hrxq_idx) {
10983                                 rte_flow_error_set
10984                                         (error, rte_errno,
10985                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10986                                          "cannot get hash queue");
10987                                 goto error_hrxq_new;
10988                         }
10989                         err = __flow_dv_action_rss_hrxq_set
10990                                 (action, hash_fields, tunnel, hrxq_idx);
10991                         MLX5_ASSERT(!err);
10992                 }
10993         }
10994         return 0;
10995 error_hrxq_new:
10996         err = rte_errno;
10997         __flow_dv_action_rss_hrxqs_release(dev, action);
10998         rte_errno = err;
10999         return -rte_errno;
11000 }
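
/*
 * A minimal lookup sketch matching the setup above (an assumption based
 * on the hrxq/hrxq_tunnel arrays released in
 * __flow_dv_action_rss_hrxqs_release(); the helper name here is
 * hypothetical, the driver itself uses __flow_dv_action_rss_hrxq_lookup()):
 *
 *      static uint32_t
 *      rss_hrxq_lookup_sketch(struct mlx5_shared_action_rss *action,
 *                             uint64_t hash_fields, int tunnel)
 *      {
 *              size_t i;
 *
 *              for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++)
 *                      if (mlx5_rss_hash_fields[i] == hash_fields)
 *                              return tunnel ? action->hrxq_tunnel[i] :
 *                                              action->hrxq[i];
 *              return 0;
 *      }
 */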
11001
11002 /**
11003  * Create shared RSS action.
11004  *
11005  * @param[in] dev
11006  *   Pointer to the Ethernet device structure.
11007  * @param[in] conf
11008  *   Shared action configuration.
11009  * @param[in] rss
11010  *   RSS action specification used to create shared action.
11011  * @param[out] error
11012  *   Perform verbose error reporting if not NULL. Initialized in case of
11013  *   error only.
11014  *
11015  * @return
11016  *   A valid shared action handle in case of success, NULL otherwise and
11017  *   rte_errno is set.
11018  */
11019 static struct rte_flow_shared_action *
11020 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11021                             const struct rte_flow_shared_action_conf *conf,
11022                             const struct rte_flow_action_rss *rss,
11023                             struct rte_flow_error *error)
11024 {
11025         struct rte_flow_shared_action *shared_action = NULL;
11026         void *queue = NULL;
11027         struct mlx5_shared_action_rss *shared_rss;
11028         struct rte_flow_action_rss *origin;
11029         const uint8_t *rss_key;
11030         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11031
11032         RTE_SET_USED(conf);
11033         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11034                             0, SOCKET_ID_ANY);
11035         shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
11036                                     SOCKET_ID_ANY);
11037         if (!shared_action || !queue) {
11038                 rte_flow_error_set(error, ENOMEM,
11039                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11040                                    "cannot allocate resource memory");
11041                 goto error_rss_init;
11042         }
11043         shared_rss = &shared_action->rss;
11044         shared_rss->queue = queue;
11045         origin = &shared_rss->origin;
11046         origin->func = rss->func;
11047         origin->level = rss->level;
11048         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11049         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11050         /* NULL RSS key indicates default RSS key. */
11051         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11052         memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11053         origin->key = &shared_rss->key[0];
11054         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11055         memcpy(shared_rss->queue, rss->queue, queue_size);
11056         origin->queue = shared_rss->queue;
11057         origin->queue_num = rss->queue_num;
11058         if (__flow_dv_action_rss_setup(dev, shared_rss, error))
11059                 goto error_rss_init;
11060         shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
11061         return shared_action;
11062 error_rss_init:
11063         mlx5_free(shared_action);
11064         mlx5_free(queue);
11065         return NULL;
11066 }
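
/*
 * Usage sketch from the application side (illustrative only; port_id and
 * the queue list are hypothetical). The generic rte_flow layer routes
 * rte_flow_shared_action_create() down to __flow_dv_action_create() and
 * from there to the RSS creator above:
 *
 *      uint16_t queues[] = { 0, 1, 2, 3 };
 *      const struct rte_flow_action_rss rss = {
 *              .types = ETH_RSS_IP,
 *              .queue = queues,
 *              .queue_num = RTE_DIM(queues),
 *      };
 *      const struct rte_flow_action action = {
 *              .type = RTE_FLOW_ACTION_TYPE_RSS,
 *              .conf = &rss,
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow_shared_action *sa =
 *              rte_flow_shared_action_create(port_id, NULL, &action, &err);
 */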
11067
11068 /**
11069  * Destroy the shared RSS action.
11070  * Release related hash RX queue objects.
11071  *
11072  * @param[in] dev
11073  *   Pointer to the Ethernet device structure.
11074  * @param[in] shared_rss
11075  *   The shared RSS action object to be removed.
11076  * @param[out] error
11077  *   Perform verbose error reporting if not NULL. Initialized in case of
11078  *   error only.
11079  *
11080  * @return
11081  *   0 on success, otherwise negative errno value.
11082  */
11083 static int
11084 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11085                          struct mlx5_shared_action_rss *shared_rss,
11086                          struct rte_flow_error *error)
11087 {
11088         struct rte_flow_shared_action *shared_action = NULL;
11089         uint32_t old_refcnt = 1;
11090         int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11091
11092         if (remaining) {
11093                 return rte_flow_error_set(error, ETOOMANYREFS,
11094                                           RTE_FLOW_ERROR_TYPE_ACTION,
11095                                           NULL,
11096                                           "shared rss hrxq has references");
11097         }
11098         shared_action = container_of(shared_rss,
11099                                      struct rte_flow_shared_action, rss);
11100         if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11101                                          0, 0,
11102                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11103                 return rte_flow_error_set(error, ETOOMANYREFS,
11104                                           RTE_FLOW_ERROR_TYPE_ACTION,
11105                                           NULL,
11106                                           "shared rss has references");
11107         }
11108         mlx5_free(shared_rss->queue); /* Allocated with mlx5_malloc(). */
11109         return 0;
11110 }
11111
11112 /**
11113  * Create shared action, lock free,
11114  * (mutex should be acquired by caller).
11115  * Dispatcher for action type specific call.
11116  *
11117  * @param[in] dev
11118  *   Pointer to the Ethernet device structure.
11119  * @param[in] conf
11120  *   Shared action configuration.
11121  * @param[in] action
11122  *   Action specification used to create shared action.
11123  * @param[out] error
11124  *   Perform verbose error reporting if not NULL. Initialized in case of
11125  *   error only.
11126  *
11127  * @return
11128  *   A valid shared action handle in case of success, NULL otherwise and
11129  *   rte_errno is set.
11130  */
11131 static struct rte_flow_shared_action *
11132 __flow_dv_action_create(struct rte_eth_dev *dev,
11133                         const struct rte_flow_shared_action_conf *conf,
11134                         const struct rte_flow_action *action,
11135                         struct rte_flow_error *error)
11136 {
11137         struct rte_flow_shared_action *shared_action = NULL;
11138         struct mlx5_priv *priv = dev->data->dev_private;
11139
11140         switch (action->type) {
11141         case RTE_FLOW_ACTION_TYPE_RSS:
11142                 shared_action = __flow_dv_action_rss_create(dev, conf,
11143                                                             action->conf,
11144                                                             error);
11145                 break;
11146         default:
11147                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11148                                    NULL, "action type not supported");
11149                 break;
11150         }
11151         if (shared_action) {
11152                 __atomic_add_fetch(&shared_action->refcnt, 1,
11153                                    __ATOMIC_RELAXED);
11154                 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11155         }
11156         return shared_action;
11157 }
11158
11159 /**
11160  * Destroy the shared action.
11161  * Release action related resources on the NIC and the memory.
11162  * Lock free, (mutex should be acquired by caller).
11163  * Dispatcher for action type specific call.
11164  *
11165  * @param[in] dev
11166  *   Pointer to the Ethernet device structure.
11167  * @param[in] action
11168  *   The shared action object to be removed.
11169  * @param[out] error
11170  *   Perform verbose error reporting if not NULL. Initialized in case of
11171  *   error only.
11172  *
11173  * @return
11174  *   0 on success, otherwise negative errno value.
11175  */
11176 static int
11177 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11178                          struct rte_flow_shared_action *action,
11179                          struct rte_flow_error *error)
11180 {
11181         int ret;
11182
11183         switch (action->type) {
11184         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11185                 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11186                 break;
11187         default:
11188                 return rte_flow_error_set(error, ENOTSUP,
11189                                           RTE_FLOW_ERROR_TYPE_ACTION,
11190                                           NULL,
11191                                           "action type not supported");
11192         }
11193         if (ret)
11194                 return ret;
11195         LIST_REMOVE(action, next);
11196         mlx5_free(action); /* Allocated with mlx5_malloc(). */
11197         return 0;
11198 }
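
/*
 * Counterpart usage sketch (illustrative; "sa" is the handle from the
 * create sketch above). rte_flow_shared_action_destroy() lands here and
 * fails with ETOOMANYREFS while flows still reference the action:
 *
 *      struct rte_flow_error err;
 *
 *      if (rte_flow_shared_action_destroy(port_id, sa, &err))
 *              printf("destroy failed: %s\n", err.message);
 */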
11199
11200 /**
11201  * Update the shared RSS action configuration in place.
11202  *
11203  * @param[in] dev
11204  *   Pointer to the Ethernet device structure.
11205  * @param[in] shared_rss
11206  *   The shared RSS action object to be updated.
11207  * @param[in] action_conf
11208  *   RSS action specification used to modify *shared_rss*.
11209  * @param[out] error
11210  *   Perform verbose error reporting if not NULL. Initialized in case of
11211  *   error only.
11212  *
11213  * @return
11214  *   0 on success, otherwise negative errno value.
11215  * @note: currently only the update of RSS queues is supported.
11216  */
11217 static int
11218 __flow_dv_action_rss_update(struct rte_eth_dev *dev,
11219                             struct mlx5_shared_action_rss *shared_rss,
11220                             const struct rte_flow_action_rss *action_conf,
11221                             struct rte_flow_error *error)
11222 {
11223         size_t i;
11224         int ret;
11225         void *queue = NULL;
11226         const uint8_t *rss_key;
11227         uint32_t rss_key_len;
11228         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11229
11230         queue = mlx5_malloc(MLX5_MEM_ZERO,
11231                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11232                             0, SOCKET_ID_ANY);
11233         if (!queue)
11234                 return rte_flow_error_set(error, ENOMEM,
11235                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11236                                           NULL,
11237                                           "cannot allocate resource memory");
11238         if (action_conf->key) {
11239                 rss_key = action_conf->key;
11240                 rss_key_len = action_conf->key_len;
11241         } else {
11242                 rss_key = rss_hash_default_key;
11243                 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11244         }
11245         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11246                 uint32_t hrxq_idx;
11247                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11248                 int tunnel;
11249
11250                 for (tunnel = 0; tunnel < 2; tunnel++) {
11251                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11252                                         (shared_rss, hash_fields, tunnel);
11253                         MLX5_ASSERT(hrxq_idx);
11254                         ret = mlx5_hrxq_modify
11255                                 (dev, hrxq_idx,
11256                                  rss_key, rss_key_len,
11257                                  hash_fields,
11258                                  action_conf->queue, action_conf->queue_num);
11259                         if (ret) {
11260                                 mlx5_free(queue);
11261                                 return rte_flow_error_set
11262                                         (error, rte_errno,
11263                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11264                                          "cannot update hash queue");
11265                         }
11266                 }
11267         }
11268         mlx5_free(shared_rss->queue);
11269         shared_rss->queue = queue;
11270         memcpy(shared_rss->queue, action_conf->queue, queue_size);
11271         shared_rss->origin.queue = shared_rss->queue;
11272         shared_rss->origin.queue_num = action_conf->queue_num;
11273         return 0;
11274 }
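
/*
 * Usage sketch (illustrative; "sa" and the queue list are hypothetical):
 * retargeting a live shared RSS action to another queue set through the
 * generic API, which ends up in __flow_dv_action_rss_update():
 *
 *      uint16_t new_queues[] = { 4, 5 };
 *      const struct rte_flow_action_rss new_rss = {
 *              .queue = new_queues,
 *              .queue_num = RTE_DIM(new_queues),
 *      };
 *      const struct rte_flow_action update = {
 *              .type = RTE_FLOW_ACTION_TYPE_RSS,
 *              .conf = &new_rss,
 *      };
 *      struct rte_flow_error err;
 *
 *      rte_flow_shared_action_update(port_id, sa, &update, &err);
 */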
11275
11276 /**
11277  * Update the shared action configuration in place, lock free,
11278  * (mutex should be acquired by caller).
11279  *
11280  * @param[in] dev
11281  *   Pointer to the Ethernet device structure.
11282  * @param[in] action
11283  *   The shared action object to be updated.
11284  * @param[in] action_conf
11285  *   Action specification used to modify *action*.
11286  *   *action_conf* should be of type correlating with type of the *action*,
11287  *   otherwise considered as invalid.
11288  * @param[out] error
11289  *   Perform verbose error reporting if not NULL. Initialized in case of
11290  *   error only.
11291  *
11292  * @return
11293  *   0 on success, otherwise negative errno value.
11294  */
11295 static int
11296 __flow_dv_action_update(struct rte_eth_dev *dev,
11297                         struct rte_flow_shared_action *action,
11298                         const void *action_conf,
11299                         struct rte_flow_error *error)
11300 {
11301         switch (action->type) {
11302         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11303                 return __flow_dv_action_rss_update(dev, &action->rss,
11304                                                    action_conf, error);
11305         default:
11306                 return rte_flow_error_set(error, ENOTSUP,
11307                                           RTE_FLOW_ERROR_TYPE_ACTION,
11308                                           NULL,
11309                                           "action type not supported");
11310         }
11311 }
11312 /**
11313  * Query a DV flow rule for its statistics via DevX.
11314  *
11315  * @param[in] dev
11316  *   Pointer to Ethernet device.
11317  * @param[in] flow
11318  *   Pointer to the sub flow.
11319  * @param[out] data
11320  *   data retrieved by the query.
11321  * @param[out] error
11322  *   Perform verbose error reporting if not NULL.
11323  *
11324  * @return
11325  *   0 on success, a negative errno value otherwise and rte_errno is set.
11326  */
11327 static int
11328 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11329                     void *data, struct rte_flow_error *error)
11330 {
11331         struct mlx5_priv *priv = dev->data->dev_private;
11332         struct rte_flow_query_count *qc = data;
11333
11334         if (!priv->config.devx)
11335                 return rte_flow_error_set(error, ENOTSUP,
11336                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11337                                           NULL,
11338                                           "counters are not supported");
11339         if (flow->counter) {
11340                 uint64_t pkts, bytes;
11341                 struct mlx5_flow_counter *cnt;
11342                 int err;
11343
11344                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11345                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
11346                                            &bytes);
11347
11348                 if (err)
11349                         return rte_flow_error_set(error, -err,
11350                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11351                                         NULL, "cannot read counters");
11352                 qc->hits_set = 1;
11353                 qc->bytes_set = 1;
11354                 qc->hits = pkts - cnt->hits;
11355                 qc->bytes = bytes - cnt->bytes;
11356                 if (qc->reset) {
11357                         cnt->hits = pkts;
11358                         cnt->bytes = bytes;
11359                 }
11360                 return 0;
11361         }
11362         return rte_flow_error_set(error, EINVAL,
11363                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11364                                   NULL,
11365                                   "counters are not available");
11366 }
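
/*
 * Usage sketch (illustrative; "flow" must have been created with a COUNT
 * action): reading and resetting the statistics via rte_flow_query(),
 * which dispatches to flow_dv_query_count():
 *
 *      struct rte_flow_query_count qc = { .reset = 1 };
 *      const struct rte_flow_action count = {
 *              .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *      };
 *      struct rte_flow_error err;
 *
 *      if (!rte_flow_query(port_id, flow, &count, &qc, &err))
 *              printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                     qc.hits, qc.bytes);
 */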
11367
11368 /**
11369  * Query a flow rule AGE action for aging information.
11370  *
11371  * @param[in] dev
11372  *   Pointer to Ethernet device.
11373  * @param[in] flow
11374  *   Pointer to the sub flow.
11375  * @param[out] data
11376  *   data retrieved by the query.
11377  * @param[out] error
11378  *   Perform verbose error reporting if not NULL.
11379  *
11380  * @return
11381  *   0 on success, a negative errno value otherwise and rte_errno is set.
11382  */
11383 static int
11384 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11385                   void *data, struct rte_flow_error *error)
11386 {
11387         struct rte_flow_query_age *resp = data;
11388
11389         if (flow->counter) {
11390                 struct mlx5_age_param *age_param =
11391                                 flow_dv_counter_idx_get_age(dev, flow->counter);
11392
11393                 if (!age_param || !age_param->timeout)
11394                         return rte_flow_error_set
11395                                         (error, EINVAL,
11396                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11397                                          NULL, "cannot read age data");
11398                 resp->aged = __atomic_load_n(&age_param->state,
11399                                              __ATOMIC_RELAXED) ==
11400                                                         AGE_TMOUT ? 1 : 0;
11401                 resp->sec_since_last_hit_valid = !resp->aged;
11402                 if (resp->sec_since_last_hit_valid)
11403                         resp->sec_since_last_hit =
11404                                 __atomic_load_n(&age_param->sec_since_last_hit,
11405                                                 __ATOMIC_RELAXED);
11406                 return 0;
11407         }
11408         return rte_flow_error_set(error, EINVAL,
11409                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11410                                   NULL,
11411                                   "age data not available");
11412 }
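
/*
 * Usage sketch (illustrative; "flow" must carry an AGE action): polling
 * the aging state through rte_flow_query(), which dispatches to
 * flow_dv_query_age():
 *
 *      struct rte_flow_query_age age = { 0 };
 *      const struct rte_flow_action age_action = {
 *              .type = RTE_FLOW_ACTION_TYPE_AGE,
 *      };
 *      struct rte_flow_error err;
 *
 *      if (!rte_flow_query(port_id, flow, &age_action, &age, &err) &&
 *          age.sec_since_last_hit_valid)
 *              printf("idle for %u seconds\n", age.sec_since_last_hit);
 */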
11413
11414 /**
11415  * Query a flow.
11416  *
11417  * @see rte_flow_query()
11418  * @see rte_flow_ops
11419  */
11420 static int
11421 flow_dv_query(struct rte_eth_dev *dev,
11422               struct rte_flow *flow __rte_unused,
11423               const struct rte_flow_action *actions __rte_unused,
11424               void *data __rte_unused,
11425               struct rte_flow_error *error __rte_unused)
11426 {
11427         int ret = -EINVAL;
11428
11429         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11430                 switch (actions->type) {
11431                 case RTE_FLOW_ACTION_TYPE_VOID:
11432                         break;
11433                 case RTE_FLOW_ACTION_TYPE_COUNT:
11434                         ret = flow_dv_query_count(dev, flow, data, error);
11435                         break;
11436                 case RTE_FLOW_ACTION_TYPE_AGE:
11437                         ret = flow_dv_query_age(dev, flow, data, error);
11438                         break;
11439                 default:
11440                         return rte_flow_error_set(error, ENOTSUP,
11441                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11442                                                   actions,
11443                                                   "action not supported");
11444                 }
11445         }
11446         return ret;
11447 }
11448
11449 /**
11450  * Destroy the meter table set.
11451  * Lock free, (mutex should be acquired by caller).
11452  *
11453  * @param[in] dev
11454  *   Pointer to Ethernet device.
11455  * @param[in] tbl
11456  *   Pointer to the meter table set.
11457  *
11458  * @return
11459  *   Always 0.
11460  */
11461 static int
11462 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11463                         struct mlx5_meter_domains_infos *tbl)
11464 {
11465         struct mlx5_priv *priv = dev->data->dev_private;
11466         struct mlx5_meter_domains_infos *mtd =
11467                                 (struct mlx5_meter_domains_infos *)tbl;
11468
11469         if (!mtd || !priv->config.dv_flow_en)
11470                 return 0;
11471         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11472                 claim_zero(mlx5_flow_os_destroy_flow
11473                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11474         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11475                 claim_zero(mlx5_flow_os_destroy_flow
11476                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11477         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11478                 claim_zero(mlx5_flow_os_destroy_flow
11479                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11480         if (mtd->egress.color_matcher)
11481                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11482                            (mtd->egress.color_matcher));
11483         if (mtd->egress.any_matcher)
11484                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11485                            (mtd->egress.any_matcher));
11486         if (mtd->egress.tbl)
11487                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11488         if (mtd->egress.sfx_tbl)
11489                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11490         if (mtd->ingress.color_matcher)
11491                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11492                            (mtd->ingress.color_matcher));
11493         if (mtd->ingress.any_matcher)
11494                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11495                            (mtd->ingress.any_matcher));
11496         if (mtd->ingress.tbl)
11497                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11498         if (mtd->ingress.sfx_tbl)
11499                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11500                                              mtd->ingress.sfx_tbl);
11501         if (mtd->transfer.color_matcher)
11502                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11503                            (mtd->transfer.color_matcher));
11504         if (mtd->transfer.any_matcher)
11505                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11506                            (mtd->transfer.any_matcher));
11507         if (mtd->transfer.tbl)
11508                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
11509         if (mtd->transfer.sfx_tbl)
11510                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11511                                              mtd->transfer.sfx_tbl);
11512         if (mtd->drop_actn)
11513                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11514         mlx5_free(mtd);
11515         return 0;
11516 }
11517
11518 /* Number of meter flow actions, count and jump or count and drop. */
11519 #define METER_ACTIONS 2
11520
11521 /**
11522  * Create the specified domain meter table and suffix table.
11523  *
11524  * @param[in] dev
11525  *   Pointer to Ethernet device.
11526  * @param[in,out] mtb
11527  *   Pointer to DV meter table set.
11528  * @param[in] egress
11529  *   Table attribute.
11530  * @param[in] transfer
11531  *   Table attribute.
11532  * @param[in] color_reg_c_idx
11533  *   Reg C index for color match.
11534  *
11535  * @return
11536  *   0 on success, -1 otherwise and rte_errno is set.
11537  */
11538 static int
11539 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11540                            struct mlx5_meter_domains_infos *mtb,
11541                            uint8_t egress, uint8_t transfer,
11542                            uint32_t color_reg_c_idx)
11543 {
11544         struct mlx5_priv *priv = dev->data->dev_private;
11545         struct mlx5_dev_ctx_shared *sh = priv->sh;
11546         struct mlx5_flow_dv_match_params mask = {
11547                 .size = sizeof(mask.buf),
11548         };
11549         struct mlx5_flow_dv_match_params value = {
11550                 .size = sizeof(value.buf),
11551         };
11552         struct mlx5dv_flow_matcher_attr dv_attr = {
11553                 .type = IBV_FLOW_ATTR_NORMAL,
11554                 .priority = 0,
11555                 .match_criteria_enable = 0,
11556                 .match_mask = (void *)&mask,
11557         };
11558         void *actions[METER_ACTIONS];
11559         struct mlx5_meter_domain_info *dtb;
11560         struct rte_flow_error error;
11561         int i = 0;
11562         int ret;
11563
11564         if (transfer)
11565                 dtb = &mtb->transfer;
11566         else if (egress)
11567                 dtb = &mtb->egress;
11568         else
11569                 dtb = &mtb->ingress;
11570         /* Create the meter table with METER level. */
11571         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11572                                             egress, transfer, false, NULL, 0,
11573                                             0, &error);
11574         if (!dtb->tbl) {
11575                 DRV_LOG(ERR, "Failed to create meter policer table.");
11576                 return -1;
11577         }
11578         /* Create the meter suffix table with SUFFIX level. */
11579         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11580                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11581                                             egress, transfer, false, NULL, 0,
11582                                             0, &error);
11583         if (!dtb->sfx_tbl) {
11584                 DRV_LOG(ERR, "Failed to create meter suffix table.");
11585                 return -1;
11586         }
11587         /* Create matchers, Any and Color. */
11588         dv_attr.priority = 3;
11589         dv_attr.match_criteria_enable = 0;
11590         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11591                                                &dtb->any_matcher);
11592         if (ret) {
11593                 DRV_LOG(ERR, "Failed to create meter"
11594                              " policer default matcher.");
11595                 goto error_exit;
11596         }
11597         dv_attr.priority = 0;
11598         dv_attr.match_criteria_enable =
11599                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11600         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11601                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11602         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11603                                                &dtb->color_matcher);
11604         if (ret) {
11605                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
11606                 goto error_exit;
11607         }
11608         if (mtb->count_actns[RTE_MTR_DROPPED])
11609                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11610         actions[i++] = mtb->drop_actn;
11611         /* Default rule: lowest priority, match any, actions: drop. */
11612         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11613                                        actions,
11614                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
11615         if (ret) {
11616                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11617                 goto error_exit;
11618         }
11619         return 0;
11620 error_exit:
11621         return -1;
11622 }
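
/*
 * Resulting per-domain layout (a simplified sketch of what the function
 * above builds for each of the ingress/egress/transfer domains):
 *
 *      METER level table
 *      +---------------------------------------+
 *      | color matcher (prio 0, REG_C color)   |--> per-color policer rules
 *      | any matcher   (prio 3, match all)     |--> [count] + drop (default)
 *      +---------------------------------------+
 *                          |
 *                          v (jump for non-dropped packets)
 *      SUFFIX level table (holds the continuation of the split flow)
 */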
11623
11624 /**
11625  * Create the needed meter and suffix tables.
11626  * Lock free, (mutex should be acquired by caller).
11627  *
11628  * @param[in] dev
11629  *   Pointer to Ethernet device.
11630  * @param[in] fm
11631  *   Pointer to the flow meter.
11632  *
11633  * @return
11634  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
11635  */
11636 static struct mlx5_meter_domains_infos *
11637 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
11638                        const struct mlx5_flow_meter *fm)
11639 {
11640         struct mlx5_priv *priv = dev->data->dev_private;
11641         struct mlx5_meter_domains_infos *mtb;
11642         int ret;
11643         int i;
11644
11645         if (!priv->mtr_en) {
11646                 rte_errno = ENOTSUP;
11647                 return NULL;
11648         }
11649         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
11650         if (!mtb) {
11651                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
11652                 return NULL;
11653         }
11654         /* Create meter count actions. */
11655         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
11656                 struct mlx5_flow_counter *cnt;
11657                 if (!fm->policer_stats.cnt[i])
11658                         continue;
11659                 cnt = flow_dv_counter_get_by_idx(dev,
11660                       fm->policer_stats.cnt[i], NULL);
11661                 mtb->count_actns[i] = cnt->action;
11662         }
11663         /* Create drop action. */
11664         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
11665         if (ret) {
11666                 DRV_LOG(ERR, "Failed to create drop action.");
11667                 goto error_exit;
11668         }
11669         /* Egress meter table. */
11670         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
11671         if (ret) {
11672                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
11673                 goto error_exit;
11674         }
11675         /* Ingress meter table. */
11676         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
11677         if (ret) {
11678                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
11679                 goto error_exit;
11680         }
11681         /* FDB meter table. */
11682         if (priv->config.dv_esw_en) {
11683                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
11684                                                  priv->mtr_color_reg);
11685                 if (ret) {
11686                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
11687                         goto error_exit;
11688                 }
11689         }
11690         return mtb;
11691 error_exit:
11692         flow_dv_destroy_mtr_tbl(dev, mtb);
11693         return NULL;
11694 }
11695
11696 /**
11697  * Destroy domain policer rule.
11698  *
11699  * @param[in] dt
11700  *   Pointer to domain table.
11701  */
11702 static void
11703 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11704 {
11705         int i;
11706
11707         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11708                 if (dt->policer_rules[i]) {
11709                         claim_zero(mlx5_flow_os_destroy_flow
11710                                    (dt->policer_rules[i]));
11711                         dt->policer_rules[i] = NULL;
11712                 }
11713         }
11714         if (dt->jump_actn) {
11715                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11716                 dt->jump_actn = NULL;
11717         }
11718 }
11719
11720 /**
11721  * Destroy policer rules.
11722  *
11723  * @param[in] dev
11724  *   Pointer to Ethernet device.
11725  * @param[in] fm
11726  *   Pointer to flow meter structure.
11727  * @param[in] attr
11728  *   Pointer to flow attributes.
11729  *
11730  * @return
11731  *   Always 0.
11732  */
11733 static int
11734 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11735                               const struct mlx5_flow_meter *fm,
11736                               const struct rte_flow_attr *attr)
11737 {
11738         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11739
11740         if (!mtb)
11741                 return 0;
11742         if (attr->egress)
11743                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11744         if (attr->ingress)
11745                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11746         if (attr->transfer)
11747                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11748         return 0;
11749 }
11750
11751 /**
11752  * Create the specified domain meter policer rule.
11753  *
11754  * @param[in] fm
11755  *   Pointer to flow meter structure.
11756  * @param[in] mtb
11757  *   Pointer to DV meter table set.
11758  * @param[in] mtr_reg_c
11759  *   Color match REG_C.
11760  *
11761  * @return
11762  *   0 on success, -1 otherwise.
11763  */
11764 static int
11765 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
11766                                     struct mlx5_meter_domain_info *dtb,
11767                                     uint8_t mtr_reg_c)
11768 {
11769         struct mlx5_flow_dv_match_params matcher = {
11770                 .size = sizeof(matcher.buf),
11771         };
11772         struct mlx5_flow_dv_match_params value = {
11773                 .size = sizeof(value.buf),
11774         };
11775         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11776         void *actions[METER_ACTIONS];
11777         int i;
11778         int ret = 0;
11779
11780         /* Create jump action. */
11781         if (!dtb->jump_actn)
11782                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11783                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
11784         if (ret) {
11785                 DRV_LOG(ERR, "Failed to create policer jump action.");
11786                 goto error;
11787         }
11788         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11789                 int j = 0;
11790
11791                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
11792                                        rte_col_2_mlx5_col(i), UINT8_MAX);
11793                 if (mtb->count_actns[i])
11794                         actions[j++] = mtb->count_actns[i];
11795                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
11796                         actions[j++] = mtb->drop_actn;
11797                 else
11798                         actions[j++] = dtb->jump_actn;
11799                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
11800                                                (void *)&value, j, actions,
11801                                                &dtb->policer_rules[i]);
11802                 if (ret) {
11803                         DRV_LOG(ERR, "Failed to create policer rule.");
11804                         goto error;
11805                 }
11806         }
11807         return 0;
11808 error:
11809         rte_errno = errno;
11810         return -1;
11811 }
11812
11813 /**
11814  * Create policer rules.
11815  *
11816  * @param[in] dev
11817  *   Pointer to Ethernet device.
11818  * @param[in] fm
11819  *   Pointer to flow meter structure.
11820  * @param[in] attr
11821  *   Pointer to flow attributes.
11822  *
11823  * @return
11824  *   0 on success, -1 otherwise.
11825  */
11826 static int
11827 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11828                              struct mlx5_flow_meter *fm,
11829                              const struct rte_flow_attr *attr)
11830 {
11831         struct mlx5_priv *priv = dev->data->dev_private;
11832         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11833         int ret;
11834
11835         if (attr->egress) {
11836                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11837                                                 priv->mtr_color_reg);
11838                 if (ret) {
11839                         DRV_LOG(ERR, "Failed to create egress policer.");
11840                         goto error;
11841                 }
11842         }
11843         if (attr->ingress) {
11844                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11845                                                 priv->mtr_color_reg);
11846                 if (ret) {
11847                         DRV_LOG(ERR, "Failed to create ingress policer.");
11848                         goto error;
11849                 }
11850         }
11851         if (attr->transfer) {
11852                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11853                                                 priv->mtr_color_reg);
11854                 if (ret) {
11855                         DRV_LOG(ERR, "Failed to create transfer policer.");
11856                         goto error;
11857                 }
11858         }
11859         return 0;
11860 error:
11861         flow_dv_destroy_policer_rules(dev, fm, attr);
11862         return -1;
11863 }
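
/*
 * Application-side sketch (illustrative; the IDs, rates and colors are
 * hypothetical) of the path that ends in the policer rules above: a
 * meter is defined through the rte_mtr API and then referenced from a
 * flow rule with a METER action:
 *
 *      struct rte_mtr_error merr;
 *      struct rte_mtr_meter_profile prof = {
 *              .alg = RTE_MTR_SRTCM_RFC2697,
 *              .srtcm_rfc2697 = { .cir = 1000000, .cbs = 2048, .ebs = 2048 },
 *      };
 *      struct rte_mtr_params params = {
 *              .meter_profile_id = 1,
 *              .meter_enable = 1,
 *              .action = { MTR_POLICER_ACTION_COLOR_GREEN,
 *                          MTR_POLICER_ACTION_COLOR_YELLOW,
 *                          MTR_POLICER_ACTION_DROP },
 *      };
 *
 *      rte_mtr_meter_profile_add(port_id, 1, &prof, &merr);
 *      rte_mtr_create(port_id, 1, &params, 1, &merr);
 *      then create a flow rule with RTE_FLOW_ACTION_TYPE_METER, mtr_id 1.
 */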
11864
11865 /**
11866  * Validate the batch counter support in root table.
11867  *
11868  * Create a simple flow with invalid counter and drop action on root table to
11869  * validate if batch counter with offset on root table is supported or not.
11870  *
11871  * @param[in] dev
11872  *   Pointer to rte_eth_dev structure.
11873  *
11874  * @return
11875  *   0 on success, a negative errno value otherwise and rte_errno is set.
11876  */
11877 int
11878 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11879 {
11880         struct mlx5_priv *priv = dev->data->dev_private;
11881         struct mlx5_dev_ctx_shared *sh = priv->sh;
11882         struct mlx5_flow_dv_match_params mask = {
11883                 .size = sizeof(mask.buf),
11884         };
11885         struct mlx5_flow_dv_match_params value = {
11886                 .size = sizeof(value.buf),
11887         };
11888         struct mlx5dv_flow_matcher_attr dv_attr = {
11889                 .type = IBV_FLOW_ATTR_NORMAL,
11890                 .priority = 0,
11891                 .match_criteria_enable = 0,
11892                 .match_mask = (void *)&mask,
11893         };
11894         void *actions[2] = { 0 };
11895         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11896         struct mlx5_devx_obj *dcs = NULL;
11897         void *matcher = NULL;
11898         void *flow = NULL;
11899         int i, ret = -1;
11900
11901         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11902         if (!tbl)
11903                 goto err;
11904         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
11905                                             NULL, 0, 0, NULL);
11906         if (!dest_tbl)
11907                 goto err;
11908         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
11909         if (!dcs)
11910                 goto err;
11911         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11912                                                     &actions[0]);
11913         if (ret)
11914                 goto err;
11915         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11916                                 (dest_tbl->obj, &actions[1]);
11917         if (ret)
11918                 goto err;
11919         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11920         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11921                                                &matcher);
11922         if (ret)
11923                 goto err;
11924         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11925                                        actions, &flow);
11926 err:
11927         /*
11928          * If the batch counter with offset is not supported, the driver will
11929          * not validate the invalid offset value and flow creation should
11930          * succeed. In this case, batch counters are not supported in the
11931          * root table.
11932          * Otherwise, if flow creation fails, the counter offset is supported.
11933          */
11934         if (flow) {
11935                 DRV_LOG(INFO, "Batch counter is not supported in root "
11936                               "table. Switch to fallback mode.");
11937                 rte_errno = ENOTSUP;
11938                 ret = -rte_errno;
11939                 claim_zero(mlx5_flow_os_destroy_flow(flow));
11940         } else {
11941                 /* Check matcher to make sure validate fail at flow create. */
11942                 if (!matcher || errno != EINVAL)
11943                         DRV_LOG(ERR, "Unexpected error in counter offset "
11944                                      "support detection");
11945                 ret = 0;
11946         }
11947         for (i = 0; i < 2; i++) {
11948                 if (actions[i])
11949                         claim_zero(mlx5_flow_os_destroy_flow_action
11950                                    (actions[i]));
11951         }
11952         if (matcher)
11953                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
11954         if (tbl)
11955                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11956         if (dest_tbl)
11957                 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
11958         if (dcs)
11959                 claim_zero(mlx5_devx_cmd_destroy(dcs));
11960         return ret;
11961 }

/**
 * Query a DevX counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index of the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise -1.
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
                      uint64_t *pkts, uint64_t *bytes)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_counter *cnt;
        uint64_t inn_pkts, inn_bytes;
        int ret;

        if (!priv->config.devx)
                return -1;

        ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
        if (ret)
                return -1;
        cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
        *pkts = inn_pkts - cnt->hits;
        *bytes = inn_bytes - cnt->bytes;
        if (clear) {
                cnt->hits = inn_pkts;
                cnt->bytes = inn_bytes;
        }
        return 0;
}
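
/*
 * Illustrative sketch only, not part of the driver: an application
 * typically reads the hits/bytes pair maintained above by querying a
 * COUNT action through the generic rte_flow layer ("port_id" and "flow"
 * are hypothetical handles owned by the application):
 *
 *     struct rte_flow_query_count qc = { .reset = 1 };
 *     struct rte_flow_action count = {
 *             .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *     };
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_query(port_id, flow, &count, &qc, &err) == 0)
 *             printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                    qc.hits, qc.bytes);
 */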

/**
 * Get aged-out flows.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flow contexts.
 * @param[in] nb_contexts
 *   The length of the context array.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   The number of aged-out flows on success, otherwise a negative errno
 *   value. If nb_contexts is 0, return the total number of aged-out flows;
 *   otherwise, return the number of aged-out flows reported in the context
 *   array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
                    void **context,
                    uint32_t nb_contexts,
                    struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_age_info *age_info;
        struct mlx5_age_param *age_param;
        struct mlx5_flow_counter *counter;
        int nb_flows = 0;

        if (nb_contexts && !context)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "context cannot be NULL when"
                                          " nb_contexts is not 0");
        age_info = GET_PORT_AGE_INFO(priv);
        rte_spinlock_lock(&age_info->aged_sl);
        TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
                nb_flows++;
                if (nb_contexts) {
                        age_param = MLX5_CNT_TO_AGE(counter);
                        context[nb_flows - 1] = age_param->context;
                        if (!(--nb_contexts))
                                break;
                }
        }
        rte_spinlock_unlock(&age_info->aged_sl);
        MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
        return nb_flows;
}
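
/*
 * Illustrative sketch only, not part of the driver: the aged-out list
 * drained above is exposed to applications through the generic
 * rte_flow_get_aged_flows() API. The context pointers returned are the
 * ones registered in the AGE action configuration ("port_id" and
 * "handle_aged_flow" are hypothetical):
 *
 *     void *contexts[64];
 *     struct rte_flow_error err;
 *     int i, n;
 *
 *     n = rte_flow_get_aged_flows(port_id, contexts,
 *                                 RTE_DIM(contexts), &err);
 *     for (i = 0; i < n; i++)
 *             handle_aged_flow(contexts[i]);
 */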

/*
 * Mutex-protected thunk to lock-free __flow_dv_translate().
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
                  struct mlx5_flow *dev_flow,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        int ret;

        flow_dv_shared_lock(dev);
        ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
        flow_dv_shared_unlock(dev);
        return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_apply().
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
              struct rte_flow *flow,
              struct rte_flow_error *error)
{
        int ret;

        flow_dv_shared_lock(dev);
        ret = __flow_dv_apply(dev, flow, error);
        flow_dv_shared_unlock(dev);
        return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        flow_dv_shared_lock(dev);
        __flow_dv_remove(dev, flow);
        flow_dv_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        flow_dv_shared_lock(dev);
        __flow_dv_destroy(dev, flow);
        flow_dv_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
        uint32_t cnt;

        flow_dv_shared_lock(dev);
        cnt = flow_dv_counter_alloc(dev, 0);
        flow_dv_shared_unlock(dev);
        return cnt;
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_release().
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
        flow_dv_shared_lock(dev);
        flow_dv_counter_release(dev, cnt);
        flow_dv_shared_unlock(dev);
}
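
/*
 * Illustrative sketch only, not part of the driver: together with
 * flow_dv_counter_query() above, the two thunks form the counter
 * lifecycle used through the driver ops (error handling omitted):
 *
 *     uint32_t cnt_idx = flow_dv_counter_allocate(dev);
 *     uint64_t pkts, bytes;
 *
 *     flow_dv_counter_query(dev, cnt_idx, false, &pkts, &bytes);
 *     flow_dv_counter_free(dev, cnt_idx);
 */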

/**
 * Validate shared action.
 * Dispatcher for action type specific validation.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] action
 *   The shared action object to validate.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
flow_dv_action_validate(struct rte_eth_dev *dev,
                        const struct rte_flow_shared_action_conf *conf,
                        const struct rte_flow_action *action,
                        struct rte_flow_error *error)
{
        RTE_SET_USED(conf);
        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_RSS:
                return mlx5_validate_action_rss(dev, action, error);
        default:
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL,
                                          "action type not supported");
        }
}
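
/*
 * Illustrative sketch only, not part of the driver: RSS is the only
 * action type the dispatcher above accepts, so a request that passes
 * validation looks roughly as follows ("port_id" and the queue set are
 * hypothetical):
 *
 *     uint16_t queues[] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss rss = {
 *             .types = ETH_RSS_IP,
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &rss,
 *     };
 *     struct rte_flow_shared_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_error err;
 *     struct rte_flow_shared_action *sa =
 *             rte_flow_shared_action_create(port_id, &conf, &action, &err);
 */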

/*
 * Mutex-protected thunk to lock-free __flow_dv_action_create().
 */
static struct rte_flow_shared_action *
flow_dv_action_create(struct rte_eth_dev *dev,
                      const struct rte_flow_shared_action_conf *conf,
                      const struct rte_flow_action *action,
                      struct rte_flow_error *error)
{
        struct rte_flow_shared_action *shared_action = NULL;

        flow_dv_shared_lock(dev);
        shared_action = __flow_dv_action_create(dev, conf, action, error);
        flow_dv_shared_unlock(dev);
        return shared_action;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_action_destroy().
 */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
                       struct rte_flow_shared_action *action,
                       struct rte_flow_error *error)
{
        int ret;

        flow_dv_shared_lock(dev);
        ret = __flow_dv_action_destroy(dev, action, error);
        flow_dv_shared_unlock(dev);
        return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_action_update().
 */
static int
flow_dv_action_update(struct rte_eth_dev *dev,
                      struct rte_flow_shared_action *action,
                      const void *action_conf,
                      struct rte_flow_error *error)
{
        int ret;

        flow_dv_shared_lock(dev);
        ret = __flow_dv_action_update(dev, action, action_conf, error);
        flow_dv_shared_unlock(dev);
        return ret;
}
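
/*
 * Illustrative sketch only, not part of the driver: updating and then
 * destroying a shared action through the generic API ("sa" is the handle
 * returned by rte_flow_shared_action_create() and "new_rss" a
 * hypothetical replacement RSS configuration):
 *
 *     struct rte_flow_action update = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &new_rss,
 *     };
 *     struct rte_flow_error err;
 *
 *     rte_flow_shared_action_update(port_id, sa, &update, &err);
 *     rte_flow_shared_action_destroy(port_id, sa, &err);
 */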

static int
flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret = 0;

        if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
                ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
                ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
                if (ret != 0)
                        return ret;
        }
        if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
                ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
                if (ret != 0)
                        return ret;
        }
        return 0;
}
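
/*
 * Illustrative sketch only, not part of the driver: assuming this op
 * backs the rte_pmd_mlx5_sync_flow() helper declared in rte_pmd_mlx5.h,
 * an application may flush cached steering rules to the hardware as
 * follows ("port_id" is hypothetical):
 *
 *     rte_pmd_mlx5_sync_flow(port_id,
 *                            MLX5_DOMAIN_BIT_NIC_RX |
 *                            MLX5_DOMAIN_BIT_NIC_TX |
 *                            MLX5_DOMAIN_BIT_FDB);
 */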

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_dv_translate,
        .apply = flow_dv_apply,
        .remove = flow_dv_remove,
        .destroy = flow_dv_destroy,
        .query = flow_dv_query,
        .create_mtr_tbls = flow_dv_create_mtr_tbl,
        .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
        .create_policer_rules = flow_dv_create_policer_rules,
        .destroy_policer_rules = flow_dv_destroy_policer_rules,
        .counter_alloc = flow_dv_counter_allocate,
        .counter_free = flow_dv_counter_free,
        .counter_query = flow_dv_counter_query,
        .get_aged_flows = flow_get_aged_flows,
        .action_validate = flow_dv_action_validate,
        .action_create = flow_dv_action_create,
        .action_destroy = flow_dv_action_destroy,
        .action_update = flow_dv_action_update,
        .sync_domain = flow_dv_sync_domain,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
