08bbc5ddfb9987874a2615713be79ee76a157738
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37
#ifdef HAVE_IBV_FLOW_DV_SUPPORT

/* Fallback when rdma-core lacks DEVX flow counter support. */
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

/* Fallback when rdma-core lacks E-Switch (FDB) direct rules support. */
#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

/* Fallback when rdma-core lacks direct rules support at all. */
#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
60
/*
 * Aggregated view of the outer L3/L4 layers present in a flow; used to
 * select the proper header for modify-header actions.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the attributes are initialized. */
		uint32_t ipv4:1; /* Outer L3 is IPv4. */
		uint32_t ipv6:1; /* Outer L3 is IPv6. */
		uint32_t tcp:1; /* Outer L4 is TCP. */
		uint32_t udp:1; /* Outer L4 is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags as one word, allows bulk reset (attr = 0). */
};
72
/* Forward declarations of resource release helpers referenced before their
 * definitions later in this file. */
static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				      uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
84
85 /**
86  * Initialize flow attributes structure according to flow items' types.
87  *
88  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
89  * mode. For tunnel mode, the items to be modified are the outermost ones.
90  *
91  * @param[in] item
92  *   Pointer to item specification.
93  * @param[out] attr
94  *   Pointer to flow attributes structure.
95  * @param[in] dev_flow
96  *   Pointer to the sub flow.
97  * @param[in] tunnel_decap
98  *   Whether action is after tunnel decapsulation.
99  */
100 static void
101 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
102                   struct mlx5_flow *dev_flow, bool tunnel_decap)
103 {
104         uint64_t layers = dev_flow->handle->layers;
105
106         /*
107          * If layers is already initialized, it means this dev_flow is the
108          * suffix flow, the layers flags is set by the prefix flow. Need to
109          * use the layer flags from prefix flow as the suffix flow may not
110          * have the user defined items as the flow is split.
111          */
112         if (layers) {
113                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
114                         attr->ipv4 = 1;
115                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
116                         attr->ipv6 = 1;
117                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
118                         attr->tcp = 1;
119                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
120                         attr->udp = 1;
121                 attr->valid = 1;
122                 return;
123         }
124         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
125                 uint8_t next_protocol = 0xff;
126                 switch (item->type) {
127                 case RTE_FLOW_ITEM_TYPE_GRE:
128                 case RTE_FLOW_ITEM_TYPE_NVGRE:
129                 case RTE_FLOW_ITEM_TYPE_VXLAN:
130                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
131                 case RTE_FLOW_ITEM_TYPE_GENEVE:
132                 case RTE_FLOW_ITEM_TYPE_MPLS:
133                         if (tunnel_decap)
134                                 attr->attr = 0;
135                         break;
136                 case RTE_FLOW_ITEM_TYPE_IPV4:
137                         if (!attr->ipv6)
138                                 attr->ipv4 = 1;
139                         if (item->mask != NULL &&
140                             ((const struct rte_flow_item_ipv4 *)
141                             item->mask)->hdr.next_proto_id)
142                                 next_protocol =
143                                     ((const struct rte_flow_item_ipv4 *)
144                                       (item->spec))->hdr.next_proto_id &
145                                     ((const struct rte_flow_item_ipv4 *)
146                                       (item->mask))->hdr.next_proto_id;
147                         if ((next_protocol == IPPROTO_IPIP ||
148                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
149                                 attr->attr = 0;
150                         break;
151                 case RTE_FLOW_ITEM_TYPE_IPV6:
152                         if (!attr->ipv4)
153                                 attr->ipv6 = 1;
154                         if (item->mask != NULL &&
155                             ((const struct rte_flow_item_ipv6 *)
156                             item->mask)->hdr.proto)
157                                 next_protocol =
158                                     ((const struct rte_flow_item_ipv6 *)
159                                       (item->spec))->hdr.proto &
160                                     ((const struct rte_flow_item_ipv6 *)
161                                       (item->mask))->hdr.proto;
162                         if ((next_protocol == IPPROTO_IPIP ||
163                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
164                                 attr->attr = 0;
165                         break;
166                 case RTE_FLOW_ITEM_TYPE_UDP:
167                         if (!attr->tcp)
168                                 attr->udp = 1;
169                         break;
170                 case RTE_FLOW_ITEM_TYPE_TCP:
171                         if (!attr->udp)
172                                 attr->tcp = 1;
173                         break;
174                 default:
175                         break;
176                 }
177         }
178         attr->valid = 1;
179 }
180
181 /**
182  * Convert rte_mtr_color to mlx5 color.
183  *
184  * @param[in] rcol
185  *   rte_mtr_color.
186  *
187  * @return
188  *   mlx5 color.
189  */
190 static int
191 rte_col_2_mlx5_col(enum rte_color rcol)
192 {
193         switch (rcol) {
194         case RTE_COLOR_GREEN:
195                 return MLX5_FLOW_COLOR_GREEN;
196         case RTE_COLOR_YELLOW:
197                 return MLX5_FLOW_COLOR_YELLOW;
198         case RTE_COLOR_RED:
199                 return MLX5_FLOW_COLOR_RED;
200         default:
201                 break;
202         }
203         return MLX5_FLOW_COLOR_UNDEFINED;
204 }
205
/* Descriptor of a single protocol-header field for modify-header commands. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Modification field identifier. */
};
211
/* Ethernet header fields (DMAC/SMAC halves); zero-size entry terminates. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

/* Outermost VLAN VID field. */
struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

/* IPv4 header fields: DSCP, TTL, source and destination addresses. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

/* IPv6 header fields: DSCP, hop limit, and the 128-bit addresses split
 * into four 32-bit pieces each. */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

/* UDP source/destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

/* TCP ports and sequence/acknowledgment numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
261
262 static void
263 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
264                           uint8_t next_protocol, uint64_t *item_flags,
265                           int *tunnel)
266 {
267         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
268                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
269         if (next_protocol == IPPROTO_IPIP) {
270                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
271                 *tunnel = 1;
272         }
273         if (next_protocol == IPPROTO_IPV6) {
274                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
275                 *tunnel = 1;
276         }
277 }
278
279 /**
280  * Acquire the synchronizing object to protect multithreaded access
281  * to shared dv context. Lock occurs only if context is actually
282  * shared, i.e. we have multiport IB device and representors are
283  * created.
284  *
285  * @param[in] dev
286  *   Pointer to the rte_eth_dev structure.
287  */
288 static void
289 flow_dv_shared_lock(struct rte_eth_dev *dev)
290 {
291         struct mlx5_priv *priv = dev->data->dev_private;
292         struct mlx5_dev_ctx_shared *sh = priv->sh;
293
294         if (sh->refcnt > 1) {
295                 int ret;
296
297                 ret = pthread_mutex_lock(&sh->dv_mutex);
298                 MLX5_ASSERT(!ret);
299                 (void)ret;
300         }
301 }
302
303 static void
304 flow_dv_shared_unlock(struct rte_eth_dev *dev)
305 {
306         struct mlx5_priv *priv = dev->data->dev_private;
307         struct mlx5_dev_ctx_shared *sh = priv->sh;
308
309         if (sh->refcnt > 1) {
310                 int ret;
311
312                 ret = pthread_mutex_unlock(&sh->dv_mutex);
313                 MLX5_ASSERT(!ret);
314                 (void)ret;
315         }
316 }
317
318 /* Update VLAN's VID/PCP based on input rte_flow_action.
319  *
320  * @param[in] action
321  *   Pointer to struct rte_flow_action.
322  * @param[out] vlan
323  *   Pointer to struct rte_vlan_hdr.
324  */
325 static void
326 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
327                          struct rte_vlan_hdr *vlan)
328 {
329         uint16_t vlan_tci;
330         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
331                 vlan_tci =
332                     ((const struct rte_flow_action_of_set_vlan_pcp *)
333                                                action->conf)->vlan_pcp;
334                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
335                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
336                 vlan->vlan_tci |= vlan_tci;
337         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
338                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
339                 vlan->vlan_tci |= rte_be_to_cpu_16
340                     (((const struct rte_flow_action_of_set_vlan_vid *)
341                                              action->conf)->vlan_vid);
342         }
343 }
344
345 /**
346  * Fetch 1, 2, 3 or 4 byte field from the byte array
347  * and return as unsigned integer in host-endian format.
348  *
349  * @param[in] data
350  *   Pointer to data array.
351  * @param[in] size
352  *   Size of field to extract.
353  *
354  * @return
355  *   converted field in host endian format.
356  */
357 static inline uint32_t
358 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
359 {
360         uint32_t ret;
361
362         switch (size) {
363         case 1:
364                 ret = *data;
365                 break;
366         case 2:
367                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
368                 break;
369         case 3:
370                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
371                 ret = (ret << 8) | *(data + sizeof(uint16_t));
372                 break;
373         case 4:
374                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
375                 break;
376         default:
377                 MLX5_ASSERT(false);
378                 ret = 0;
379                 break;
380         }
381         return ret;
382 }
383
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* Zero mask - this field is not requested to change; skip. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* NOTE(review): a full 32-bit width is encoded as length 0 -
		 * presumably per the device command format; confirm in PRM. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset means "same as source". */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* No command appended - the mask selected nothing to modify. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
485
486 /**
487  * Convert modify-header set IPv4 address action to DV specification.
488  *
489  * @param[in,out] resource
490  *   Pointer to the modify-header resource.
491  * @param[in] action
492  *   Pointer to action specification.
493  * @param[out] error
494  *   Pointer to the error structure.
495  *
496  * @return
497  *   0 on success, a negative errno value otherwise and rte_errno is set.
498  */
499 static int
500 flow_dv_convert_action_modify_ipv4
501                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
502                          const struct rte_flow_action *action,
503                          struct rte_flow_error *error)
504 {
505         const struct rte_flow_action_set_ipv4 *conf =
506                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
507         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
508         struct rte_flow_item_ipv4 ipv4;
509         struct rte_flow_item_ipv4 ipv4_mask;
510
511         memset(&ipv4, 0, sizeof(ipv4));
512         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
513         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
514                 ipv4.hdr.src_addr = conf->ipv4_addr;
515                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
516         } else {
517                 ipv4.hdr.dst_addr = conf->ipv4_addr;
518                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
519         }
520         item.spec = &ipv4;
521         item.mask = &ipv4_mask;
522         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
523                                              MLX5_MODIFICATION_TYPE_SET, error);
524 }
525
526 /**
527  * Convert modify-header set IPv6 address action to DV specification.
528  *
529  * @param[in,out] resource
530  *   Pointer to the modify-header resource.
531  * @param[in] action
532  *   Pointer to action specification.
533  * @param[out] error
534  *   Pointer to the error structure.
535  *
536  * @return
537  *   0 on success, a negative errno value otherwise and rte_errno is set.
538  */
539 static int
540 flow_dv_convert_action_modify_ipv6
541                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
542                          const struct rte_flow_action *action,
543                          struct rte_flow_error *error)
544 {
545         const struct rte_flow_action_set_ipv6 *conf =
546                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
547         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
548         struct rte_flow_item_ipv6 ipv6;
549         struct rte_flow_item_ipv6 ipv6_mask;
550
551         memset(&ipv6, 0, sizeof(ipv6));
552         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
553         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
554                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
555                        sizeof(ipv6.hdr.src_addr));
556                 memcpy(&ipv6_mask.hdr.src_addr,
557                        &rte_flow_item_ipv6_mask.hdr.src_addr,
558                        sizeof(ipv6.hdr.src_addr));
559         } else {
560                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
561                        sizeof(ipv6.hdr.dst_addr));
562                 memcpy(&ipv6_mask.hdr.dst_addr,
563                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
564                        sizeof(ipv6.hdr.dst_addr));
565         }
566         item.spec = &ipv6;
567         item.mask = &ipv6_mask;
568         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
569                                              MLX5_MODIFICATION_TYPE_SET, error);
570 }
571
572 /**
573  * Convert modify-header set MAC address action to DV specification.
574  *
575  * @param[in,out] resource
576  *   Pointer to the modify-header resource.
577  * @param[in] action
578  *   Pointer to action specification.
579  * @param[out] error
580  *   Pointer to the error structure.
581  *
582  * @return
583  *   0 on success, a negative errno value otherwise and rte_errno is set.
584  */
585 static int
586 flow_dv_convert_action_modify_mac
587                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
588                          const struct rte_flow_action *action,
589                          struct rte_flow_error *error)
590 {
591         const struct rte_flow_action_set_mac *conf =
592                 (const struct rte_flow_action_set_mac *)(action->conf);
593         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
594         struct rte_flow_item_eth eth;
595         struct rte_flow_item_eth eth_mask;
596
597         memset(&eth, 0, sizeof(eth));
598         memset(&eth_mask, 0, sizeof(eth_mask));
599         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
600                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
601                        sizeof(eth.src.addr_bytes));
602                 memcpy(&eth_mask.src.addr_bytes,
603                        &rte_flow_item_eth_mask.src.addr_bytes,
604                        sizeof(eth_mask.src.addr_bytes));
605         } else {
606                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
607                        sizeof(eth.dst.addr_bytes));
608                 memcpy(&eth_mask.dst.addr_bytes,
609                        &rte_flow_item_eth_mask.dst.addr_bytes,
610                        sizeof(eth_mask.dst.addr_bytes));
611         }
612         item.spec = &eth;
613         item.mask = &eth_mask;
614         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
615                                              MLX5_MODIFICATION_TYPE_SET, error);
616 }
617
/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * Unlike the other converters this appends the modification command
 * directly instead of going through flow_dv_convert_modify_action().
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	/* The VID table entry carries its size in bits (12), not bytes. */
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	/*
	 * NOTE(review): conf->vlan_vid is big-endian in the action config
	 * and is placed into the upper 16 bits of data1 without a byte
	 * swap - presumably that already matches the big-endian command
	 * layout the device expects; confirm against the PRM.
	 */
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
659
/**
 * Convert modify-header set TP (transport port) action to DV specification.
 *
 * Picks UDP or TCP according to the flow attributes (initializing them
 * from the item list on first use) and builds a synthetic item for the
 * generic converter.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	/* Lazily derive L3/L4 attributes from the item list. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		/* Not UDP - the flow must carry TCP then. */
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
737
/**
 * Convert modify-header set TTL action to DV specification.
 *
 * Picks IPv4 TTL or IPv6 hop limit according to the flow attributes
 * (initializing them from the item list on first use) and builds a
 * synthetic item for the generic converter.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	/* Lazily derive L3/L4 attributes from the item list. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		/* Not IPv4 - the flow must carry IPv6 then. */
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
801
802 /**
803  * Convert modify-header decrement TTL action to DV specification.
804  *
805  * @param[in,out] resource
806  *   Pointer to the modify-header resource.
807  * @param[in] action
808  *   Pointer to action specification.
809  * @param[in] items
810  *   Pointer to rte_flow_item objects list.
811  * @param[in] attr
812  *   Pointer to flow attributes structure.
813  * @param[in] dev_flow
814  *   Pointer to the sub flow.
815  * @param[in] tunnel_decap
816  *   Whether action is after tunnel decapsulation.
817  * @param[out] error
818  *   Pointer to the error structure.
819  *
820  * @return
821  *   0 on success, a negative errno value otherwise and rte_errno is set.
822  */
823 static int
824 flow_dv_convert_action_modify_dec_ttl
825                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
826                          const struct rte_flow_item *items,
827                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
828                          bool tunnel_decap, struct rte_flow_error *error)
829 {
830         struct rte_flow_item item;
831         struct rte_flow_item_ipv4 ipv4;
832         struct rte_flow_item_ipv4 ipv4_mask;
833         struct rte_flow_item_ipv6 ipv6;
834         struct rte_flow_item_ipv6 ipv6_mask;
835         struct field_modify_info *field;
836
837         if (!attr->valid)
838                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
839         if (attr->ipv4) {
840                 memset(&ipv4, 0, sizeof(ipv4));
841                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
842                 ipv4.hdr.time_to_live = 0xFF;
843                 ipv4_mask.hdr.time_to_live = 0xFF;
844                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
845                 item.spec = &ipv4;
846                 item.mask = &ipv4_mask;
847                 field = modify_ipv4;
848         } else {
849                 MLX5_ASSERT(attr->ipv6);
850                 memset(&ipv6, 0, sizeof(ipv6));
851                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
852                 ipv6.hdr.hop_limits = 0xFF;
853                 ipv6_mask.hdr.hop_limits = 0xFF;
854                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
855                 item.spec = &ipv6;
856                 item.mask = &ipv6_mask;
857                 field = modify_ipv6;
858         }
859         return flow_dv_convert_modify_action(&item, field, NULL, resource,
860                                              MLX5_MODIFICATION_TYPE_ADD, error);
861 }
862
863 /**
864  * Convert modify-header increment/decrement TCP Sequence number
865  * to DV specification.
866  *
867  * @param[in,out] resource
868  *   Pointer to the modify-header resource.
869  * @param[in] action
870  *   Pointer to action specification.
871  * @param[out] error
872  *   Pointer to the error structure.
873  *
874  * @return
875  *   0 on success, a negative errno value otherwise and rte_errno is set.
876  */
877 static int
878 flow_dv_convert_action_modify_tcp_seq
879                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
880                          const struct rte_flow_action *action,
881                          struct rte_flow_error *error)
882 {
883         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
884         uint64_t value = rte_be_to_cpu_32(*conf);
885         struct rte_flow_item item;
886         struct rte_flow_item_tcp tcp;
887         struct rte_flow_item_tcp tcp_mask;
888
889         memset(&tcp, 0, sizeof(tcp));
890         memset(&tcp_mask, 0, sizeof(tcp_mask));
891         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
892                 /*
893                  * The HW has no decrement operation, only increment operation.
894                  * To simulate decrement X from Y using increment operation
895                  * we need to add UINT32_MAX X times to Y.
896                  * Each adding of UINT32_MAX decrements Y by 1.
897                  */
898                 value *= UINT32_MAX;
899         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
900         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
901         item.type = RTE_FLOW_ITEM_TYPE_TCP;
902         item.spec = &tcp;
903         item.mask = &tcp_mask;
904         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
905                                              MLX5_MODIFICATION_TYPE_ADD, error);
906 }
907
908 /**
909  * Convert modify-header increment/decrement TCP Acknowledgment number
910  * to DV specification.
911  *
912  * @param[in,out] resource
913  *   Pointer to the modify-header resource.
914  * @param[in] action
915  *   Pointer to action specification.
916  * @param[out] error
917  *   Pointer to the error structure.
918  *
919  * @return
920  *   0 on success, a negative errno value otherwise and rte_errno is set.
921  */
922 static int
923 flow_dv_convert_action_modify_tcp_ack
924                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
925                          const struct rte_flow_action *action,
926                          struct rte_flow_error *error)
927 {
928         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
929         uint64_t value = rte_be_to_cpu_32(*conf);
930         struct rte_flow_item item;
931         struct rte_flow_item_tcp tcp;
932         struct rte_flow_item_tcp tcp_mask;
933
934         memset(&tcp, 0, sizeof(tcp));
935         memset(&tcp_mask, 0, sizeof(tcp_mask));
936         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
937                 /*
938                  * The HW has no decrement operation, only increment operation.
939                  * To simulate decrement X from Y using increment operation
940                  * we need to add UINT32_MAX X times to Y.
941                  * Each adding of UINT32_MAX decrements Y by 1.
942                  */
943                 value *= UINT32_MAX;
944         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
945         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
946         item.type = RTE_FLOW_ITEM_TYPE_TCP;
947         item.spec = &tcp;
948         item.mask = &tcp_mask;
949         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
950                                              MLX5_MODIFICATION_TYPE_ADD, error);
951 }
952
/*
 * Map a metadata register (enum modify_reg index) to the corresponding
 * modify-header field identifier consumed by the firmware commands
 * built in the conversion routines below.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
966
967 /**
968  * Convert register set to DV specification.
969  *
970  * @param[in,out] resource
971  *   Pointer to the modify-header resource.
972  * @param[in] action
973  *   Pointer to action specification.
974  * @param[out] error
975  *   Pointer to the error structure.
976  *
977  * @return
978  *   0 on success, a negative errno value otherwise and rte_errno is set.
979  */
980 static int
981 flow_dv_convert_action_set_reg
982                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
983                          const struct rte_flow_action *action,
984                          struct rte_flow_error *error)
985 {
986         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
987         struct mlx5_modification_cmd *actions = resource->actions;
988         uint32_t i = resource->actions_num;
989
990         if (i >= MLX5_MAX_MODIFY_NUM)
991                 return rte_flow_error_set(error, EINVAL,
992                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
993                                           "too many items to modify");
994         MLX5_ASSERT(conf->id != REG_NON);
995         MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
996         actions[i] = (struct mlx5_modification_cmd) {
997                 .action_type = MLX5_MODIFICATION_TYPE_SET,
998                 .field = reg_to_field[conf->id],
999         };
1000         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
1001         actions[i].data1 = rte_cpu_to_be_32(conf->data);
1002         ++i;
1003         resource->actions_num = i;
1004         return 0;
1005 }
1006
1007 /**
1008  * Convert SET_TAG action to DV specification.
1009  *
1010  * @param[in] dev
1011  *   Pointer to the rte_eth_dev structure.
1012  * @param[in,out] resource
1013  *   Pointer to the modify-header resource.
1014  * @param[in] conf
1015  *   Pointer to action specification.
1016  * @param[out] error
1017  *   Pointer to the error structure.
1018  *
1019  * @return
1020  *   0 on success, a negative errno value otherwise and rte_errno is set.
1021  */
1022 static int
1023 flow_dv_convert_action_set_tag
1024                         (struct rte_eth_dev *dev,
1025                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1026                          const struct rte_flow_action_set_tag *conf,
1027                          struct rte_flow_error *error)
1028 {
1029         rte_be32_t data = rte_cpu_to_be_32(conf->data);
1030         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
1031         struct rte_flow_item item = {
1032                 .spec = &data,
1033                 .mask = &mask,
1034         };
1035         struct field_modify_info reg_c_x[] = {
1036                 [1] = {0, 0, 0},
1037         };
1038         enum mlx5_modification_field reg_type;
1039         int ret;
1040
1041         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1042         if (ret < 0)
1043                 return ret;
1044         MLX5_ASSERT(ret != REG_NON);
1045         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1046         reg_type = reg_to_field[ret];
1047         MLX5_ASSERT(reg_type > 0);
1048         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1049         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1050                                              MLX5_MODIFICATION_TYPE_SET, error);
1051 }
1052
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* Default: copy the full 32-bit register. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/*
	 * Adjust reg_c[0] usage according to reported mask: only the bits
	 * in dv_regc0_mask are available for metadata, so the copy must be
	 * narrowed and shifted into/out of that bit range.
	 */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			/* Copying out of reg_c[0]: keep only the usable bits. */
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1128
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Limit the mark to the intersection with the device mark mask. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	/* Resolve the metadata register that will carry the MARK value. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * Only the bits in dv_regc0_mask of reg_c[0] are usable:
		 * shift value and mask into that range. The double byte
		 * swap keeps the shift in CPU order while the stored
		 * values stay big-endian.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1185
1186 /**
1187  * Get metadata register index for specified steering domain.
1188  *
1189  * @param[in] dev
1190  *   Pointer to the rte_eth_dev structure.
1191  * @param[in] attr
1192  *   Attributes of flow to determine steering domain.
1193  * @param[out] error
1194  *   Pointer to the error structure.
1195  *
1196  * @return
1197  *   positive index on success, a negative errno value otherwise
1198  *   and rte_errno is set.
1199  */
1200 static enum modify_reg
1201 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1202                          const struct rte_flow_attr *attr,
1203                          struct rte_flow_error *error)
1204 {
1205         int reg =
1206                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1207                                           MLX5_METADATA_FDB :
1208                                             attr->egress ?
1209                                             MLX5_METADATA_TX :
1210                                             MLX5_METADATA_RX, 0, error);
1211         if (reg < 0)
1212                 return rte_flow_error_set(error,
1213                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1214                                           NULL, "unavailable "
1215                                           "metadata register");
1216         return reg;
1217 }
1218
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	/* Select the metadata register for the flow's steering domain. */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there are no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
		/* Shift value/mask into the reg_c[0] bits given by the mask. */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		/* No target bit may fall outside the usable reg_c[0] mask. */
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1282
1283 /**
1284  * Convert modify-header set IPv4 DSCP action to DV specification.
1285  *
1286  * @param[in,out] resource
1287  *   Pointer to the modify-header resource.
1288  * @param[in] action
1289  *   Pointer to action specification.
1290  * @param[out] error
1291  *   Pointer to the error structure.
1292  *
1293  * @return
1294  *   0 on success, a negative errno value otherwise and rte_errno is set.
1295  */
1296 static int
1297 flow_dv_convert_action_modify_ipv4_dscp
1298                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1299                          const struct rte_flow_action *action,
1300                          struct rte_flow_error *error)
1301 {
1302         const struct rte_flow_action_set_dscp *conf =
1303                 (const struct rte_flow_action_set_dscp *)(action->conf);
1304         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1305         struct rte_flow_item_ipv4 ipv4;
1306         struct rte_flow_item_ipv4 ipv4_mask;
1307
1308         memset(&ipv4, 0, sizeof(ipv4));
1309         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1310         ipv4.hdr.type_of_service = conf->dscp;
1311         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1312         item.spec = &ipv4;
1313         item.mask = &ipv4_mask;
1314         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1315                                              MLX5_MODIFICATION_TYPE_SET, error);
1316 }
1317
1318 /**
1319  * Convert modify-header set IPv6 DSCP action to DV specification.
1320  *
1321  * @param[in,out] resource
1322  *   Pointer to the modify-header resource.
1323  * @param[in] action
1324  *   Pointer to action specification.
1325  * @param[out] error
1326  *   Pointer to the error structure.
1327  *
1328  * @return
1329  *   0 on success, a negative errno value otherwise and rte_errno is set.
1330  */
1331 static int
1332 flow_dv_convert_action_modify_ipv6_dscp
1333                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1334                          const struct rte_flow_action *action,
1335                          struct rte_flow_error *error)
1336 {
1337         const struct rte_flow_action_set_dscp *conf =
1338                 (const struct rte_flow_action_set_dscp *)(action->conf);
1339         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1340         struct rte_flow_item_ipv6 ipv6;
1341         struct rte_flow_item_ipv6 ipv6_mask;
1342
1343         memset(&ipv6, 0, sizeof(ipv6));
1344         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1345         /*
1346          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1347          * rdma-core only accept the DSCP bits byte aligned start from
1348          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1349          * bits in IPv6 case as rdma-core requires byte aligned value.
1350          */
1351         ipv6.hdr.vtc_flow = conf->dscp;
1352         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1353         item.spec = &ipv6;
1354         item.mask = &ipv6_mask;
1355         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1356                                              MLX5_MODIFICATION_TYPE_SET, error);
1357 }
1358
1359 /**
1360  * Validate MARK item.
1361  *
1362  * @param[in] dev
1363  *   Pointer to the rte_eth_dev structure.
1364  * @param[in] item
1365  *   Item specification.
1366  * @param[in] attr
1367  *   Attributes of flow that includes this item.
1368  * @param[out] error
1369  *   Pointer to error structure.
1370  *
1371  * @return
1372  *   0 on success, a negative errno value otherwise and rte_errno is set.
1373  */
1374 static int
1375 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1376                            const struct rte_flow_item *item,
1377                            const struct rte_flow_attr *attr __rte_unused,
1378                            struct rte_flow_error *error)
1379 {
1380         struct mlx5_priv *priv = dev->data->dev_private;
1381         struct mlx5_dev_config *config = &priv->config;
1382         const struct rte_flow_item_mark *spec = item->spec;
1383         const struct rte_flow_item_mark *mask = item->mask;
1384         const struct rte_flow_item_mark nic_mask = {
1385                 .id = priv->sh->dv_mark_mask,
1386         };
1387         int ret;
1388
1389         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1390                 return rte_flow_error_set(error, ENOTSUP,
1391                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1392                                           "extended metadata feature"
1393                                           " isn't enabled");
1394         if (!mlx5_flow_ext_mreg_supported(dev))
1395                 return rte_flow_error_set(error, ENOTSUP,
1396                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1397                                           "extended metadata register"
1398                                           " isn't supported");
1399         if (!nic_mask.id)
1400                 return rte_flow_error_set(error, ENOTSUP,
1401                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1402                                           "extended metadata register"
1403                                           " isn't available");
1404         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1405         if (ret < 0)
1406                 return ret;
1407         if (!spec)
1408                 return rte_flow_error_set(error, EINVAL,
1409                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1410                                           item->spec,
1411                                           "data cannot be empty");
1412         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1413                 return rte_flow_error_set(error, EINVAL,
1414                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1415                                           &spec->id,
1416                                           "mark id exceeds the limit");
1417         if (!mask)
1418                 mask = &nic_mask;
1419         if (!mask->id)
1420                 return rte_flow_error_set(error, EINVAL,
1421                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1422                                         "mask cannot be zero");
1423
1424         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1425                                         (const uint8_t *)&nic_mask,
1426                                         sizeof(struct rte_flow_item_mark),
1427                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1428         if (ret < 0)
1429                 return ret;
1430         return 0;
1431 }
1432
1433 /**
1434  * Validate META item.
1435  *
1436  * @param[in] dev
1437  *   Pointer to the rte_eth_dev structure.
1438  * @param[in] item
1439  *   Item specification.
1440  * @param[in] attr
1441  *   Attributes of flow that includes this item.
1442  * @param[out] error
1443  *   Pointer to error structure.
1444  *
1445  * @return
1446  *   0 on success, a negative errno value otherwise and rte_errno is set.
1447  */
1448 static int
1449 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1450                            const struct rte_flow_item *item,
1451                            const struct rte_flow_attr *attr,
1452                            struct rte_flow_error *error)
1453 {
1454         struct mlx5_priv *priv = dev->data->dev_private;
1455         struct mlx5_dev_config *config = &priv->config;
1456         const struct rte_flow_item_meta *spec = item->spec;
1457         const struct rte_flow_item_meta *mask = item->mask;
1458         struct rte_flow_item_meta nic_mask = {
1459                 .data = UINT32_MAX
1460         };
1461         int reg;
1462         int ret;
1463
1464         if (!spec)
1465                 return rte_flow_error_set(error, EINVAL,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1467                                           item->spec,
1468                                           "data cannot be empty");
1469         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1470                 if (!mlx5_flow_ext_mreg_supported(dev))
1471                         return rte_flow_error_set(error, ENOTSUP,
1472                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1473                                           "extended metadata register"
1474                                           " isn't supported");
1475                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1476                 if (reg < 0)
1477                         return reg;
1478                 if (reg == REG_B)
1479                         return rte_flow_error_set(error, ENOTSUP,
1480                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1481                                           "match on reg_b "
1482                                           "isn't supported");
1483                 if (reg != REG_A)
1484                         nic_mask.data = priv->sh->dv_meta_mask;
1485         } else if (attr->transfer) {
1486                 return rte_flow_error_set(error, ENOTSUP,
1487                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1488                                         "extended metadata feature "
1489                                         "should be enabled when "
1490                                         "meta item is requested "
1491                                         "with e-switch mode ");
1492         }
1493         if (!mask)
1494                 mask = &rte_flow_item_meta_mask;
1495         if (!mask->data)
1496                 return rte_flow_error_set(error, EINVAL,
1497                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1498                                         "mask cannot be zero");
1499
1500         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1501                                         (const uint8_t *)&nic_mask,
1502                                         sizeof(struct rte_flow_item_meta),
1503                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1504         return ret;
1505 }
1506
1507 /**
1508  * Validate TAG item.
1509  *
1510  * @param[in] dev
1511  *   Pointer to the rte_eth_dev structure.
1512  * @param[in] item
1513  *   Item specification.
1514  * @param[in] attr
1515  *   Attributes of flow that includes this item.
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
1522 static int
1523 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1524                           const struct rte_flow_item *item,
1525                           const struct rte_flow_attr *attr __rte_unused,
1526                           struct rte_flow_error *error)
1527 {
1528         const struct rte_flow_item_tag *spec = item->spec;
1529         const struct rte_flow_item_tag *mask = item->mask;
1530         const struct rte_flow_item_tag nic_mask = {
1531                 .data = RTE_BE32(UINT32_MAX),
1532                 .index = 0xff,
1533         };
1534         int ret;
1535
1536         if (!mlx5_flow_ext_mreg_supported(dev))
1537                 return rte_flow_error_set(error, ENOTSUP,
1538                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1539                                           "extensive metadata register"
1540                                           " isn't supported");
1541         if (!spec)
1542                 return rte_flow_error_set(error, EINVAL,
1543                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1544                                           item->spec,
1545                                           "data cannot be empty");
1546         if (!mask)
1547                 mask = &rte_flow_item_tag_mask;
1548         if (!mask->data)
1549                 return rte_flow_error_set(error, EINVAL,
1550                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1551                                         "mask cannot be zero");
1552
1553         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1554                                         (const uint8_t *)&nic_mask,
1555                                         sizeof(struct rte_flow_item_tag),
1556                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1557         if (ret < 0)
1558                 return ret;
1559         if (mask->index != 0xff)
1560                 return rte_flow_error_set(error, EINVAL,
1561                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1562                                           "partial mask for tag index"
1563                                           " is not supported");
1564         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1565         if (ret < 0)
1566                 return ret;
1567         MLX5_ASSERT(ret != REG_NON);
1568         return 0;
1569 }
1570
1571 /**
1572  * Validate vport item.
1573  *
1574  * @param[in] dev
1575  *   Pointer to the rte_eth_dev structure.
1576  * @param[in] item
1577  *   Item specification.
1578  * @param[in] attr
1579  *   Attributes of flow that includes this item.
1580  * @param[in] item_flags
1581  *   Bit-fields that holds the items detected until now.
1582  * @param[out] error
1583  *   Pointer to error structure.
1584  *
1585  * @return
1586  *   0 on success, a negative errno value otherwise and rte_errno is set.
1587  */
1588 static int
1589 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1590                               const struct rte_flow_item *item,
1591                               const struct rte_flow_attr *attr,
1592                               uint64_t item_flags,
1593                               struct rte_flow_error *error)
1594 {
1595         const struct rte_flow_item_port_id *spec = item->spec;
1596         const struct rte_flow_item_port_id *mask = item->mask;
1597         const struct rte_flow_item_port_id switch_mask = {
1598                         .id = 0xffffffff,
1599         };
1600         struct mlx5_priv *esw_priv;
1601         struct mlx5_priv *dev_priv;
1602         int ret;
1603
1604         if (!attr->transfer)
1605                 return rte_flow_error_set(error, EINVAL,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM,
1607                                           NULL,
1608                                           "match on port id is valid only"
1609                                           " when transfer flag is enabled");
1610         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1611                 return rte_flow_error_set(error, ENOTSUP,
1612                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1613                                           "multiple source ports are not"
1614                                           " supported");
1615         if (!mask)
1616                 mask = &switch_mask;
1617         if (mask->id != 0xffffffff)
1618                 return rte_flow_error_set(error, ENOTSUP,
1619                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1620                                            mask,
1621                                            "no support for partial mask on"
1622                                            " \"id\" field");
1623         ret = mlx5_flow_item_acceptable
1624                                 (item, (const uint8_t *)mask,
1625                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1626                                  sizeof(struct rte_flow_item_port_id),
1627                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1628         if (ret)
1629                 return ret;
1630         if (!spec)
1631                 return 0;
1632         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1633         if (!esw_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636                                           "failed to obtain E-Switch info for"
1637                                           " port");
1638         dev_priv = mlx5_dev_to_eswitch_info(dev);
1639         if (!dev_priv)
1640                 return rte_flow_error_set(error, rte_errno,
1641                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1642                                           NULL,
1643                                           "failed to obtain E-Switch info");
1644         if (esw_priv->domain_id != dev_priv->domain_id)
1645                 return rte_flow_error_set(error, EINVAL,
1646                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1647                                           "cannot match on a port from a"
1648                                           " different E-Switch");
1649         return 0;
1650 }
1651
1652 /**
1653  * Validate VLAN item.
1654  *
1655  * @param[in] item
1656  *   Item specification.
1657  * @param[in] item_flags
1658  *   Bit-fields that holds the items detected until now.
1659  * @param[in] dev
1660  *   Ethernet device flow is being created on.
1661  * @param[out] error
1662  *   Pointer to error structure.
1663  *
1664  * @return
1665  *   0 on success, a negative errno value otherwise and rte_errno is set.
1666  */
1667 static int
1668 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1669                            uint64_t item_flags,
1670                            struct rte_eth_dev *dev,
1671                            struct rte_flow_error *error)
1672 {
1673         const struct rte_flow_item_vlan *mask = item->mask;
1674         const struct rte_flow_item_vlan nic_mask = {
1675                 .tci = RTE_BE16(UINT16_MAX),
1676                 .inner_type = RTE_BE16(UINT16_MAX),
1677                 .has_more_vlan = 1,
1678         };
1679         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1680         int ret;
1681         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1682                                         MLX5_FLOW_LAYER_INNER_L4) :
1683                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1684                                         MLX5_FLOW_LAYER_OUTER_L4);
1685         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1686                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1687
1688         if (item_flags & vlanm)
1689                 return rte_flow_error_set(error, EINVAL,
1690                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1691                                           "multiple VLAN layers not supported");
1692         else if ((item_flags & l34m) != 0)
1693                 return rte_flow_error_set(error, EINVAL,
1694                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1695                                           "VLAN cannot follow L3/L4 layer");
1696         if (!mask)
1697                 mask = &rte_flow_item_vlan_mask;
1698         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1699                                         (const uint8_t *)&nic_mask,
1700                                         sizeof(struct rte_flow_item_vlan),
1701                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1702         if (ret)
1703                 return ret;
1704         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1705                 struct mlx5_priv *priv = dev->data->dev_private;
1706
1707                 if (priv->vmwa_context) {
1708                         /*
1709                          * Non-NULL context means we have a virtual machine
1710                          * and SR-IOV enabled, we have to create VLAN interface
1711                          * to make hypervisor to setup E-Switch vport
1712                          * context correctly. We avoid creating the multiple
1713                          * VLAN interfaces, so we cannot support VLAN tag mask.
1714                          */
1715                         return rte_flow_error_set(error, EINVAL,
1716                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1717                                                   item,
1718                                                   "VLAN tag mask is not"
1719                                                   " supported in virtual"
1720                                                   " environment");
1721                 }
1722         }
1723         return 0;
1724 }
1725
1726 /*
1727  * GTP flags are contained in 1 byte of the format:
1728  * -------------------------------------------
1729  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1730  * |-----------------------------------------|
1731  * | value | Version | PT | Res | E | S | PN |
1732  * -------------------------------------------
1733  *
1734  * Matching is supported only for GTP flags E, S, PN.
1735  */
1736 #define MLX5_GTP_FLAGS_MASK     0x07
1737
1738 /**
1739  * Validate GTP item.
1740  *
1741  * @param[in] dev
1742  *   Pointer to the rte_eth_dev structure.
1743  * @param[in] item
1744  *   Item specification.
1745  * @param[in] item_flags
1746  *   Bit-fields that holds the items detected until now.
1747  * @param[out] error
1748  *   Pointer to error structure.
1749  *
1750  * @return
1751  *   0 on success, a negative errno value otherwise and rte_errno is set.
1752  */
1753 static int
1754 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1755                           const struct rte_flow_item *item,
1756                           uint64_t item_flags,
1757                           struct rte_flow_error *error)
1758 {
1759         struct mlx5_priv *priv = dev->data->dev_private;
1760         const struct rte_flow_item_gtp *spec = item->spec;
1761         const struct rte_flow_item_gtp *mask = item->mask;
1762         const struct rte_flow_item_gtp nic_mask = {
1763                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1764                 .msg_type = 0xff,
1765                 .teid = RTE_BE32(0xffffffff),
1766         };
1767
1768         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1771                                           "GTP support is not enabled");
1772         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1775                                           "multiple tunnel layers not"
1776                                           " supported");
1777         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1780                                           "no outer UDP layer found");
1781         if (!mask)
1782                 mask = &rte_flow_item_gtp_mask;
1783         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1784                 return rte_flow_error_set(error, ENOTSUP,
1785                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1786                                           "Match is supported for GTP"
1787                                           " flags only");
1788         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1789                                          (const uint8_t *)&nic_mask,
1790                                          sizeof(struct rte_flow_item_gtp),
1791                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1792 }
1793
1794 /**
1795  * Validate IPV4 item.
1796  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1797  * add specific validation of fragment_offset field,
1798  *
1799  * @param[in] item
1800  *   Item specification.
1801  * @param[in] item_flags
1802  *   Bit-fields that holds the items detected until now.
1803  * @param[out] error
1804  *   Pointer to error structure.
1805  *
1806  * @return
1807  *   0 on success, a negative errno value otherwise and rte_errno is set.
1808  */
static int
flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
			   uint64_t item_flags,
			   uint64_t last_item,
			   uint16_t ether_type,
			   struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *last = item->last;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	rte_be16_t fragment_offset_spec = 0;
	rte_be16_t fragment_offset_last = 0;
	/* Fields of the IPv4 header this PMD allows matching on. */
	const struct rte_flow_item_ipv4 nic_ipv4_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.fragment_offset = RTE_BE16(0xffff),
			.next_proto_id = 0xff,
			.time_to_live = 0xff,
		},
	};

	/* Generic IPv4 item validation first; ranges allowed for frag. */
	ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
					   ether_type, &nic_ipv4_mask,
					   MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret < 0)
		return ret;
	/* Effective fragment_offset match value is spec AND mask. */
	if (spec && mask)
		fragment_offset_spec = spec->hdr.fragment_offset &
				       mask->hdr.fragment_offset;
	/* No fragment_offset match requested - nothing more to validate. */
	if (!fragment_offset_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
			!= RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " fragment_offset");
	/*
	 * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
	 * indicating this is 1st fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* A single non-zero value (no range) is not expressible in HW. */
	if (fragment_offset_spec && !last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	/* spec and last are valid, validate the specified range. */
	fragment_offset_last = last->hdr.fragment_offset &
			       mask->hdr.fragment_offset;
	/*
	 * Match on fragment_offset spec 0x2001 and last 0x3fff
	 * means MF is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
	    fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x1fff
	 * means MF is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (fragment_offset_spec == RTE_BE16(1) &&
	    fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/*
	 * Match on fragment_offset spec 0x0001 and last 0x3fff
	 * means MF and/or frag-offset is not 0.
	 * This is a fragmented packet.
	 * Other range values are invalid and rejected.
	 */
	if (!(fragment_offset_spec == RTE_BE16(1) &&
	      fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
					  "specified range not supported");
	return 0;
}
1909
1910 /**
1911  * Validate IPV6 fragment extension item.
1912  *
1913  * @param[in] item
1914  *   Item specification.
1915  * @param[in] item_flags
1916  *   Bit-fields that holds the items detected until now.
1917  * @param[out] error
1918  *   Pointer to error structure.
1919  *
1920  * @return
1921  *   0 on success, a negative errno value otherwise and rte_errno is set.
1922  */
static int
flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
				    uint64_t item_flags,
				    struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
	const struct rte_flow_item_ipv6_frag_ext *last = item->last;
	const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
	rte_be16_t frag_data_spec = 0;
	rte_be16_t frag_data_last = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	/* L4 layer bit for the level (inner/outer) this item lives on. */
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret = 0;
	/* Fields of the fragment extension header supported for match. */
	struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};

	/* The extension header is part of L3 and must precede any L4. */
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item cannot "
					  "follow L4 item.");
	/* An IPv6 item on the same level must already have been seen. */
	if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
	    (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "ipv6 fragment extension item must "
					  "follow ipv6 item");
	/* Effective frag_data match value is spec AND mask. */
	if (spec && mask)
		frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
	/* No frag_data match requested - nothing more to validate. */
	if (!frag_data_spec)
		return 0;
	/*
	 * spec and mask are valid, enforce using full mask to make sure the
	 * complete value is used correctly.
	 */
	if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
				RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  item, "must use full mask for"
					  " frag_data");
	/*
	 * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
	 * This is 1st fragment of fragmented packet.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "match on first fragment not "
					  "supported");
	/* A single non-zero value (no range) is not expressible in HW. */
	if (frag_data_spec && !last)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "specified value not supported");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&nic_mask,
				 sizeof(struct rte_flow_item_ipv6_frag_ext),
				 MLX5_ITEM_RANGE_ACCEPTED, error);
	if (ret)
		return ret;
	/* spec and last are valid, validate the specified range. */
	frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
	/*
	 * Match on frag_data spec 0x0009 and last 0xfff9
	 * means M is 1 and frag-offset is > 0.
	 * This packet is fragment 2nd and onward, excluding last.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
				       RTE_IPV6_EHDR_MF_MASK) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on following "
					  "fragments not supported");
	/*
	 * Match on frag_data spec 0x0008 and last 0xfff8
	 * means M is 0 and frag-offset is > 0.
	 * This packet is last fragment of fragmented packet.
	 * This is not yet supported in MLX5, return appropriate
	 * error message.
	 */
	if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
	    frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_LAST,
					  last, "match on last "
					  "fragment not supported");
	/* Other range values are invalid and rejected. */
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
				  "specified range not supported");
}
2023
2024 /**
2025  * Validate the pop VLAN action.
2026  *
2027  * @param[in] dev
2028  *   Pointer to the rte_eth_dev structure.
2029  * @param[in] action_flags
2030  *   Holds the actions detected until now.
2031  * @param[in] action
2032  *   Pointer to the pop vlan action.
2033  * @param[in] item_flags
2034  *   The items found in this flow rule.
2035  * @param[in] attr
2036  *   Pointer to flow attributes.
2037  * @param[out] error
2038  *   Pointer to error structure.
2039  *
2040  * @return
2041  *   0 on success, a negative errno value otherwise and rte_errno is set.
2042  */
2043 static int
2044 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2045                                  uint64_t action_flags,
2046                                  const struct rte_flow_action *action,
2047                                  uint64_t item_flags,
2048                                  const struct rte_flow_attr *attr,
2049                                  struct rte_flow_error *error)
2050 {
2051         const struct mlx5_priv *priv = dev->data->dev_private;
2052
2053         (void)action;
2054         (void)attr;
2055         if (!priv->sh->pop_vlan_action)
2056                 return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                           NULL,
2059                                           "pop vlan action is not supported");
2060         if (attr->egress)
2061                 return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2063                                           NULL,
2064                                           "pop vlan action not supported for "
2065                                           "egress");
2066         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2067                 return rte_flow_error_set(error, ENOTSUP,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "no support for multiple VLAN "
2070                                           "actions");
2071         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2072         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2073             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2074                 return rte_flow_error_set(error, ENOTSUP,
2075                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2076                                           NULL,
2077                                           "cannot pop vlan after decap without "
2078                                           "match on inner vlan in the flow");
2079         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2080         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2081             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2082                 return rte_flow_error_set(error, ENOTSUP,
2083                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2084                                           NULL,
2085                                           "cannot pop vlan without a "
2086                                           "match on (outer) vlan in the flow");
2087         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2088                 return rte_flow_error_set(error, EINVAL,
2089                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2090                                           "wrong action order, port_id should "
2091                                           "be after pop VLAN action");
2092         if (!attr->transfer && priv->representor)
2093                 return rte_flow_error_set(error, ENOTSUP,
2094                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2095                                           "pop vlan action for VF representor "
2096                                           "not supported on NIC table");
2097         return 0;
2098 }
2099
2100 /**
2101  * Get VLAN default info from vlan match info.
2102  *
2103  * @param[in] items
2104  *   the list of item specifications.
2105  * @param[out] vlan
2106  *   pointer VLAN info to fill to.
2107  *
 * @note
 *   No return value; @p vlan is updated in place from the first VLAN item
 *   found in the pattern (only full-mask fields are taken).
2110  */
2111 static void
2112 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2113                                   struct rte_vlan_hdr *vlan)
2114 {
2115         const struct rte_flow_item_vlan nic_mask = {
2116                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2117                                 MLX5DV_FLOW_VLAN_VID_MASK),
2118                 .inner_type = RTE_BE16(0xffff),
2119         };
2120
2121         if (items == NULL)
2122                 return;
2123         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2124                 int type = items->type;
2125
2126                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2127                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2128                         break;
2129         }
2130         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2131                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2132                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2133
2134                 /* If VLAN item in pattern doesn't contain data, return here. */
2135                 if (!vlan_v)
2136                         return;
2137                 if (!vlan_m)
2138                         vlan_m = &nic_mask;
2139                 /* Only full match values are accepted */
2140                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2141                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2142                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2143                         vlan->vlan_tci |=
2144                                 rte_be_to_cpu_16(vlan_v->tci &
2145                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2146                 }
2147                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2148                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2149                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2150                         vlan->vlan_tci |=
2151                                 rte_be_to_cpu_16(vlan_v->tci &
2152                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2153                 }
2154                 if (vlan_m->inner_type == nic_mask.inner_type)
2155                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2156                                                            vlan_m->inner_type);
2157         }
2158 }
2159
2160 /**
2161  * Validate the push VLAN action.
2162  *
2163  * @param[in] dev
2164  *   Pointer to the rte_eth_dev structure.
2165  * @param[in] action_flags
2166  *   Holds the actions detected until now.
2167  * @param[in] item_flags
2168  *   The items found in this flow rule.
2169  * @param[in] action
2170  *   Pointer to the action structure.
2171  * @param[in] attr
2172  *   Pointer to flow attributes
2173  * @param[out] error
2174  *   Pointer to error structure.
2175  *
2176  * @return
2177  *   0 on success, a negative errno value otherwise and rte_errno is set.
2178  */
2179 static int
2180 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2181                                   uint64_t action_flags,
2182                                   const struct rte_flow_item_vlan *vlan_m,
2183                                   const struct rte_flow_action *action,
2184                                   const struct rte_flow_attr *attr,
2185                                   struct rte_flow_error *error)
2186 {
2187         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2188         const struct mlx5_priv *priv = dev->data->dev_private;
2189
2190         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2191             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2192                 return rte_flow_error_set(error, EINVAL,
2193                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2194                                           "invalid vlan ethertype");
2195         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2196                 return rte_flow_error_set(error, EINVAL,
2197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2198                                           "wrong action order, port_id should "
2199                                           "be after push VLAN");
2200         if (!attr->transfer && priv->representor)
2201                 return rte_flow_error_set(error, ENOTSUP,
2202                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2203                                           "push vlan action for VF representor "
2204                                           "not supported on NIC table");
2205         if (vlan_m &&
2206             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2207             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2208                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2209             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2210             !(mlx5_flow_find_action
2211                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2212                 return rte_flow_error_set(error, EINVAL,
2213                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2214                                           "not full match mask on VLAN PCP and "
2215                                           "there is no of_set_vlan_pcp action, "
2216                                           "push VLAN action cannot figure out "
2217                                           "PCP value");
2218         if (vlan_m &&
2219             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2220             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2221                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2222             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2223             !(mlx5_flow_find_action
2224                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2227                                           "not full match mask on VLAN VID and "
2228                                           "there is no of_set_vlan_vid action, "
2229                                           "push VLAN action cannot figure out "
2230                                           "VID value");
2231         (void)attr;
2232         return 0;
2233 }
2234
2235 /**
2236  * Validate the set VLAN PCP.
2237  *
2238  * @param[in] action_flags
2239  *   Holds the actions detected until now.
2240  * @param[in] actions
2241  *   Pointer to the list of actions remaining in the flow rule.
2242  * @param[out] error
2243  *   Pointer to error structure.
2244  *
2245  * @return
2246  *   0 on success, a negative errno value otherwise and rte_errno is set.
2247  */
2248 static int
2249 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2250                                      const struct rte_flow_action actions[],
2251                                      struct rte_flow_error *error)
2252 {
2253         const struct rte_flow_action *action = actions;
2254         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2255
2256         if (conf->vlan_pcp > 7)
2257                 return rte_flow_error_set(error, EINVAL,
2258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2259                                           "VLAN PCP value is too big");
2260         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2261                 return rte_flow_error_set(error, ENOTSUP,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "set VLAN PCP action must follow "
2264                                           "the push VLAN action");
2265         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2266                 return rte_flow_error_set(error, ENOTSUP,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "Multiple VLAN PCP modification are "
2269                                           "not supported");
2270         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "wrong action order, port_id should "
2274                                           "be after set VLAN PCP");
2275         return 0;
2276 }
2277
2278 /**
2279  * Validate the set VLAN VID.
2280  *
2281  * @param[in] item_flags
2282  *   Holds the items detected in this rule.
2283  * @param[in] action_flags
2284  *   Holds the actions detected until now.
2285  * @param[in] actions
2286  *   Pointer to the list of actions remaining in the flow rule.
2287  * @param[out] error
2288  *   Pointer to error structure.
2289  *
2290  * @return
2291  *   0 on success, a negative errno value otherwise and rte_errno is set.
2292  */
2293 static int
2294 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2295                                      uint64_t action_flags,
2296                                      const struct rte_flow_action actions[],
2297                                      struct rte_flow_error *error)
2298 {
2299         const struct rte_flow_action *action = actions;
2300         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2301
2302         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2303                 return rte_flow_error_set(error, EINVAL,
2304                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2305                                           "VLAN VID value is too big");
2306         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2307             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2308                 return rte_flow_error_set(error, ENOTSUP,
2309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2310                                           "set VLAN VID action must follow push"
2311                                           " VLAN action or match on VLAN item");
2312         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2313                 return rte_flow_error_set(error, ENOTSUP,
2314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2315                                           "Multiple VLAN VID modifications are "
2316                                           "not supported");
2317         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2318                 return rte_flow_error_set(error, EINVAL,
2319                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2320                                           "wrong action order, port_id should "
2321                                           "be after set VLAN VID");
2322         return 0;
2323 }
2324
/**
 * Validate the FLAG action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_flag(struct rte_eth_dev *dev,
			     uint64_t action_flags,
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	int ret;

	/* Fall back if no extended metadata register support. */
	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return mlx5_flow_validate_action_flag(action_flags, attr,
						      error);
	/* Extensive metadata mode requires registers. */
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "no metadata registers "
					  "to support flag action");
	/* The default mark bit must fit into the available mark mask. */
	if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "extended metadata register"
					  " isn't available");
	/* A register for MARK must be resolvable; a positive id is valid. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret > 0);
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	return 0;
}
2380
2381 /**
2382  * Validate MARK action.
2383  *
2384  * @param[in] dev
2385  *   Pointer to the rte_eth_dev structure.
2386  * @param[in] action
2387  *   Pointer to action.
2388  * @param[in] action_flags
2389  *   Holds the actions detected until now.
2390  * @param[in] attr
2391  *   Pointer to flow attributes
2392  * @param[out] error
2393  *   Pointer to error structure.
2394  *
2395  * @return
2396  *   0 on success, a negative errno value otherwise and rte_errno is set.
2397  */
2398 static int
2399 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2400                              const struct rte_flow_action *action,
2401                              uint64_t action_flags,
2402                              const struct rte_flow_attr *attr,
2403                              struct rte_flow_error *error)
2404 {
2405         struct mlx5_priv *priv = dev->data->dev_private;
2406         struct mlx5_dev_config *config = &priv->config;
2407         const struct rte_flow_action_mark *mark = action->conf;
2408         int ret;
2409
2410         /* Fall back if no extended metadata register support. */
2411         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2412                 return mlx5_flow_validate_action_mark(action, action_flags,
2413                                                       attr, error);
2414         /* Extensive metadata mode requires registers. */
2415         if (!mlx5_flow_ext_mreg_supported(dev))
2416                 return rte_flow_error_set(error, ENOTSUP,
2417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2418                                           "no metadata registers "
2419                                           "to support mark action");
2420         if (!priv->sh->dv_mark_mask)
2421                 return rte_flow_error_set(error, ENOTSUP,
2422                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2423                                           "extended metadata register"
2424                                           " isn't available");
2425         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2426         if (ret < 0)
2427                 return ret;
2428         MLX5_ASSERT(ret > 0);
2429         if (!mark)
2430                 return rte_flow_error_set(error, EINVAL,
2431                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2432                                           "configuration cannot be null");
2433         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2434                 return rte_flow_error_set(error, EINVAL,
2435                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2436                                           &mark->id,
2437                                           "mark id exceeds the limit");
2438         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2441                                           "can't flag and mark in same flow");
2442         if (action_flags & MLX5_FLOW_ACTION_MARK)
2443                 return rte_flow_error_set(error, EINVAL,
2444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2445                                           "can't have 2 mark actions in same"
2446                                           " flow");
2447         return 0;
2448 }
2449
2450 /**
2451  * Validate SET_META action.
2452  *
2453  * @param[in] dev
2454  *   Pointer to the rte_eth_dev structure.
2455  * @param[in] action
2456  *   Pointer to the action structure.
2457  * @param[in] action_flags
2458  *   Holds the actions detected until now.
2459  * @param[in] attr
2460  *   Pointer to flow attributes
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 static int
2468 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2469                                  const struct rte_flow_action *action,
2470                                  uint64_t action_flags __rte_unused,
2471                                  const struct rte_flow_attr *attr,
2472                                  struct rte_flow_error *error)
2473 {
2474         const struct rte_flow_action_set_meta *conf;
2475         uint32_t nic_mask = UINT32_MAX;
2476         int reg;
2477
2478         if (!mlx5_flow_ext_mreg_supported(dev))
2479                 return rte_flow_error_set(error, ENOTSUP,
2480                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2481                                           "extended metadata register"
2482                                           " isn't supported");
2483         reg = flow_dv_get_metadata_reg(dev, attr, error);
2484         if (reg < 0)
2485                 return reg;
2486         if (reg != REG_A && reg != REG_B) {
2487                 struct mlx5_priv *priv = dev->data->dev_private;
2488
2489                 nic_mask = priv->sh->dv_meta_mask;
2490         }
2491         if (!(action->conf))
2492                 return rte_flow_error_set(error, EINVAL,
2493                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2494                                           "configuration cannot be null");
2495         conf = (const struct rte_flow_action_set_meta *)action->conf;
2496         if (!conf->mask)
2497                 return rte_flow_error_set(error, EINVAL,
2498                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2499                                           "zero mask doesn't have any effect");
2500         if (conf->mask & ~nic_mask)
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2503                                           "meta data must be within reg C0");
2504         return 0;
2505 }
2506
2507 /**
2508  * Validate SET_TAG action.
2509  *
2510  * @param[in] dev
2511  *   Pointer to the rte_eth_dev structure.
2512  * @param[in] action
2513  *   Pointer to the action structure.
2514  * @param[in] action_flags
2515  *   Holds the actions detected until now.
2516  * @param[in] attr
2517  *   Pointer to flow attributes
2518  * @param[out] error
2519  *   Pointer to error structure.
2520  *
2521  * @return
2522  *   0 on success, a negative errno value otherwise and rte_errno is set.
2523  */
2524 static int
2525 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2526                                 const struct rte_flow_action *action,
2527                                 uint64_t action_flags,
2528                                 const struct rte_flow_attr *attr,
2529                                 struct rte_flow_error *error)
2530 {
2531         const struct rte_flow_action_set_tag *conf;
2532         const uint64_t terminal_action_flags =
2533                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2534                 MLX5_FLOW_ACTION_RSS;
2535         int ret;
2536
2537         if (!mlx5_flow_ext_mreg_supported(dev))
2538                 return rte_flow_error_set(error, ENOTSUP,
2539                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2540                                           "extensive metadata register"
2541                                           " isn't supported");
2542         if (!(action->conf))
2543                 return rte_flow_error_set(error, EINVAL,
2544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2545                                           "configuration cannot be null");
2546         conf = (const struct rte_flow_action_set_tag *)action->conf;
2547         if (!conf->mask)
2548                 return rte_flow_error_set(error, EINVAL,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2550                                           "zero mask doesn't have any effect");
2551         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2552         if (ret < 0)
2553                 return ret;
2554         if (!attr->transfer && attr->ingress &&
2555             (action_flags & terminal_action_flags))
2556                 return rte_flow_error_set(error, EINVAL,
2557                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2558                                           "set_tag has no effect"
2559                                           " with terminal actions");
2560         return 0;
2561 }
2562
2563 /**
2564  * Validate count action.
2565  *
2566  * @param[in] dev
2567  *   Pointer to rte_eth_dev structure.
2568  * @param[out] error
2569  *   Pointer to error structure.
2570  *
2571  * @return
2572  *   0 on success, a negative errno value otherwise and rte_errno is set.
2573  */
2574 static int
2575 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2576                               struct rte_flow_error *error)
2577 {
2578         struct mlx5_priv *priv = dev->data->dev_private;
2579
2580         if (!priv->config.devx)
2581                 goto notsup_err;
2582 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2583         return 0;
2584 #endif
2585 notsup_err:
2586         return rte_flow_error_set
2587                       (error, ENOTSUP,
2588                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2589                        NULL,
2590                        "count action not supported");
2591 }
2592
2593 /**
2594  * Validate the L2 encap action.
2595  *
2596  * @param[in] dev
2597  *   Pointer to the rte_eth_dev structure.
2598  * @param[in] action_flags
2599  *   Holds the actions detected until now.
2600  * @param[in] action
2601  *   Pointer to the action structure.
2602  * @param[in] attr
2603  *   Pointer to flow attributes.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2612                                  uint64_t action_flags,
2613                                  const struct rte_flow_action *action,
2614                                  const struct rte_flow_attr *attr,
2615                                  struct rte_flow_error *error)
2616 {
2617         const struct mlx5_priv *priv = dev->data->dev_private;
2618
2619         if (!(action->conf))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2622                                           "configuration cannot be null");
2623         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2624                 return rte_flow_error_set(error, EINVAL,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626                                           "can only have a single encap action "
2627                                           "in a flow");
2628         if (!attr->transfer && priv->representor)
2629                 return rte_flow_error_set(error, ENOTSUP,
2630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2631                                           "encap action for VF representor "
2632                                           "not supported on NIC table");
2633         return 0;
2634 }
2635
2636 /**
2637  * Validate a decap action.
2638  *
2639  * @param[in] dev
2640  *   Pointer to the rte_eth_dev structure.
2641  * @param[in] action_flags
2642  *   Holds the actions detected until now.
2643  * @param[in] attr
2644  *   Pointer to flow attributes
2645  * @param[out] error
2646  *   Pointer to error structure.
2647  *
2648  * @return
2649  *   0 on success, a negative errno value otherwise and rte_errno is set.
2650  */
2651 static int
2652 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2653                               uint64_t action_flags,
2654                               const struct rte_flow_attr *attr,
2655                               struct rte_flow_error *error)
2656 {
2657         const struct mlx5_priv *priv = dev->data->dev_private;
2658
2659         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2660             !priv->config.decap_en)
2661                 return rte_flow_error_set(error, ENOTSUP,
2662                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2663                                           "decap is not enabled");
2664         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2665                 return rte_flow_error_set(error, ENOTSUP,
2666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2667                                           action_flags &
2668                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2669                                           "have a single decap action" : "decap "
2670                                           "after encap is not supported");
2671         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2672                 return rte_flow_error_set(error, EINVAL,
2673                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2674                                           "can't have decap action after"
2675                                           " modify action");
2676         if (attr->egress)
2677                 return rte_flow_error_set(error, ENOTSUP,
2678                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2679                                           NULL,
2680                                           "decap action not supported for "
2681                                           "egress");
2682         if (!attr->transfer && priv->representor)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2685                                           "decap action for VF representor "
2686                                           "not supported on NIC table");
2687         return 0;
2688 }
2689
/* Shared empty raw decap action configuration. */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2691
2692 /**
2693  * Validate the raw encap and decap actions.
2694  *
2695  * @param[in] dev
2696  *   Pointer to the rte_eth_dev structure.
2697  * @param[in] decap
2698  *   Pointer to the decap action.
2699  * @param[in] encap
2700  *   Pointer to the encap action.
2701  * @param[in] attr
2702  *   Pointer to flow attributes
2703  * @param[in/out] action_flags
2704  *   Holds the actions detected until now.
2705  * @param[out] actions_n
2706  *   pointer to the number of actions counter.
2707  * @param[out] error
2708  *   Pointer to error structure.
2709  *
2710  * @return
2711  *   0 on success, a negative errno value otherwise and rte_errno is set.
2712  */
2713 static int
2714 flow_dv_validate_action_raw_encap_decap
2715         (struct rte_eth_dev *dev,
2716          const struct rte_flow_action_raw_decap *decap,
2717          const struct rte_flow_action_raw_encap *encap,
2718          const struct rte_flow_attr *attr, uint64_t *action_flags,
2719          int *actions_n, struct rte_flow_error *error)
2720 {
2721         const struct mlx5_priv *priv = dev->data->dev_private;
2722         int ret;
2723
2724         if (encap && (!encap->size || !encap->data))
2725                 return rte_flow_error_set(error, EINVAL,
2726                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2727                                           "raw encap data cannot be empty");
2728         if (decap && encap) {
2729                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2730                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2731                         /* L3 encap. */
2732                         decap = NULL;
2733                 else if (encap->size <=
2734                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2735                            decap->size >
2736                            MLX5_ENCAPSULATION_DECISION_SIZE)
2737                         /* L3 decap. */
2738                         encap = NULL;
2739                 else if (encap->size >
2740                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2741                            decap->size >
2742                            MLX5_ENCAPSULATION_DECISION_SIZE)
2743                         /* 2 L2 actions: encap and decap. */
2744                         ;
2745                 else
2746                         return rte_flow_error_set(error,
2747                                 ENOTSUP,
2748                                 RTE_FLOW_ERROR_TYPE_ACTION,
2749                                 NULL, "unsupported too small "
2750                                 "raw decap and too small raw "
2751                                 "encap combination");
2752         }
2753         if (decap) {
2754                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2755                                                     error);
2756                 if (ret < 0)
2757                         return ret;
2758                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2759                 ++(*actions_n);
2760         }
2761         if (encap) {
2762                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2763                         return rte_flow_error_set(error, ENOTSUP,
2764                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2765                                                   NULL,
2766                                                   "small raw encap size");
2767                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2768                         return rte_flow_error_set(error, EINVAL,
2769                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2770                                                   NULL,
2771                                                   "more than one encap action");
2772                 if (!attr->transfer && priv->representor)
2773                         return rte_flow_error_set
2774                                         (error, ENOTSUP,
2775                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2776                                          "encap action for VF representor "
2777                                          "not supported on NIC table");
2778                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2779                 ++(*actions_n);
2780         }
2781         return 0;
2782 }
2783
2784 /**
2785  * Match encap_decap resource.
2786  *
2787  * @param list
2788  *   Pointer to the hash list.
2789  * @param entry
2790  *   Pointer to exist resource entry object.
2791  * @param key
2792  *   Key of the new entry.
2793  * @param ctx_cb
2794  *   Pointer to new encap_decap resource.
2795  *
2796  * @return
2797  *   0 on matching, none-zero otherwise.
2798  */
2799 int
2800 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2801                              struct mlx5_hlist_entry *entry,
2802                              uint64_t key __rte_unused, void *cb_ctx)
2803 {
2804         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2805         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2807
2808         cache_resource = container_of(entry,
2809                                       struct mlx5_flow_dv_encap_decap_resource,
2810                                       entry);
2811         if (resource->entry.key == cache_resource->entry.key &&
2812             resource->reformat_type == cache_resource->reformat_type &&
2813             resource->ft_type == cache_resource->ft_type &&
2814             resource->flags == cache_resource->flags &&
2815             resource->size == cache_resource->size &&
2816             !memcmp((const void *)resource->buf,
2817                     (const void *)cache_resource->buf,
2818                     resource->size))
2819                 return 0;
2820         return -1;
2821 }
2822
2823 /**
2824  * Allocate encap_decap resource.
2825  *
2826  * @param list
2827  *   Pointer to the hash list.
2828  * @param entry
2829  *   Pointer to exist resource entry object.
2830  * @param ctx_cb
2831  *   Pointer to new encap_decap resource.
2832  *
2833  * @return
2834  *   0 on matching, none-zero otherwise.
2835  */
2836 struct mlx5_hlist_entry *
2837 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2838                               uint64_t key __rte_unused,
2839                               void *cb_ctx)
2840 {
2841         struct mlx5_dev_ctx_shared *sh = list->ctx;
2842         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2843         struct mlx5dv_dr_domain *domain;
2844         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2845         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2846         uint32_t idx;
2847         int ret;
2848
2849         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2850                 domain = sh->fdb_domain;
2851         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2852                 domain = sh->rx_domain;
2853         else
2854                 domain = sh->tx_domain;
2855         /* Register new encap/decap resource. */
2856         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2857                                        &idx);
2858         if (!cache_resource) {
2859                 rte_flow_error_set(ctx->error, ENOMEM,
2860                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2861                                    "cannot allocate resource memory");
2862                 return NULL;
2863         }
2864         *cache_resource = *resource;
2865         cache_resource->idx = idx;
2866         ret = mlx5_flow_os_create_flow_action_packet_reformat
2867                                         (sh->ctx, domain, cache_resource,
2868                                          &cache_resource->action);
2869         if (ret) {
2870                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2871                 rte_flow_error_set(ctx->error, ENOMEM,
2872                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2873                                    NULL, "cannot create action");
2874                 return NULL;
2875         }
2876
2877         return &cache_resource->entry;
2878 }
2879
2880 /**
2881  * Find existing encap/decap resource or create and register a new one.
2882  *
2883  * @param[in, out] dev
2884  *   Pointer to rte_eth_dev structure.
2885  * @param[in, out] resource
2886  *   Pointer to encap/decap resource.
2887  * @parm[in, out] dev_flow
2888  *   Pointer to the dev_flow.
2889  * @param[out] error
2890  *   pointer to error structure.
2891  *
2892  * @return
2893  *   0 on success otherwise -errno and errno is set.
2894  */
2895 static int
2896 flow_dv_encap_decap_resource_register
2897                         (struct rte_eth_dev *dev,
2898                          struct mlx5_flow_dv_encap_decap_resource *resource,
2899                          struct mlx5_flow *dev_flow,
2900                          struct rte_flow_error *error)
2901 {
2902         struct mlx5_priv *priv = dev->data->dev_private;
2903         struct mlx5_dev_ctx_shared *sh = priv->sh;
2904         struct mlx5_hlist_entry *entry;
2905         union mlx5_flow_encap_decap_key encap_decap_key = {
2906                 {
2907                         .ft_type = resource->ft_type,
2908                         .refmt_type = resource->reformat_type,
2909                         .buf_size = resource->size,
2910                         .table_level = !!dev_flow->dv.group,
2911                         .cksum = 0,
2912                 }
2913         };
2914         struct mlx5_flow_cb_ctx ctx = {
2915                 .error = error,
2916                 .data = resource,
2917         };
2918
2919         resource->flags = dev_flow->dv.group ? 0 : 1;
2920         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2921                                                 resource->size, 0);
2922         resource->entry.key = encap_decap_key.v64;
2923         entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2924                                     &ctx);
2925         if (!entry)
2926                 return -rte_errno;
2927         resource = container_of(entry, typeof(*resource), entry);
2928         dev_flow->dv.encap_decap = resource;
2929         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2930         return 0;
2931 }
2932
2933 /**
2934  * Find existing table jump resource or create and register a new one.
2935  *
2936  * @param[in, out] dev
2937  *   Pointer to rte_eth_dev structure.
2938  * @param[in, out] tbl
2939  *   Pointer to flow table resource.
2940  * @parm[in, out] dev_flow
2941  *   Pointer to the dev_flow.
2942  * @param[out] error
2943  *   pointer to error structure.
2944  *
2945  * @return
2946  *   0 on success otherwise -errno and errno is set.
2947  */
2948 static int
2949 flow_dv_jump_tbl_resource_register
2950                         (struct rte_eth_dev *dev __rte_unused,
2951                          struct mlx5_flow_tbl_resource *tbl,
2952                          struct mlx5_flow *dev_flow,
2953                          struct rte_flow_error *error __rte_unused)
2954 {
2955         struct mlx5_flow_tbl_data_entry *tbl_data =
2956                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2957
2958         MLX5_ASSERT(tbl);
2959         MLX5_ASSERT(tbl_data->jump.action);
2960         dev_flow->handle->rix_jump = tbl_data->idx;
2961         dev_flow->dv.jump = &tbl_data->jump;
2962         return 0;
2963 }
2964
2965 int
2966 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2967                          struct mlx5_cache_entry *entry, void *cb_ctx)
2968 {
2969         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971         struct mlx5_flow_dv_port_id_action_resource *res =
2972                         container_of(entry, typeof(*res), entry);
2973
2974         return ref->port_id != res->port_id;
2975 }
2976
2977 struct mlx5_cache_entry *
2978 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2979                           struct mlx5_cache_entry *entry __rte_unused,
2980                           void *cb_ctx)
2981 {
2982         struct mlx5_dev_ctx_shared *sh = list->ctx;
2983         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2984         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2985         struct mlx5_flow_dv_port_id_action_resource *cache;
2986         uint32_t idx;
2987         int ret;
2988
2989         /* Register new port id action resource. */
2990         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2991         if (!cache) {
2992                 rte_flow_error_set(ctx->error, ENOMEM,
2993                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2994                                    "cannot allocate port_id action cache memory");
2995                 return NULL;
2996         }
2997         *cache = *ref;
2998         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2999                                                         ref->port_id,
3000                                                         &cache->action);
3001         if (ret) {
3002                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3003                 rte_flow_error_set(ctx->error, ENOMEM,
3004                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3005                                    "cannot create action");
3006                 return NULL;
3007         }
3008         return &cache->entry;
3009 }
3010
3011 /**
3012  * Find existing table port ID resource or create and register a new one.
3013  *
3014  * @param[in, out] dev
3015  *   Pointer to rte_eth_dev structure.
3016  * @param[in, out] resource
3017  *   Pointer to port ID action resource.
3018  * @parm[in, out] dev_flow
3019  *   Pointer to the dev_flow.
3020  * @param[out] error
3021  *   pointer to error structure.
3022  *
3023  * @return
3024  *   0 on success otherwise -errno and errno is set.
3025  */
3026 static int
3027 flow_dv_port_id_action_resource_register
3028                         (struct rte_eth_dev *dev,
3029                          struct mlx5_flow_dv_port_id_action_resource *resource,
3030                          struct mlx5_flow *dev_flow,
3031                          struct rte_flow_error *error)
3032 {
3033         struct mlx5_priv *priv = dev->data->dev_private;
3034         struct mlx5_cache_entry *entry;
3035         struct mlx5_flow_dv_port_id_action_resource *cache;
3036         struct mlx5_flow_cb_ctx ctx = {
3037                 .error = error,
3038                 .data = resource,
3039         };
3040
3041         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3042         if (!entry)
3043                 return -rte_errno;
3044         cache = container_of(entry, typeof(*cache), entry);
3045         dev_flow->dv.port_id_action = cache;
3046         dev_flow->handle->rix_port_id_action = cache->idx;
3047         return 0;
3048 }
3049
3050 int
3051 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3052                          struct mlx5_cache_entry *entry, void *cb_ctx)
3053 {
3054         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3055         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3056         struct mlx5_flow_dv_push_vlan_action_resource *res =
3057                         container_of(entry, typeof(*res), entry);
3058
3059         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3060 }
3061
3062 struct mlx5_cache_entry *
3063 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3064                           struct mlx5_cache_entry *entry __rte_unused,
3065                           void *cb_ctx)
3066 {
3067         struct mlx5_dev_ctx_shared *sh = list->ctx;
3068         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3069         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3070         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3071         struct mlx5dv_dr_domain *domain;
3072         uint32_t idx;
3073         int ret;
3074
3075         /* Register new port id action resource. */
3076         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3077         if (!cache) {
3078                 rte_flow_error_set(ctx->error, ENOMEM,
3079                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3080                                    "cannot allocate push_vlan action cache memory");
3081                 return NULL;
3082         }
3083         *cache = *ref;
3084         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3085                 domain = sh->fdb_domain;
3086         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3087                 domain = sh->rx_domain;
3088         else
3089                 domain = sh->tx_domain;
3090         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3091                                                         &cache->action);
3092         if (ret) {
3093                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3094                 rte_flow_error_set(ctx->error, ENOMEM,
3095                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3096                                    "cannot create push vlan action");
3097                 return NULL;
3098         }
3099         return &cache->entry;
3100 }
3101
3102 /**
3103  * Find existing push vlan resource or create and register a new one.
3104  *
3105  * @param [in, out] dev
3106  *   Pointer to rte_eth_dev structure.
3107  * @param[in, out] resource
3108  *   Pointer to port ID action resource.
3109  * @parm[in, out] dev_flow
3110  *   Pointer to the dev_flow.
3111  * @param[out] error
3112  *   pointer to error structure.
3113  *
3114  * @return
3115  *   0 on success otherwise -errno and errno is set.
3116  */
3117 static int
3118 flow_dv_push_vlan_action_resource_register
3119                        (struct rte_eth_dev *dev,
3120                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3121                         struct mlx5_flow *dev_flow,
3122                         struct rte_flow_error *error)
3123 {
3124         struct mlx5_priv *priv = dev->data->dev_private;
3125         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3126         struct mlx5_cache_entry *entry;
3127         struct mlx5_flow_cb_ctx ctx = {
3128                 .error = error,
3129                 .data = resource,
3130         };
3131
3132         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3133         if (!entry)
3134                 return -rte_errno;
3135         cache = container_of(entry, typeof(*cache), entry);
3136
3137         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3138         dev_flow->dv.push_vlan_res = cache;
3139         return 0;
3140 }
3141
3142 /**
3143  * Get the size of specific rte_flow_item_type hdr size
3144  *
3145  * @param[in] item_type
3146  *   Tested rte_flow_item_type.
3147  *
3148  * @return
3149  *   sizeof struct item_type, 0 if void or irrelevant.
3150  */
3151 static size_t
3152 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3153 {
3154         size_t retval;
3155
3156         switch (item_type) {
3157         case RTE_FLOW_ITEM_TYPE_ETH:
3158                 retval = sizeof(struct rte_ether_hdr);
3159                 break;
3160         case RTE_FLOW_ITEM_TYPE_VLAN:
3161                 retval = sizeof(struct rte_vlan_hdr);
3162                 break;
3163         case RTE_FLOW_ITEM_TYPE_IPV4:
3164                 retval = sizeof(struct rte_ipv4_hdr);
3165                 break;
3166         case RTE_FLOW_ITEM_TYPE_IPV6:
3167                 retval = sizeof(struct rte_ipv6_hdr);
3168                 break;
3169         case RTE_FLOW_ITEM_TYPE_UDP:
3170                 retval = sizeof(struct rte_udp_hdr);
3171                 break;
3172         case RTE_FLOW_ITEM_TYPE_TCP:
3173                 retval = sizeof(struct rte_tcp_hdr);
3174                 break;
3175         case RTE_FLOW_ITEM_TYPE_VXLAN:
3176         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3177                 retval = sizeof(struct rte_vxlan_hdr);
3178                 break;
3179         case RTE_FLOW_ITEM_TYPE_GRE:
3180         case RTE_FLOW_ITEM_TYPE_NVGRE:
3181                 retval = sizeof(struct rte_gre_hdr);
3182                 break;
3183         case RTE_FLOW_ITEM_TYPE_MPLS:
3184                 retval = sizeof(struct rte_mpls_hdr);
3185                 break;
3186         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3187         default:
3188                 retval = 0;
3189                 break;
3190         }
3191         return retval;
3192 }
3193
/* Default values used to fill header fields left as zero in encap data. */
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3201
3202 /**
3203  * Convert the encap action data from list of rte_flow_item to raw buffer
3204  *
3205  * @param[in] items
3206  *   Pointer to rte_flow_item objects list.
3207  * @param[out] buf
3208  *   Pointer to the output buffer.
3209  * @param[out] size
3210  *   Pointer to the output buffer size.
3211  * @param[out] error
3212  *   Pointer to the error structure.
3213  *
3214  * @return
3215  *   0 on success, a negative errno value otherwise and rte_errno is set.
3216  */
3217 static int
3218 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3219                            size_t *size, struct rte_flow_error *error)
3220 {
3221         struct rte_ether_hdr *eth = NULL;
3222         struct rte_vlan_hdr *vlan = NULL;
3223         struct rte_ipv4_hdr *ipv4 = NULL;
3224         struct rte_ipv6_hdr *ipv6 = NULL;
3225         struct rte_udp_hdr *udp = NULL;
3226         struct rte_vxlan_hdr *vxlan = NULL;
3227         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3228         struct rte_gre_hdr *gre = NULL;
3229         size_t len;
3230         size_t temp_size = 0;
3231
3232         if (!items)
3233                 return rte_flow_error_set(error, EINVAL,
3234                                           RTE_FLOW_ERROR_TYPE_ACTION,
3235                                           NULL, "invalid empty data");
3236         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3237                 len = flow_dv_get_item_hdr_len(items->type);
3238                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3239                         return rte_flow_error_set(error, EINVAL,
3240                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3241                                                   (void *)items->type,
3242                                                   "items total size is too big"
3243                                                   " for encap action");
3244                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3245                 switch (items->type) {
3246                 case RTE_FLOW_ITEM_TYPE_ETH:
3247                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3248                         break;
3249                 case RTE_FLOW_ITEM_TYPE_VLAN:
3250                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3251                         if (!eth)
3252                                 return rte_flow_error_set(error, EINVAL,
3253                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3254                                                 (void *)items->type,
3255                                                 "eth header not found");
3256                         if (!eth->ether_type)
3257                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3258                         break;
3259                 case RTE_FLOW_ITEM_TYPE_IPV4:
3260                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3261                         if (!vlan && !eth)
3262                                 return rte_flow_error_set(error, EINVAL,
3263                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3264                                                 (void *)items->type,
3265                                                 "neither eth nor vlan"
3266                                                 " header found");
3267                         if (vlan && !vlan->eth_proto)
3268                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3269                         else if (eth && !eth->ether_type)
3270                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3271                         if (!ipv4->version_ihl)
3272                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3273                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3274                         if (!ipv4->time_to_live)
3275                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3276                         break;
3277                 case RTE_FLOW_ITEM_TYPE_IPV6:
3278                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3279                         if (!vlan && !eth)
3280                                 return rte_flow_error_set(error, EINVAL,
3281                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3282                                                 (void *)items->type,
3283                                                 "neither eth nor vlan"
3284                                                 " header found");
3285                         if (vlan && !vlan->eth_proto)
3286                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3287                         else if (eth && !eth->ether_type)
3288                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3289                         if (!ipv6->vtc_flow)
3290                                 ipv6->vtc_flow =
3291                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3292                         if (!ipv6->hop_limits)
3293                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3294                         break;
3295                 case RTE_FLOW_ITEM_TYPE_UDP:
3296                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3297                         if (!ipv4 && !ipv6)
3298                                 return rte_flow_error_set(error, EINVAL,
3299                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3300                                                 (void *)items->type,
3301                                                 "ip header not found");
3302                         if (ipv4 && !ipv4->next_proto_id)
3303                                 ipv4->next_proto_id = IPPROTO_UDP;
3304                         else if (ipv6 && !ipv6->proto)
3305                                 ipv6->proto = IPPROTO_UDP;
3306                         break;
3307                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3308                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3309                         if (!udp)
3310                                 return rte_flow_error_set(error, EINVAL,
3311                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3312                                                 (void *)items->type,
3313                                                 "udp header not found");
3314                         if (!udp->dst_port)
3315                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3316                         if (!vxlan->vx_flags)
3317                                 vxlan->vx_flags =
3318                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3319                         break;
3320                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3321                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3322                         if (!udp)
3323                                 return rte_flow_error_set(error, EINVAL,
3324                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3325                                                 (void *)items->type,
3326                                                 "udp header not found");
3327                         if (!vxlan_gpe->proto)
3328                                 return rte_flow_error_set(error, EINVAL,
3329                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3330                                                 (void *)items->type,
3331                                                 "next protocol not found");
3332                         if (!udp->dst_port)
3333                                 udp->dst_port =
3334                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3335                         if (!vxlan_gpe->vx_flags)
3336                                 vxlan_gpe->vx_flags =
3337                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3338                         break;
3339                 case RTE_FLOW_ITEM_TYPE_GRE:
3340                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3341                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3342                         if (!gre->proto)
3343                                 return rte_flow_error_set(error, EINVAL,
3344                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3345                                                 (void *)items->type,
3346                                                 "next protocol not found");
3347                         if (!ipv4 && !ipv6)
3348                                 return rte_flow_error_set(error, EINVAL,
3349                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3350                                                 (void *)items->type,
3351                                                 "ip header not found");
3352                         if (ipv4 && !ipv4->next_proto_id)
3353                                 ipv4->next_proto_id = IPPROTO_GRE;
3354                         else if (ipv6 && !ipv6->proto)
3355                                 ipv6->proto = IPPROTO_GRE;
3356                         break;
3357                 case RTE_FLOW_ITEM_TYPE_VOID:
3358                         break;
3359                 default:
3360                         return rte_flow_error_set(error, EINVAL,
3361                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3362                                                   (void *)items->type,
3363                                                   "unsupported item type");
3364                         break;
3365                 }
3366                 temp_size += len;
3367         }
3368         *size = temp_size;
3369         return 0;
3370 }
3371
/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header buffer.
 *
 * HW computes the checksum for IPv4, so IPv4 payloads are left untouched.
 * Non-IPv4/IPv6 payloads are rejected; IPv6 payloads that are not UDP are
 * ignored.
 * NOTE(review): IPv6 extension headers are not skipped — the UDP header
 * is assumed to directly follow the fixed IPv6 header; confirm callers
 * never build encap data with extension headers.
 *
 * @param[in, out] data
 *   Pointer to the raw encap buffer, starting with an Ethernet header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success (including when nothing needs zeroing), a negative errno
 *   value otherwise and rte_errno is set.
 */
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        char *next_hdr;
        uint16_t proto;

        eth = (struct rte_ether_hdr *)data;
        next_hdr = (char *)(eth + 1);
        proto = RTE_BE16(eth->ether_type);

        /* VLAN skipping */
        while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
                vlan = (struct rte_vlan_hdr *)next_hdr;
                proto = RTE_BE16(vlan->eth_proto);
                next_hdr += sizeof(struct rte_vlan_hdr);
        }

        /* HW calculates IPv4 csum. no need to proceed */
        if (proto == RTE_ETHER_TYPE_IPV4)
                return 0;

        /* non IPv4/IPv6 header. not supported */
        if (proto != RTE_ETHER_TYPE_IPV6) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "Cannot offload non IPv4/IPv6");
        }

        ipv6 = (struct rte_ipv6_hdr *)next_hdr;

        /* ignore non UDP */
        if (ipv6->proto != IPPROTO_UDP)
                return 0;

        udp = (struct rte_udp_hdr *)(ipv6 + 1);
        udp->dgram_cksum = 0;

        return 0;
}
3415
3416 /**
3417  * Convert L2 encap action to DV specification.
3418  *
3419  * @param[in] dev
3420  *   Pointer to rte_eth_dev structure.
3421  * @param[in] action
3422  *   Pointer to action structure.
3423  * @param[in, out] dev_flow
3424  *   Pointer to the mlx5_flow.
3425  * @param[in] transfer
3426  *   Mark if the flow is E-Switch flow.
3427  * @param[out] error
3428  *   Pointer to the error structure.
3429  *
3430  * @return
3431  *   0 on success, a negative errno value otherwise and rte_errno is set.
3432  */
3433 static int
3434 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3435                                const struct rte_flow_action *action,
3436                                struct mlx5_flow *dev_flow,
3437                                uint8_t transfer,
3438                                struct rte_flow_error *error)
3439 {
3440         const struct rte_flow_item *encap_data;
3441         const struct rte_flow_action_raw_encap *raw_encap_data;
3442         struct mlx5_flow_dv_encap_decap_resource res = {
3443                 .reformat_type =
3444                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3445                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3446                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3447         };
3448
3449         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3450                 raw_encap_data =
3451                         (const struct rte_flow_action_raw_encap *)action->conf;
3452                 res.size = raw_encap_data->size;
3453                 memcpy(res.buf, raw_encap_data->data, res.size);
3454         } else {
3455                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3456                         encap_data =
3457                                 ((const struct rte_flow_action_vxlan_encap *)
3458                                                 action->conf)->definition;
3459                 else
3460                         encap_data =
3461                                 ((const struct rte_flow_action_nvgre_encap *)
3462                                                 action->conf)->definition;
3463                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3464                                                &res.size, error))
3465                         return -rte_errno;
3466         }
3467         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3468                 return -rte_errno;
3469         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3470                 return rte_flow_error_set(error, EINVAL,
3471                                           RTE_FLOW_ERROR_TYPE_ACTION,
3472                                           NULL, "can't create L2 encap action");
3473         return 0;
3474 }
3475
3476 /**
3477  * Convert L2 decap action to DV specification.
3478  *
3479  * @param[in] dev
3480  *   Pointer to rte_eth_dev structure.
3481  * @param[in, out] dev_flow
3482  *   Pointer to the mlx5_flow.
3483  * @param[in] transfer
3484  *   Mark if the flow is E-Switch flow.
3485  * @param[out] error
3486  *   Pointer to the error structure.
3487  *
3488  * @return
3489  *   0 on success, a negative errno value otherwise and rte_errno is set.
3490  */
3491 static int
3492 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3493                                struct mlx5_flow *dev_flow,
3494                                uint8_t transfer,
3495                                struct rte_flow_error *error)
3496 {
3497         struct mlx5_flow_dv_encap_decap_resource res = {
3498                 .size = 0,
3499                 .reformat_type =
3500                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3501                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3502                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3503         };
3504
3505         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3506                 return rte_flow_error_set(error, EINVAL,
3507                                           RTE_FLOW_ERROR_TYPE_ACTION,
3508                                           NULL, "can't create L2 decap action");
3509         return 0;
3510 }
3511
3512 /**
3513  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3514  *
3515  * @param[in] dev
3516  *   Pointer to rte_eth_dev structure.
3517  * @param[in] action
3518  *   Pointer to action structure.
3519  * @param[in, out] dev_flow
3520  *   Pointer to the mlx5_flow.
3521  * @param[in] attr
3522  *   Pointer to the flow attributes.
3523  * @param[out] error
3524  *   Pointer to the error structure.
3525  *
3526  * @return
3527  *   0 on success, a negative errno value otherwise and rte_errno is set.
3528  */
3529 static int
3530 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3531                                 const struct rte_flow_action *action,
3532                                 struct mlx5_flow *dev_flow,
3533                                 const struct rte_flow_attr *attr,
3534                                 struct rte_flow_error *error)
3535 {
3536         const struct rte_flow_action_raw_encap *encap_data;
3537         struct mlx5_flow_dv_encap_decap_resource res;
3538
3539         memset(&res, 0, sizeof(res));
3540         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3541         res.size = encap_data->size;
3542         memcpy(res.buf, encap_data->data, res.size);
3543         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3544                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3545                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3546         if (attr->transfer)
3547                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3548         else
3549                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3550                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3551         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3552                 return rte_flow_error_set(error, EINVAL,
3553                                           RTE_FLOW_ERROR_TYPE_ACTION,
3554                                           NULL, "can't create encap action");
3555         return 0;
3556 }
3557
3558 /**
3559  * Create action push VLAN.
3560  *
3561  * @param[in] dev
3562  *   Pointer to rte_eth_dev structure.
3563  * @param[in] attr
3564  *   Pointer to the flow attributes.
3565  * @param[in] vlan
3566  *   Pointer to the vlan to push to the Ethernet header.
3567  * @param[in, out] dev_flow
3568  *   Pointer to the mlx5_flow.
3569  * @param[out] error
3570  *   Pointer to the error structure.
3571  *
3572  * @return
3573  *   0 on success, a negative errno value otherwise and rte_errno is set.
3574  */
3575 static int
3576 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3577                                 const struct rte_flow_attr *attr,
3578                                 const struct rte_vlan_hdr *vlan,
3579                                 struct mlx5_flow *dev_flow,
3580                                 struct rte_flow_error *error)
3581 {
3582         struct mlx5_flow_dv_push_vlan_action_resource res;
3583
3584         memset(&res, 0, sizeof(res));
3585         res.vlan_tag =
3586                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3587                                  vlan->vlan_tci);
3588         if (attr->transfer)
3589                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3590         else
3591                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3592                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3593         return flow_dv_push_vlan_action_resource_register
3594                                             (dev, &res, dev_flow, error);
3595 }
3596
/*
 * Nonzero while an E-Switch (FDB) mirroring (sample) rule is being
 * handled; validation helpers below consult it to reject action
 * combinations FDB mirroring cannot support.
 * NOTE(review): file-scope flag, set elsewhere in this file - assumes
 * flow validation is not run concurrently; confirm against callers.
 */
static int fdb_mirror;
3598
3599 /**
3600  * Validate the modify-header actions.
3601  *
3602  * @param[in] action_flags
3603  *   Holds the actions detected until now.
3604  * @param[in] action
3605  *   Pointer to the modify action.
3606  * @param[out] error
3607  *   Pointer to error structure.
3608  *
3609  * @return
3610  *   0 on success, a negative errno value otherwise and rte_errno is set.
3611  */
3612 static int
3613 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3614                                    const struct rte_flow_action *action,
3615                                    struct rte_flow_error *error)
3616 {
3617         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3618                 return rte_flow_error_set(error, EINVAL,
3619                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3620                                           NULL, "action configuration not set");
3621         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3622                 return rte_flow_error_set(error, EINVAL,
3623                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3624                                           "can't have encap action before"
3625                                           " modify action");
3626         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3627                 return rte_flow_error_set(error, EINVAL,
3628                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3629                                           "can't support sample action before"
3630                                           " modify action for E-Switch"
3631                                           " mirroring");
3632         return 0;
3633 }
3634
3635 /**
3636  * Validate the modify-header MAC address actions.
3637  *
3638  * @param[in] action_flags
3639  *   Holds the actions detected until now.
3640  * @param[in] action
3641  *   Pointer to the modify action.
3642  * @param[in] item_flags
3643  *   Holds the items detected.
3644  * @param[out] error
3645  *   Pointer to error structure.
3646  *
3647  * @return
3648  *   0 on success, a negative errno value otherwise and rte_errno is set.
3649  */
3650 static int
3651 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3652                                    const struct rte_flow_action *action,
3653                                    const uint64_t item_flags,
3654                                    struct rte_flow_error *error)
3655 {
3656         int ret = 0;
3657
3658         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3659         if (!ret) {
3660                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3661                         return rte_flow_error_set(error, EINVAL,
3662                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3663                                                   NULL,
3664                                                   "no L2 item in pattern");
3665         }
3666         return ret;
3667 }
3668
3669 /**
3670  * Validate the modify-header IPv4 address actions.
3671  *
3672  * @param[in] action_flags
3673  *   Holds the actions detected until now.
3674  * @param[in] action
3675  *   Pointer to the modify action.
3676  * @param[in] item_flags
3677  *   Holds the items detected.
3678  * @param[out] error
3679  *   Pointer to error structure.
3680  *
3681  * @return
3682  *   0 on success, a negative errno value otherwise and rte_errno is set.
3683  */
3684 static int
3685 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3686                                     const struct rte_flow_action *action,
3687                                     const uint64_t item_flags,
3688                                     struct rte_flow_error *error)
3689 {
3690         int ret = 0;
3691         uint64_t layer;
3692
3693         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3694         if (!ret) {
3695                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3696                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3697                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3698                 if (!(item_flags & layer))
3699                         return rte_flow_error_set(error, EINVAL,
3700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3701                                                   NULL,
3702                                                   "no ipv4 item in pattern");
3703         }
3704         return ret;
3705 }
3706
3707 /**
3708  * Validate the modify-header IPv6 address actions.
3709  *
3710  * @param[in] action_flags
3711  *   Holds the actions detected until now.
3712  * @param[in] action
3713  *   Pointer to the modify action.
3714  * @param[in] item_flags
3715  *   Holds the items detected.
3716  * @param[out] error
3717  *   Pointer to error structure.
3718  *
3719  * @return
3720  *   0 on success, a negative errno value otherwise and rte_errno is set.
3721  */
3722 static int
3723 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3724                                     const struct rte_flow_action *action,
3725                                     const uint64_t item_flags,
3726                                     struct rte_flow_error *error)
3727 {
3728         int ret = 0;
3729         uint64_t layer;
3730
3731         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3732         if (!ret) {
3733                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3734                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3735                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3736                 if (!(item_flags & layer))
3737                         return rte_flow_error_set(error, EINVAL,
3738                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3739                                                   NULL,
3740                                                   "no ipv6 item in pattern");
3741         }
3742         return ret;
3743 }
3744
3745 /**
3746  * Validate the modify-header TP actions.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3762                                   const struct rte_flow_action *action,
3763                                   const uint64_t item_flags,
3764                                   struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L4 :
3773                                  MLX5_FLOW_LAYER_OUTER_L4;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL, "no transport layer "
3778                                                   "in pattern");
3779         }
3780         return ret;
3781 }
3782
3783 /**
3784  * Validate the modify-header actions of increment/decrement
3785  * TCP Sequence-number.
3786  *
3787  * @param[in] action_flags
3788  *   Holds the actions detected until now.
3789  * @param[in] action
3790  *   Pointer to the modify action.
3791  * @param[in] item_flags
3792  *   Holds the items detected.
3793  * @param[out] error
3794  *   Pointer to error structure.
3795  *
3796  * @return
3797  *   0 on success, a negative errno value otherwise and rte_errno is set.
3798  */
3799 static int
3800 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3801                                        const struct rte_flow_action *action,
3802                                        const uint64_t item_flags,
3803                                        struct rte_flow_error *error)
3804 {
3805         int ret = 0;
3806         uint64_t layer;
3807
3808         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3809         if (!ret) {
3810                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3811                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3812                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3813                 if (!(item_flags & layer))
3814                         return rte_flow_error_set(error, EINVAL,
3815                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3816                                                   NULL, "no TCP item in"
3817                                                   " pattern");
3818                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3819                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3820                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3821                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3822                         return rte_flow_error_set(error, EINVAL,
3823                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3824                                                   NULL,
3825                                                   "cannot decrease and increase"
3826                                                   " TCP sequence number"
3827                                                   " at the same time");
3828         }
3829         return ret;
3830 }
3831
3832 /**
3833  * Validate the modify-header actions of increment/decrement
3834  * TCP Acknowledgment number.
3835  *
3836  * @param[in] action_flags
3837  *   Holds the actions detected until now.
3838  * @param[in] action
3839  *   Pointer to the modify action.
3840  * @param[in] item_flags
3841  *   Holds the items detected.
3842  * @param[out] error
3843  *   Pointer to error structure.
3844  *
3845  * @return
3846  *   0 on success, a negative errno value otherwise and rte_errno is set.
3847  */
3848 static int
3849 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3850                                        const struct rte_flow_action *action,
3851                                        const uint64_t item_flags,
3852                                        struct rte_flow_error *error)
3853 {
3854         int ret = 0;
3855         uint64_t layer;
3856
3857         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3858         if (!ret) {
3859                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3860                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3861                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3862                 if (!(item_flags & layer))
3863                         return rte_flow_error_set(error, EINVAL,
3864                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3865                                                   NULL, "no TCP item in"
3866                                                   " pattern");
3867                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3868                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3869                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3870                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3871                         return rte_flow_error_set(error, EINVAL,
3872                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3873                                                   NULL,
3874                                                   "cannot decrease and increase"
3875                                                   " TCP acknowledgment number"
3876                                                   " at the same time");
3877         }
3878         return ret;
3879 }
3880
3881 /**
3882  * Validate the modify-header TTL actions.
3883  *
3884  * @param[in] action_flags
3885  *   Holds the actions detected until now.
3886  * @param[in] action
3887  *   Pointer to the modify action.
3888  * @param[in] item_flags
3889  *   Holds the items detected.
3890  * @param[out] error
3891  *   Pointer to error structure.
3892  *
3893  * @return
3894  *   0 on success, a negative errno value otherwise and rte_errno is set.
3895  */
3896 static int
3897 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3898                                    const struct rte_flow_action *action,
3899                                    const uint64_t item_flags,
3900                                    struct rte_flow_error *error)
3901 {
3902         int ret = 0;
3903         uint64_t layer;
3904
3905         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3906         if (!ret) {
3907                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3908                                  MLX5_FLOW_LAYER_INNER_L3 :
3909                                  MLX5_FLOW_LAYER_OUTER_L3;
3910                 if (!(item_flags & layer))
3911                         return rte_flow_error_set(error, EINVAL,
3912                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3913                                                   NULL,
3914                                                   "no IP protocol in pattern");
3915         }
3916         return ret;
3917 }
3918
3919 /**
3920  * Validate jump action.
3921  *
3922  * @param[in] action
3923  *   Pointer to the jump action.
3924  * @param[in] action_flags
3925  *   Holds the actions detected until now.
3926  * @param[in] attributes
3927  *   Pointer to flow attributes
3928  * @param[in] external
3929  *   Action belongs to flow rule created by request external to PMD.
3930  * @param[out] error
3931  *   Pointer to error structure.
3932  *
3933  * @return
3934  *   0 on success, a negative errno value otherwise and rte_errno is set.
3935  */
3936 static int
3937 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3938                              const struct mlx5_flow_tunnel *tunnel,
3939                              const struct rte_flow_action *action,
3940                              uint64_t action_flags,
3941                              const struct rte_flow_attr *attributes,
3942                              bool external, struct rte_flow_error *error)
3943 {
3944         uint32_t target_group, table;
3945         int ret = 0;
3946         struct flow_grp_info grp_info = {
3947                 .external = !!external,
3948                 .transfer = !!attributes->transfer,
3949                 .fdb_def_rule = 1,
3950                 .std_tbl_fix = 0
3951         };
3952         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3953                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3954                 return rte_flow_error_set(error, EINVAL,
3955                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3956                                           "can't have 2 fate actions in"
3957                                           " same flow");
3958         if (action_flags & MLX5_FLOW_ACTION_METER)
3959                 return rte_flow_error_set(error, ENOTSUP,
3960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3961                                           "jump with meter not support");
3962         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3963                 return rte_flow_error_set(error, EINVAL,
3964                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3965                                           "E-Switch mirroring can't support"
3966                                           " Sample action and jump action in"
3967                                           " same flow now");
3968         if (!action->conf)
3969                 return rte_flow_error_set(error, EINVAL,
3970                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3971                                           NULL, "action configuration not set");
3972         target_group =
3973                 ((const struct rte_flow_action_jump *)action->conf)->group;
3974         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3975                                        grp_info, error);
3976         if (ret)
3977                 return ret;
3978         if (attributes->group == target_group &&
3979             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3980                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3981                 return rte_flow_error_set(error, EINVAL,
3982                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3983                                           "target group must be other than"
3984                                           " the current flow group");
3985         return 0;
3986 }
3987
3988 /*
3989  * Validate the port_id action.
3990  *
3991  * @param[in] dev
3992  *   Pointer to rte_eth_dev structure.
3993  * @param[in] action_flags
3994  *   Bit-fields that holds the actions detected until now.
3995  * @param[in] action
3996  *   Port_id RTE action structure.
3997  * @param[in] attr
3998  *   Attributes of flow that includes this action.
3999  * @param[out] error
4000  *   Pointer to error structure.
4001  *
4002  * @return
4003  *   0 on success, a negative errno value otherwise and rte_errno is set.
4004  */
4005 static int
4006 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4007                                 uint64_t action_flags,
4008                                 const struct rte_flow_action *action,
4009                                 const struct rte_flow_attr *attr,
4010                                 struct rte_flow_error *error)
4011 {
4012         const struct rte_flow_action_port_id *port_id;
4013         struct mlx5_priv *act_priv;
4014         struct mlx5_priv *dev_priv;
4015         uint16_t port;
4016
4017         if (!attr->transfer)
4018                 return rte_flow_error_set(error, ENOTSUP,
4019                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4020                                           NULL,
4021                                           "port id action is valid in transfer"
4022                                           " mode only");
4023         if (!action || !action->conf)
4024                 return rte_flow_error_set(error, ENOTSUP,
4025                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4026                                           NULL,
4027                                           "port id action parameters must be"
4028                                           " specified");
4029         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4030                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4031                 return rte_flow_error_set(error, EINVAL,
4032                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4033                                           "can have only one fate actions in"
4034                                           " a flow");
4035         dev_priv = mlx5_dev_to_eswitch_info(dev);
4036         if (!dev_priv)
4037                 return rte_flow_error_set(error, rte_errno,
4038                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4039                                           NULL,
4040                                           "failed to obtain E-Switch info");
4041         port_id = action->conf;
4042         port = port_id->original ? dev->data->port_id : port_id->id;
4043         act_priv = mlx5_port_to_eswitch_info(port, false);
4044         if (!act_priv)
4045                 return rte_flow_error_set
4046                                 (error, rte_errno,
4047                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4048                                  "failed to obtain E-Switch port id for port");
4049         if (act_priv->domain_id != dev_priv->domain_id)
4050                 return rte_flow_error_set
4051                                 (error, EINVAL,
4052                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4053                                  "port does not belong to"
4054                                  " E-Switch being configured");
4055         return 0;
4056 }
4057
4058 /**
4059  * Get the maximum number of modify header actions.
4060  *
4061  * @param dev
4062  *   Pointer to rte_eth_dev structure.
4063  * @param flags
4064  *   Flags bits to check if root level.
4065  *
4066  * @return
4067  *   Max number of modify header actions device can support.
4068  */
4069 static inline unsigned int
4070 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4071                               uint64_t flags)
4072 {
4073         /*
4074          * There's no way to directly query the max capacity from FW.
4075          * The maximal value on root table should be assumed to be supported.
4076          */
4077         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4078                 return MLX5_MAX_MODIFY_NUM;
4079         else
4080                 return MLX5_ROOT_TBL_MODIFY_NUM;
4081 }
4082
4083 /**
4084  * Validate the meter action.
4085  *
4086  * @param[in] dev
4087  *   Pointer to rte_eth_dev structure.
4088  * @param[in] action_flags
4089  *   Bit-fields that holds the actions detected until now.
4090  * @param[in] action
4091  *   Pointer to the meter action.
4092  * @param[in] attr
4093  *   Attributes of flow that includes this action.
4094  * @param[out] error
4095  *   Pointer to error structure.
4096  *
4097  * @return
4098  *   0 on success, a negative errno value otherwise and rte_ernno is set.
4099  */
4100 static int
4101 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4102                                 uint64_t action_flags,
4103                                 const struct rte_flow_action *action,
4104                                 const struct rte_flow_attr *attr,
4105                                 struct rte_flow_error *error)
4106 {
4107         struct mlx5_priv *priv = dev->data->dev_private;
4108         const struct rte_flow_action_meter *am = action->conf;
4109         struct mlx5_flow_meter *fm;
4110
4111         if (!am)
4112                 return rte_flow_error_set(error, EINVAL,
4113                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4114                                           "meter action conf is NULL");
4115
4116         if (action_flags & MLX5_FLOW_ACTION_METER)
4117                 return rte_flow_error_set(error, ENOTSUP,
4118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4119                                           "meter chaining not support");
4120         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4121                 return rte_flow_error_set(error, ENOTSUP,
4122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4123                                           "meter with jump not support");
4124         if (!priv->mtr_en)
4125                 return rte_flow_error_set(error, ENOTSUP,
4126                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4127                                           NULL,
4128                                           "meter action not supported");
4129         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4130         if (!fm)
4131                 return rte_flow_error_set(error, EINVAL,
4132                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4133                                           "Meter not found");
4134         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4135               (!fm->ingress && !attr->ingress && attr->egress) ||
4136               (!fm->egress && !attr->egress && attr->ingress))))
4137                 return rte_flow_error_set(error, EINVAL,
4138                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4139                                           "Flow attributes are either invalid "
4140                                           "or have a conflict with current "
4141                                           "meter attributes");
4142         return 0;
4143 }
4144
4145 /**
4146  * Validate the age action.
4147  *
4148  * @param[in] action_flags
4149  *   Holds the actions detected until now.
4150  * @param[in] action
4151  *   Pointer to the age action.
4152  * @param[in] dev
4153  *   Pointer to the Ethernet device structure.
4154  * @param[out] error
4155  *   Pointer to error structure.
4156  *
4157  * @return
4158  *   0 on success, a negative errno value otherwise and rte_errno is set.
4159  */
4160 static int
4161 flow_dv_validate_action_age(uint64_t action_flags,
4162                             const struct rte_flow_action *action,
4163                             struct rte_eth_dev *dev,
4164                             struct rte_flow_error *error)
4165 {
4166         struct mlx5_priv *priv = dev->data->dev_private;
4167         const struct rte_flow_action_age *age = action->conf;
4168
4169         if (!priv->config.devx || priv->sh->cmng.counter_fallback)
4170                 return rte_flow_error_set(error, ENOTSUP,
4171                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4172                                           NULL,
4173                                           "age action not supported");
4174         if (!(action->conf))
4175                 return rte_flow_error_set(error, EINVAL,
4176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4177                                           "configuration cannot be null");
4178         if (!(age->timeout))
4179                 return rte_flow_error_set(error, EINVAL,
4180                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4181                                           "invalid timeout value 0");
4182         if (action_flags & MLX5_FLOW_ACTION_AGE)
4183                 return rte_flow_error_set(error, EINVAL,
4184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4185                                           "duplicate age actions set");
4186         return 0;
4187 }
4188
4189 /**
4190  * Validate the modify-header IPv4 DSCP actions.
4191  *
4192  * @param[in] action_flags
4193  *   Holds the actions detected until now.
4194  * @param[in] action
4195  *   Pointer to the modify action.
4196  * @param[in] item_flags
4197  *   Holds the items detected.
4198  * @param[out] error
4199  *   Pointer to error structure.
4200  *
4201  * @return
4202  *   0 on success, a negative errno value otherwise and rte_errno is set.
4203  */
4204 static int
4205 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4206                                          const struct rte_flow_action *action,
4207                                          const uint64_t item_flags,
4208                                          struct rte_flow_error *error)
4209 {
4210         int ret = 0;
4211
4212         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4213         if (!ret) {
4214                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4215                         return rte_flow_error_set(error, EINVAL,
4216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4217                                                   NULL,
4218                                                   "no ipv4 item in pattern");
4219         }
4220         return ret;
4221 }
4222
4223 /**
4224  * Validate the modify-header IPv6 DSCP actions.
4225  *
4226  * @param[in] action_flags
4227  *   Holds the actions detected until now.
4228  * @param[in] action
4229  *   Pointer to the modify action.
4230  * @param[in] item_flags
4231  *   Holds the items detected.
4232  * @param[out] error
4233  *   Pointer to error structure.
4234  *
4235  * @return
4236  *   0 on success, a negative errno value otherwise and rte_errno is set.
4237  */
4238 static int
4239 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4240                                          const struct rte_flow_action *action,
4241                                          const uint64_t item_flags,
4242                                          struct rte_flow_error *error)
4243 {
4244         int ret = 0;
4245
4246         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4247         if (!ret) {
4248                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4249                         return rte_flow_error_set(error, EINVAL,
4250                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4251                                                   NULL,
4252                                                   "no ipv6 item in pattern");
4253         }
4254         return ret;
4255 }
4256
4257 /**
4258  * Match modify-header resource.
4259  *
4260  * @param list
4261  *   Pointer to the hash list.
4262  * @param entry
4263  *   Pointer to exist resource entry object.
4264  * @param key
4265  *   Key of the new entry.
4266  * @param ctx
4267  *   Pointer to new modify-header resource.
4268  *
4269  * @return
4270  *   0 on matching, non-zero otherwise.
4271  */
4272 int
4273 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4274                         struct mlx5_hlist_entry *entry,
4275                         uint64_t key __rte_unused, void *cb_ctx)
4276 {
4277         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4278         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4279         struct mlx5_flow_dv_modify_hdr_resource *resource =
4280                         container_of(entry, typeof(*resource), entry);
4281         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4282
4283         key_len += ref->actions_num * sizeof(ref->actions[0]);
4284         return ref->actions_num != resource->actions_num ||
4285                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4286 }
4287
4288 struct mlx5_hlist_entry *
4289 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4290                          void *cb_ctx)
4291 {
4292         struct mlx5_dev_ctx_shared *sh = list->ctx;
4293         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4294         struct mlx5dv_dr_domain *ns;
4295         struct mlx5_flow_dv_modify_hdr_resource *entry;
4296         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4297         int ret;
4298         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4299         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4300
4301         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4302                             SOCKET_ID_ANY);
4303         if (!entry) {
4304                 rte_flow_error_set(ctx->error, ENOMEM,
4305                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4306                                    "cannot allocate resource memory");
4307                 return NULL;
4308         }
4309         rte_memcpy(&entry->ft_type,
4310                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4311                    key_len + data_len);
4312         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4313                 ns = sh->fdb_domain;
4314         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4315                 ns = sh->tx_domain;
4316         else
4317                 ns = sh->rx_domain;
4318         ret = mlx5_flow_os_create_flow_action_modify_header
4319                                         (sh->ctx, ns, entry,
4320                                          data_len, &entry->action);
4321         if (ret) {
4322                 mlx5_free(entry);
4323                 rte_flow_error_set(ctx->error, ENOMEM,
4324                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4325                                    NULL, "cannot create modification action");
4326                 return NULL;
4327         }
4328         return &entry->entry;
4329 }
4330
4331 /**
4332  * Validate the sample action.
4333  *
4334  * @param[in] action_flags
4335  *   Holds the actions detected until now.
4336  * @param[in] action
4337  *   Pointer to the sample action.
4338  * @param[in] dev
4339  *   Pointer to the Ethernet device structure.
4340  * @param[in] attr
4341  *   Attributes of flow that includes this action.
4342  * @param[out] error
4343  *   Pointer to error structure.
4344  *
4345  * @return
4346  *   0 on success, a negative errno value otherwise and rte_errno is set.
4347  */
static int
flow_dv_validate_action_sample(uint64_t action_flags,
			       const struct rte_flow_action *action,
			       struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *dev_conf = &priv->config;
	const struct rte_flow_action_sample *sample = action->conf;
	const struct rte_flow_action *act;
	uint64_t sub_action_flags = 0;
	/* 0xFFFF means "no QUEUE sub-action seen". */
	uint16_t queue_index = 0xFFFF;
	int actions_n = 0;
	int ret;
	/*
	 * NOTE(review): 'fdb_mirror' is not declared in this function, so it
	 * is presumably a file-scope flag consumed by other validators
	 * (e.g. jump/port-id ordering checks) - confirm its declaration and
	 * readers before relying on this description.
	 */
	fdb_mirror = 0;

	/* Basic sanity of the sample configuration itself. */
	if (!sample)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be NULL");
	if (sample->ratio == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "ratio value starts from 1");
	if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "sample action not supported");
	/* Only one sample action per flow, and it must precede meter/jump. */
	if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Multiple sample actions not "
					  "supported");
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, meter should "
					  "be after sample action");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, jump should "
					  "be after sample action");
	/* Validate every sub-action of the sample's embedded action list. */
	act = sample->actions;
	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  act, "too many actions");
		switch (act->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(act,
							      sub_action_flags,
							      dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the queue for the hairpin check below. */
			queue_index = ((const struct rte_flow_action_queue *)
							(act->conf))->index;
			sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, act,
							   sub_action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Extended metadata mode widens the MARK action. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
				sub_action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
			else
				sub_action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      sub_action_flags,
							      act,
							      attr,
							      error);
			if (ret)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* May account for more than one action slot. */
			ret = flow_dv_validate_action_raw_encap_decap
				(dev, NULL, act->conf, attr, &sub_action_flags,
				 &actions_n, error);
			if (ret < 0)
				return ret;
			++actions_n;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Doesn't support optional "
						  "action");
		}
	}
	/* Per-direction constraints on the collected sub-action set. */
	if (attr->ingress && !attr->transfer) {
		if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Ingress must has a dest "
						  "QUEUE for Sample");
	} else if (attr->egress && !attr->transfer) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "Sample Only support Ingress "
					  "or E-Switch");
	} else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
		/* Transfer (E-Switch) sampling acts as port mirroring. */
		MLX5_ASSERT(attr->transfer);
		if (sample->ratio > 1)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch doesn't support "
						  "any optional action "
						  "for sampling");
		fdb_mirror = 1;
		if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch must has a dest "
						  "port for mirroring");
	}
	/* Continue validation for Xcap actions.*/
	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
	    (queue_index == 0xFFFF ||
	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
		/* Encap+decap together is only allowed on hairpin queues. */
		if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
		     MLX5_FLOW_XCAP_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap and decap "
						  "combination aren't "
						  "supported");
		if (!attr->transfer && attr->ingress && (sub_action_flags &
							MLX5_FLOW_ACTION_ENCAP))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap is not supported"
						  " for ingress traffic");
	}
	return 0;
}
4514
4515 /**
4516  * Find existing modify-header resource or create and register a new one.
4517  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
4523  *   Pointer to the dev_flow.
4524  * @param[out] error
4525  *   pointer to error structure.
4526  *
4527  * @return
4528  *   0 on success otherwise -errno and errno is set.
4529  */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	/* Hash key spans ft_type..end of struct plus the trailing actions. */
	uint32_t key_len = sizeof(*resource) -
			   offsetof(typeof(*resource), ft_type) +
			   resource->actions_num * sizeof(resource->actions[0]);
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = resource,
	};

	/* Group 0 rules are inserted at the root table level. */
	resource->flags = dev_flow->dv.group ? 0 :
			  MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* The root level supports fewer modify-header actions. */
	if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
				    resource->flags))
		return rte_flow_error_set(error, EOVERFLOW,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many modify header items");
	/* A checksum of the key region serves as the hash-list key. */
	resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
	/* Registers a new entry or returns an existing matching one. */
	entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
	if (!entry)
		return -rte_errno;
	/* Point the flow handle at the registered (possibly shared) copy. */
	resource = container_of(entry, typeof(*resource), entry);
	dev_flow->handle->dvh.modify_hdr = resource;
	return 0;
}
4563
4564 /**
4565  * Get DV flow counter by index.
4566  *
4567  * @param[in] dev
4568  *   Pointer to the Ethernet device structure.
4569  * @param[in] idx
4570  *   mlx5 flow counter index in the container.
4571  * @param[out] ppool
 *   mlx5 flow counter pool in the container.
4573  *
4574  * @return
4575  *   Pointer to the counter, NULL otherwise.
4576  */
4577 static struct mlx5_flow_counter *
4578 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4579                            uint32_t idx,
4580                            struct mlx5_flow_counter_pool **ppool)
4581 {
4582         struct mlx5_priv *priv = dev->data->dev_private;
4583         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4584         struct mlx5_flow_counter_pool *pool;
4585
4586         /* Decrease to original index and clear shared bit. */
4587         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4588         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4589         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4590         MLX5_ASSERT(pool);
4591         if (ppool)
4592                 *ppool = pool;
4593         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4594 }
4595
4596 /**
4597  * Check the devx counter belongs to the pool.
4598  *
4599  * @param[in] pool
4600  *   Pointer to the counter pool.
4601  * @param[in] id
4602  *   The counter devx ID.
4603  *
4604  * @return
4605  *   True if counter belongs to the pool, false otherwise.
4606  */
4607 static bool
4608 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4609 {
4610         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4611                    MLX5_COUNTERS_PER_POOL;
4612
4613         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4614                 return true;
4615         return false;
4616 }
4617
4618 /**
4619  * Get a pool by devx counter ID.
4620  *
4621  * @param[in] cmng
4622  *   Pointer to the counter management.
4623  * @param[in] id
4624  *   The counter devx ID.
4625  *
4626  * @return
 *   The counter pool pointer if exists, NULL otherwise.
4628  */
4629 static struct mlx5_flow_counter_pool *
4630 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4631 {
4632         uint32_t i;
4633         struct mlx5_flow_counter_pool *pool = NULL;
4634
4635         rte_spinlock_lock(&cmng->pool_update_sl);
4636         /* Check last used pool. */
4637         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4638             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4639                 pool = cmng->pools[cmng->last_pool_idx];
4640                 goto out;
4641         }
4642         /* ID out of range means no suitable pool in the container. */
4643         if (id > cmng->max_id || id < cmng->min_id)
4644                 goto out;
4645         /*
4646          * Find the pool from the end of the container, since mostly counter
4647          * ID is sequence increasing, and the last pool should be the needed
4648          * one.
4649          */
4650         i = cmng->n_valid;
4651         while (i--) {
4652                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4653
4654                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4655                         pool = pool_tmp;
4656                         break;
4657                 }
4658         }
4659 out:
4660         rte_spinlock_unlock(&cmng->pool_update_sl);
4661         return pool;
4662 }
4663
4664 /**
4665  * Resize a counter container.
4666  *
4667  * @param[in] dev
4668  *   Pointer to the Ethernet device structure.
4669  *
4670  * @return
4671  *   0 on success, otherwise negative errno value and rte_errno is set.
4672  */
4673 static int
4674 flow_dv_container_resize(struct rte_eth_dev *dev)
4675 {
4676         struct mlx5_priv *priv = dev->data->dev_private;
4677         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4678         void *old_pools = cmng->pools;
4679         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4680         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4681         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4682
4683         if (!pools) {
4684                 rte_errno = ENOMEM;
4685                 return -ENOMEM;
4686         }
4687         if (old_pools)
4688                 memcpy(pools, old_pools, cmng->n *
4689                                        sizeof(struct mlx5_flow_counter_pool *));
4690         cmng->n = resize;
4691         cmng->pools = pools;
4692         if (old_pools)
4693                 mlx5_free(old_pools);
4694         return 0;
4695 }
4696
4697 /**
4698  * Query a devx flow counter.
4699  *
4700  * @param[in] dev
4701  *   Pointer to the Ethernet device structure.
4702  * @param[in] cnt
4703  *   Index to the flow counter.
4704  * @param[out] pkts
4705  *   The statistics value of packets.
4706  * @param[out] bytes
4707  *   The statistics value of bytes.
4708  *
4709  * @return
4710  *   0 on success, otherwise a negative errno value and rte_errno is set.
4711  */
static inline int
_flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
		     uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	int offset;

	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/* In fallback mode query the counter's own DevX object directly. */
	if (priv->sh->cmng.counter_fallback)
		return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
					0, pkts, bytes, 0, NULL, NULL, 0);
	/* The lock serializes against the async batch query updating raw. */
	rte_spinlock_lock(&pool->sl);
	if (!pool->raw) {
		/* No batch query data yet - report zeroed statistics. */
		*pkts = 0;
		*bytes = 0;
	} else {
		/* Read from the raw data of the last async batch query. */
		offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
		*pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
		*bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
	}
	rte_spinlock_unlock(&pool->sl);
	return 0;
}
4738
4739 /**
4740  * Create and initialize a new counter pool.
4741  *
4742  * @param[in] dev
4743  *   Pointer to the Ethernet device structure.
4744  * @param[out] dcs
4745  *   The devX counter handle.
4746  * @param[in] age
4747  *   Whether the pool is for counter that was allocated for aging.
4748  * @param[in/out] cont_cur
4749  *   Pointer to the container pointer, it will be update in pool resize.
4750  *
4751  * @return
4752  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
4753  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Counters (and optional age params) are laid out after the pool. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* Publish the pool under the container lock. */
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the pool-pointer array first if it is already full. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/* Track the DevX counter ID range covered by all pools. */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
4801
4802 /**
4803  * Prepare a new counter and/or a new counter pool.
4804  *
4805  * @param[in] dev
4806  *   Pointer to the Ethernet device structure.
4807  * @param[out] cnt_free
4808  *   Where to put the pointer of a new counter.
4809  * @param[in] age
4810  *   Whether the pool is for counter that was allocated for aging.
4811  *
4812  * @return
4813  *   The counter pool pointer and @p cnt_free is set on success,
4814  *   NULL otherwise and rte_errno is set.
4815  */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		/* Reuse the pool covering this ID range, or create one. */
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		/* Hand out the slot matching the new DevX counter ID;
		 * the counter takes ownership of the dcs object.
		 */
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/* Non-zero bulk_bitmap (0x4) requests a bulk of counters at once. */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/* Counter 0 goes to the caller; queue the rest on the free list. */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	/* Splice the new counters into the shared free list under lock. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
4875
4876 /**
4877  * Allocate a flow counter.
4878  *
4879  * @param[in] dev
4880  *   Pointer to the Ethernet device structure.
4881  * @param[in] age
4882  *   Whether the counter was allocated for aging.
4883  *
4884  * @return
4885  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4886  */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Counters require DevX support. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty - prepare a new pool which also sets cnt_free. */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	/* In fallback mode the counter now owns an active DevX object. */
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Bulk counters share the pool's DevX object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback counters carry their own DevX object. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	return cnt_idx;
err:
	/* Put the counter back on the free list on any failure. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
4955
4956 /**
4957  * Allocate a shared flow counter.
4958  *
4959  * @param[in] ctx
4960  *   Pointer to the shared counter configuration.
4961  * @param[in] data
4962  *   Pointer to save the allocated counter index.
4963  *
4964  * @return
4965  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4966  */
4967
4968 static int32_t
4969 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4970 {
4971         struct mlx5_shared_counter_conf *conf = ctx;
4972         struct rte_eth_dev *dev = conf->dev;
4973         struct mlx5_flow_counter *cnt;
4974
4975         data->dword = flow_dv_counter_alloc(dev, 0);
4976         data->dword |= MLX5_CNT_SHARED_OFFSET;
4977         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4978         cnt->shared_info.id = conf->id;
4979         return 0;
4980 }
4981
4982 /**
4983  * Get a shared flow counter.
4984  *
4985  * @param[in] dev
4986  *   Pointer to the Ethernet device structure.
4987  * @param[in] id
4988  *   Counter identifier.
4989  *
4990  * @return
4991  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4992  */
4993 static uint32_t
4994 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4995 {
4996         struct mlx5_priv *priv = dev->data->dev_private;
4997         struct mlx5_shared_counter_conf conf = {
4998                 .dev = dev,
4999                 .id = id,
5000         };
5001         union mlx5_l3t_data data = {
5002                 .dword = 0,
5003         };
5004
5005         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5006                                flow_dv_counter_alloc_shared_cb, &conf);
5007         return data.dword;
5008 }
5009
5010 /**
5011  * Get age param from counter index.
5012  *
5013  * @param[in] dev
5014  *   Pointer to the Ethernet device structure.
5015  * @param[in] counter
5016  *   Index to the counter handler.
5017  *
5018  * @return
5019  *   The aging parameter specified for the counter index.
5020  */
5021 static struct mlx5_age_param*
5022 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5023                                 uint32_t counter)
5024 {
5025         struct mlx5_flow_counter *cnt;
5026         struct mlx5_flow_counter_pool *pool = NULL;
5027
5028         flow_dv_counter_get_by_idx(dev, counter, &pool);
5029         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5030         cnt = MLX5_POOL_GET_CNT(pool, counter);
5031         return MLX5_CNT_TO_AGE(cnt);
5032 }
5033
5034 /**
5035  * Remove a flow counter from aged counter list.
5036  *
5037  * @param[in] dev
5038  *   Pointer to the Ethernet device structure.
5039  * @param[in] counter
5040  *   Index to the counter handler.
5041  * @param[in] cnt
5042  *   Pointer to the counter handler.
5043  */
5044 static void
5045 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5046                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5047 {
5048         struct mlx5_age_info *age_info;
5049         struct mlx5_age_param *age_param;
5050         struct mlx5_priv *priv = dev->data->dev_private;
5051         uint16_t expected = AGE_CANDIDATE;
5052
5053         age_info = GET_PORT_AGE_INFO(priv);
5054         age_param = flow_dv_counter_idx_get_age(dev, counter);
5055         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5056                                          AGE_FREE, false, __ATOMIC_RELAXED,
5057                                          __ATOMIC_RELAXED)) {
5058                 /**
5059                  * We need the lock even it is age timeout,
5060                  * since counter may still in process.
5061                  */
5062                 rte_spinlock_lock(&age_info->aged_sl);
5063                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5064                 rte_spinlock_unlock(&age_info->aged_sl);
5065                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5066         }
5067 }
5068
/**
 * Release a flow counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the counter handler.
 */
static void
flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type;

	/* Index 0 means "no counter" - nothing to release. */
	if (!counter)
		return;
	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
	MLX5_ASSERT(pool);
	/*
	 * For a shared counter, clear its L3 table entry first; a non-zero
	 * return presumably means other users still reference it, so the
	 * counter must stay allocated - TODO confirm against
	 * mlx5_l3t_clear_entry() semantics.
	 */
	if (IS_SHARED_CNT(counter) &&
	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
		return;
	/* Detach the counter from the aging machinery before reuse. */
	if (pool->is_aged)
		flow_dv_counter_remove_from_age(dev, counter, cnt);
	cnt->pool = pool;
	/*
	 * Put the counter back to list to be updated in none fallback mode.
	 * Currently, we are using two lists alternately: while one is in
	 * query, add the freed counter to the other list based on the pool
	 * query_gen value. After query finishes, move that list's counters
	 * to the global container counter list. The list changes when query
	 * starts, so no lock against the query callback is needed here as
	 * the two sides operate on different lists; pool->csl only
	 * serializes concurrent releases.
	 */
	if (!priv->sh->cmng.counter_fallback) {
		rte_spinlock_lock(&pool->csl);
		TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
		rte_spinlock_unlock(&pool->csl);
	} else {
		/*
		 * Fallback mode: the counter keeps its own devx object
		 * (dcs) and goes straight back to the global free list of
		 * its type (aged vs. plain).
		 */
		cnt->dcs_when_free = cnt->dcs_when_active;
		cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
					   MLX5_COUNTER_TYPE_ORIGIN;
		rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
		TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
				  cnt, next);
		rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
	}
}
5119
/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] tunnel
 *   Pointer to the tunnel offload descriptor, NULL for non-tunnel rules.
 * @param[in] attributes
 *   Pointer to flow attributes
 * @param[in] grp_info
 *   Group translation info (external/transfer/default-rule flags), used
 *   to map the rte_flow group number to a hardware table.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   - 0 on success and non root table.
 *   - 1 on success and root table.
 *   - a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
			    const struct mlx5_flow_tunnel *tunnel,
			    const struct rte_flow_attr *attributes,
			    struct flow_grp_info grp_info,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;
	int ret = 0;

#ifndef HAVE_MLX5DV_DR
	/* Without DR support only the root table (group 0) is available. */
	RTE_SET_USED(tunnel);
	RTE_SET_USED(grp_info);
	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL,
					  "groups are not supported");
#else
	uint32_t table = 0;

	/* Translate the rte_flow group number into a hardware table index. */
	ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
				       grp_info, error);
	if (ret)
		return ret;
	/* Table 0 is the root table; report it via the return value. */
	if (!table)
		ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
#endif
	/* MLX5_FLOW_PRIO_RSVD means "let the PMD pick the priority". */
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL,
					  "priority out of range");
	if (attributes->transfer) {
		/* E-Switch (transfer) rules need DV E-Switch support. */
		if (!priv->config.dv_esw_en)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				 "E-Switch dr is not supported");
		if (!(priv->representor || priv->master))
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				 NULL, "E-Switch configuration can only be"
				 " done by a master or a representor device");
		if (attributes->egress)
			return rte_flow_error_set
				(error, ENOTSUP,
				 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
				 "egress is not supported");
	}
	/* Exactly one direction must be specified. */
	if (!(attributes->egress ^ attributes->ingress))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
					  "must specify exactly one of "
					  "ingress or egress");
	return ret;
}
5197
5198 /**
5199  * Internal validation function. For validating both actions and items.
5200  *
5201  * @param[in] dev
5202  *   Pointer to the rte_eth_dev structure.
5203  * @param[in] attr
5204  *   Pointer to the flow attributes.
5205  * @param[in] items
5206  *   Pointer to the list of items.
5207  * @param[in] actions
5208  *   Pointer to the list of actions.
5209  * @param[in] external
5210  *   This flow rule is created by request external to PMD.
5211  * @param[in] hairpin
5212  *   Number of hairpin TX actions, 0 means classic flow.
5213  * @param[out] error
5214  *   Pointer to the error structure.
5215  *
5216  * @return
5217  *   0 on success, a negative errno value otherwise and rte_errno is set.
5218  */
5219 static int
5220 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5221                  const struct rte_flow_item items[],
5222                  const struct rte_flow_action actions[],
5223                  bool external, int hairpin, struct rte_flow_error *error)
5224 {
5225         int ret;
5226         uint64_t action_flags = 0;
5227         uint64_t item_flags = 0;
5228         uint64_t last_item = 0;
5229         uint8_t next_protocol = 0xff;
5230         uint16_t ether_type = 0;
5231         int actions_n = 0;
5232         uint8_t item_ipv6_proto = 0;
5233         const struct rte_flow_item *gre_item = NULL;
5234         const struct rte_flow_action_raw_decap *decap;
5235         const struct rte_flow_action_raw_encap *encap;
5236         const struct rte_flow_action_rss *rss;
5237         const struct rte_flow_item_tcp nic_tcp_mask = {
5238                 .hdr = {
5239                         .tcp_flags = 0xFF,
5240                         .src_port = RTE_BE16(UINT16_MAX),
5241                         .dst_port = RTE_BE16(UINT16_MAX),
5242                 }
5243         };
5244         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5245                 .hdr = {
5246                         .src_addr =
5247                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5248                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5249                         .dst_addr =
5250                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5251                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5252                         .vtc_flow = RTE_BE32(0xffffffff),
5253                         .proto = 0xff,
5254                         .hop_limits = 0xff,
5255                 },
5256                 .has_frag_ext = 1,
5257         };
5258         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5259                 .hdr = {
5260                         .common = {
5261                                 .u32 =
5262                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5263                                         .type = 0xFF,
5264                                         }).u32),
5265                         },
5266                         .dummy[0] = 0xffffffff,
5267                 },
5268         };
5269         struct mlx5_priv *priv = dev->data->dev_private;
5270         struct mlx5_dev_config *dev_conf = &priv->config;
5271         uint16_t queue_index = 0xFFFF;
5272         const struct rte_flow_item_vlan *vlan_m = NULL;
5273         int16_t rw_act_num = 0;
5274         uint64_t is_root;
5275         const struct mlx5_flow_tunnel *tunnel;
5276         struct flow_grp_info grp_info = {
5277                 .external = !!external,
5278                 .transfer = !!attr->transfer,
5279                 .fdb_def_rule = !!priv->fdb_def_rule,
5280         };
5281         const struct rte_eth_hairpin_conf *conf;
5282
5283         if (items == NULL)
5284                 return -1;
5285         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5286                 tunnel = flow_items_to_tunnel(items);
5287                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5288                                 MLX5_FLOW_ACTION_DECAP;
5289         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5290                 tunnel = flow_actions_to_tunnel(actions);
5291                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5292         } else {
5293                 tunnel = NULL;
5294         }
5295         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5296                                 (dev, tunnel, attr, items, actions);
5297         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5298         if (ret < 0)
5299                 return ret;
5300         is_root = (uint64_t)ret;
5301         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5302                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5303                 int type = items->type;
5304
5305                 if (!mlx5_flow_os_item_supported(type))
5306                         return rte_flow_error_set(error, ENOTSUP,
5307                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5308                                                   NULL, "item not supported");
5309                 switch (type) {
5310                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5311                         if (items[0].type != (typeof(items[0].type))
5312                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5313                                 return rte_flow_error_set
5314                                                 (error, EINVAL,
5315                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5316                                                 NULL, "MLX5 private items "
5317                                                 "must be the first");
5318                         break;
5319                 case RTE_FLOW_ITEM_TYPE_VOID:
5320                         break;
5321                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5322                         ret = flow_dv_validate_item_port_id
5323                                         (dev, items, attr, item_flags, error);
5324                         if (ret < 0)
5325                                 return ret;
5326                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5327                         break;
5328                 case RTE_FLOW_ITEM_TYPE_ETH:
5329                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5330                                                           true, error);
5331                         if (ret < 0)
5332                                 return ret;
5333                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5334                                              MLX5_FLOW_LAYER_OUTER_L2;
5335                         if (items->mask != NULL && items->spec != NULL) {
5336                                 ether_type =
5337                                         ((const struct rte_flow_item_eth *)
5338                                          items->spec)->type;
5339                                 ether_type &=
5340                                         ((const struct rte_flow_item_eth *)
5341                                          items->mask)->type;
5342                                 ether_type = rte_be_to_cpu_16(ether_type);
5343                         } else {
5344                                 ether_type = 0;
5345                         }
5346                         break;
5347                 case RTE_FLOW_ITEM_TYPE_VLAN:
5348                         ret = flow_dv_validate_item_vlan(items, item_flags,
5349                                                          dev, error);
5350                         if (ret < 0)
5351                                 return ret;
5352                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5353                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5354                         if (items->mask != NULL && items->spec != NULL) {
5355                                 ether_type =
5356                                         ((const struct rte_flow_item_vlan *)
5357                                          items->spec)->inner_type;
5358                                 ether_type &=
5359                                         ((const struct rte_flow_item_vlan *)
5360                                          items->mask)->inner_type;
5361                                 ether_type = rte_be_to_cpu_16(ether_type);
5362                         } else {
5363                                 ether_type = 0;
5364                         }
5365                         /* Store outer VLAN mask for of_push_vlan action. */
5366                         if (!tunnel)
5367                                 vlan_m = items->mask;
5368                         break;
5369                 case RTE_FLOW_ITEM_TYPE_IPV4:
5370                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5371                                                   &item_flags, &tunnel);
5372                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5373                                                          last_item, ether_type,
5374                                                          error);
5375                         if (ret < 0)
5376                                 return ret;
5377                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5378                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5379                         if (items->mask != NULL &&
5380                             ((const struct rte_flow_item_ipv4 *)
5381                              items->mask)->hdr.next_proto_id) {
5382                                 next_protocol =
5383                                         ((const struct rte_flow_item_ipv4 *)
5384                                          (items->spec))->hdr.next_proto_id;
5385                                 next_protocol &=
5386                                         ((const struct rte_flow_item_ipv4 *)
5387                                          (items->mask))->hdr.next_proto_id;
5388                         } else {
5389                                 /* Reset for inner layer. */
5390                                 next_protocol = 0xff;
5391                         }
5392                         break;
5393                 case RTE_FLOW_ITEM_TYPE_IPV6:
5394                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5395                                                   &item_flags, &tunnel);
5396                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5397                                                            last_item,
5398                                                            ether_type,
5399                                                            &nic_ipv6_mask,
5400                                                            error);
5401                         if (ret < 0)
5402                                 return ret;
5403                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5404                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5405                         if (items->mask != NULL &&
5406                             ((const struct rte_flow_item_ipv6 *)
5407                              items->mask)->hdr.proto) {
5408                                 item_ipv6_proto =
5409                                         ((const struct rte_flow_item_ipv6 *)
5410                                          items->spec)->hdr.proto;
5411                                 next_protocol =
5412                                         ((const struct rte_flow_item_ipv6 *)
5413                                          items->spec)->hdr.proto;
5414                                 next_protocol &=
5415                                         ((const struct rte_flow_item_ipv6 *)
5416                                          items->mask)->hdr.proto;
5417                         } else {
5418                                 /* Reset for inner layer. */
5419                                 next_protocol = 0xff;
5420                         }
5421                         break;
5422                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5423                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5424                                                                   item_flags,
5425                                                                   error);
5426                         if (ret < 0)
5427                                 return ret;
5428                         last_item = tunnel ?
5429                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5430                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5431                         if (items->mask != NULL &&
5432                             ((const struct rte_flow_item_ipv6_frag_ext *)
5433                              items->mask)->hdr.next_header) {
5434                                 next_protocol =
5435                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5436                                  items->spec)->hdr.next_header;
5437                                 next_protocol &=
5438                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5439                                  items->mask)->hdr.next_header;
5440                         } else {
5441                                 /* Reset for inner layer. */
5442                                 next_protocol = 0xff;
5443                         }
5444                         break;
5445                 case RTE_FLOW_ITEM_TYPE_TCP:
5446                         ret = mlx5_flow_validate_item_tcp
5447                                                 (items, item_flags,
5448                                                  next_protocol,
5449                                                  &nic_tcp_mask,
5450                                                  error);
5451                         if (ret < 0)
5452                                 return ret;
5453                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5454                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5455                         break;
5456                 case RTE_FLOW_ITEM_TYPE_UDP:
5457                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5458                                                           next_protocol,
5459                                                           error);
5460                         if (ret < 0)
5461                                 return ret;
5462                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5463                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5464                         break;
5465                 case RTE_FLOW_ITEM_TYPE_GRE:
5466                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5467                                                           next_protocol, error);
5468                         if (ret < 0)
5469                                 return ret;
5470                         gre_item = items;
5471                         last_item = MLX5_FLOW_LAYER_GRE;
5472                         break;
5473                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5474                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5475                                                             next_protocol,
5476                                                             error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         last_item = MLX5_FLOW_LAYER_NVGRE;
5480                         break;
5481                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5482                         ret = mlx5_flow_validate_item_gre_key
5483                                 (items, item_flags, gre_item, error);
5484                         if (ret < 0)
5485                                 return ret;
5486                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5487                         break;
5488                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5489                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5490                                                             error);
5491                         if (ret < 0)
5492                                 return ret;
5493                         last_item = MLX5_FLOW_LAYER_VXLAN;
5494                         break;
5495                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5496                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5497                                                                 item_flags, dev,
5498                                                                 error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5502                         break;
5503                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5504                         ret = mlx5_flow_validate_item_geneve(items,
5505                                                              item_flags, dev,
5506                                                              error);
5507                         if (ret < 0)
5508                                 return ret;
5509                         last_item = MLX5_FLOW_LAYER_GENEVE;
5510                         break;
5511                 case RTE_FLOW_ITEM_TYPE_MPLS:
5512                         ret = mlx5_flow_validate_item_mpls(dev, items,
5513                                                            item_flags,
5514                                                            last_item, error);
5515                         if (ret < 0)
5516                                 return ret;
5517                         last_item = MLX5_FLOW_LAYER_MPLS;
5518                         break;
5519
5520                 case RTE_FLOW_ITEM_TYPE_MARK:
5521                         ret = flow_dv_validate_item_mark(dev, items, attr,
5522                                                          error);
5523                         if (ret < 0)
5524                                 return ret;
5525                         last_item = MLX5_FLOW_ITEM_MARK;
5526                         break;
5527                 case RTE_FLOW_ITEM_TYPE_META:
5528                         ret = flow_dv_validate_item_meta(dev, items, attr,
5529                                                          error);
5530                         if (ret < 0)
5531                                 return ret;
5532                         last_item = MLX5_FLOW_ITEM_METADATA;
5533                         break;
5534                 case RTE_FLOW_ITEM_TYPE_ICMP:
5535                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5536                                                            next_protocol,
5537                                                            error);
5538                         if (ret < 0)
5539                                 return ret;
5540                         last_item = MLX5_FLOW_LAYER_ICMP;
5541                         break;
5542                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5543                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5544                                                             next_protocol,
5545                                                             error);
5546                         if (ret < 0)
5547                                 return ret;
5548                         item_ipv6_proto = IPPROTO_ICMPV6;
5549                         last_item = MLX5_FLOW_LAYER_ICMP6;
5550                         break;
5551                 case RTE_FLOW_ITEM_TYPE_TAG:
5552                         ret = flow_dv_validate_item_tag(dev, items,
5553                                                         attr, error);
5554                         if (ret < 0)
5555                                 return ret;
5556                         last_item = MLX5_FLOW_ITEM_TAG;
5557                         break;
5558                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5559                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5560                         break;
5561                 case RTE_FLOW_ITEM_TYPE_GTP:
5562                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5563                                                         error);
5564                         if (ret < 0)
5565                                 return ret;
5566                         last_item = MLX5_FLOW_LAYER_GTP;
5567                         break;
5568                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5569                         /* Capacity will be checked in the translate stage. */
5570                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5571                                                             last_item,
5572                                                             ether_type,
5573                                                             &nic_ecpri_mask,
5574                                                             error);
5575                         if (ret < 0)
5576                                 return ret;
5577                         last_item = MLX5_FLOW_LAYER_ECPRI;
5578                         break;
5579                 default:
5580                         return rte_flow_error_set(error, ENOTSUP,
5581                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5582                                                   NULL, "item not supported");
5583                 }
5584                 item_flags |= last_item;
5585         }
5586         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5587                 int type = actions->type;
5588
5589                 if (!mlx5_flow_os_action_supported(type))
5590                         return rte_flow_error_set(error, ENOTSUP,
5591                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5592                                                   actions,
5593                                                   "action not supported");
5594                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5595                         return rte_flow_error_set(error, ENOTSUP,
5596                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5597                                                   actions, "too many actions");
5598                 switch (type) {
5599                 case RTE_FLOW_ACTION_TYPE_VOID:
5600                         break;
5601                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5602                         ret = flow_dv_validate_action_port_id(dev,
5603                                                               action_flags,
5604                                                               actions,
5605                                                               attr,
5606                                                               error);
5607                         if (ret)
5608                                 return ret;
5609                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5610                         ++actions_n;
5611                         break;
5612                 case RTE_FLOW_ACTION_TYPE_FLAG:
5613                         ret = flow_dv_validate_action_flag(dev, action_flags,
5614                                                            attr, error);
5615                         if (ret < 0)
5616                                 return ret;
5617                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5618                                 /* Count all modify-header actions as one. */
5619                                 if (!(action_flags &
5620                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5621                                         ++actions_n;
5622                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5623                                                 MLX5_FLOW_ACTION_MARK_EXT;
5624                         } else {
5625                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5626                                 ++actions_n;
5627                         }
5628                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5629                         break;
5630                 case RTE_FLOW_ACTION_TYPE_MARK:
5631                         ret = flow_dv_validate_action_mark(dev, actions,
5632                                                            action_flags,
5633                                                            attr, error);
5634                         if (ret < 0)
5635                                 return ret;
5636                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5637                                 /* Count all modify-header actions as one. */
5638                                 if (!(action_flags &
5639                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5640                                         ++actions_n;
5641                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5642                                                 MLX5_FLOW_ACTION_MARK_EXT;
5643                         } else {
5644                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5645                                 ++actions_n;
5646                         }
5647                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5648                         break;
5649                 case RTE_FLOW_ACTION_TYPE_SET_META:
5650                         ret = flow_dv_validate_action_set_meta(dev, actions,
5651                                                                action_flags,
5652                                                                attr, error);
5653                         if (ret < 0)
5654                                 return ret;
5655                         /* Count all modify-header actions as one action. */
5656                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5657                                 ++actions_n;
5658                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5659                         rw_act_num += MLX5_ACT_NUM_SET_META;
5660                         break;
5661                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5662                         ret = flow_dv_validate_action_set_tag(dev, actions,
5663                                                               action_flags,
5664                                                               attr, error);
5665                         if (ret < 0)
5666                                 return ret;
5667                         /* Count all modify-header actions as one action. */
5668                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5669                                 ++actions_n;
5670                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5671                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5672                         break;
5673                 case RTE_FLOW_ACTION_TYPE_DROP:
5674                         ret = mlx5_flow_validate_action_drop(action_flags,
5675                                                              attr, error);
5676                         if (ret < 0)
5677                                 return ret;
5678                         action_flags |= MLX5_FLOW_ACTION_DROP;
5679                         ++actions_n;
5680                         break;
5681                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5682                         ret = mlx5_flow_validate_action_queue(actions,
5683                                                               action_flags, dev,
5684                                                               attr, error);
5685                         if (ret < 0)
5686                                 return ret;
5687                         queue_index = ((const struct rte_flow_action_queue *)
5688                                                         (actions->conf))->index;
5689                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5690                         ++actions_n;
5691                         break;
5692                 case RTE_FLOW_ACTION_TYPE_RSS:
5693                         rss = actions->conf;
5694                         ret = mlx5_flow_validate_action_rss(actions,
5695                                                             action_flags, dev,
5696                                                             attr, item_flags,
5697                                                             error);
5698                         if (ret < 0)
5699                                 return ret;
5700                         if (rss != NULL && rss->queue_num)
5701                                 queue_index = rss->queue[0];
5702                         action_flags |= MLX5_FLOW_ACTION_RSS;
5703                         ++actions_n;
5704                         break;
5705                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5706                         ret =
5707                         mlx5_flow_validate_action_default_miss(action_flags,
5708                                         attr, error);
5709                         if (ret < 0)
5710                                 return ret;
5711                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5712                         ++actions_n;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_COUNT:
5715                         ret = flow_dv_validate_action_count(dev, error);
5716                         if (ret < 0)
5717                                 return ret;
5718                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5719                         ++actions_n;
5720                         break;
5721                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5722                         if (flow_dv_validate_action_pop_vlan(dev,
5723                                                              action_flags,
5724                                                              actions,
5725                                                              item_flags, attr,
5726                                                              error))
5727                                 return -rte_errno;
5728                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5729                         ++actions_n;
5730                         break;
5731                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5732                         ret = flow_dv_validate_action_push_vlan(dev,
5733                                                                 action_flags,
5734                                                                 vlan_m,
5735                                                                 actions, attr,
5736                                                                 error);
5737                         if (ret < 0)
5738                                 return ret;
5739                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5740                         ++actions_n;
5741                         break;
5742                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5743                         ret = flow_dv_validate_action_set_vlan_pcp
5744                                                 (action_flags, actions, error);
5745                         if (ret < 0)
5746                                 return ret;
5747                         /* Count PCP with push_vlan command. */
5748                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5749                         break;
5750                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5751                         ret = flow_dv_validate_action_set_vlan_vid
5752                                                 (item_flags, action_flags,
5753                                                  actions, error);
5754                         if (ret < 0)
5755                                 return ret;
5756                         /* Count VID with push_vlan command. */
5757                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5758                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5759                         break;
5760                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5761                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5762                         ret = flow_dv_validate_action_l2_encap(dev,
5763                                                                action_flags,
5764                                                                actions, attr,
5765                                                                error);
5766                         if (ret < 0)
5767                                 return ret;
5768                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5769                         ++actions_n;
5770                         break;
5771                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5772                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5773                         ret = flow_dv_validate_action_decap(dev, action_flags,
5774                                                             attr, error);
5775                         if (ret < 0)
5776                                 return ret;
5777                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5778                         ++actions_n;
5779                         break;
5780                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5781                         ret = flow_dv_validate_action_raw_encap_decap
5782                                 (dev, NULL, actions->conf, attr, &action_flags,
5783                                  &actions_n, error);
5784                         if (ret < 0)
5785                                 return ret;
5786                         break;
5787                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5788                         decap = actions->conf;
5789                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5790                                 ;
5791                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5792                                 encap = NULL;
5793                                 actions--;
5794                         } else {
5795                                 encap = actions->conf;
5796                         }
5797                         ret = flow_dv_validate_action_raw_encap_decap
5798                                            (dev,
5799                                             decap ? decap : &empty_decap, encap,
5800                                             attr, &action_flags, &actions_n,
5801                                             error);
5802                         if (ret < 0)
5803                                 return ret;
5804                         break;
5805                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5806                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5807                         ret = flow_dv_validate_action_modify_mac(action_flags,
5808                                                                  actions,
5809                                                                  item_flags,
5810                                                                  error);
5811                         if (ret < 0)
5812                                 return ret;
5813                         /* Count all modify-header actions as one action. */
5814                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5815                                 ++actions_n;
5816                         action_flags |= actions->type ==
5817                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5818                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5819                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5820                         /*
5821                          * Even if the source and destination MAC addresses have
5822                          * overlap in the header with 4B alignment, the convert
5823                          * function will handle them separately and 4 SW actions
5824                          * will be created. And 2 actions will be added each
5825                          * time no matter how many bytes of address will be set.
5826                          */
5827                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5830                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5831                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5832                                                                   actions,
5833                                                                   item_flags,
5834                                                                   error);
5835                         if (ret < 0)
5836                                 return ret;
5837                         /* Count all modify-header actions as one action. */
5838                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5839                                 ++actions_n;
5840                         action_flags |= actions->type ==
5841                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5842                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5843                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5844                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5845                         break;
5846                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5847                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5848                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5849                                                                   actions,
5850                                                                   item_flags,
5851                                                                   error);
5852                         if (ret < 0)
5853                                 return ret;
5854                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5855                                 return rte_flow_error_set(error, ENOTSUP,
5856                                         RTE_FLOW_ERROR_TYPE_ACTION,
5857                                         actions,
5858                                         "Can't change header "
5859                                         "with ICMPv6 proto");
5860                         /* Count all modify-header actions as one action. */
5861                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5862                                 ++actions_n;
5863                         action_flags |= actions->type ==
5864                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5865                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5866                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5867                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5868                         break;
5869                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5870                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5871                         ret = flow_dv_validate_action_modify_tp(action_flags,
5872                                                                 actions,
5873                                                                 item_flags,
5874                                                                 error);
5875                         if (ret < 0)
5876                                 return ret;
5877                         /* Count all modify-header actions as one action. */
5878                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5879                                 ++actions_n;
5880                         action_flags |= actions->type ==
5881                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5882                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5883                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5884                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5885                         break;
5886                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5887                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5888                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5889                                                                  actions,
5890                                                                  item_flags,
5891                                                                  error);
5892                         if (ret < 0)
5893                                 return ret;
5894                         /* Count all modify-header actions as one action. */
5895                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5896                                 ++actions_n;
5897                         action_flags |= actions->type ==
5898                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5899                                                 MLX5_FLOW_ACTION_SET_TTL :
5900                                                 MLX5_FLOW_ACTION_DEC_TTL;
5901                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5902                         break;
5903                 case RTE_FLOW_ACTION_TYPE_JUMP:
5904                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5905                                                            action_flags,
5906                                                            attr, external,
5907                                                            error);
5908                         if (ret)
5909                                 return ret;
5910                         ++actions_n;
5911                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5912                         break;
5913                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5914                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5915                         ret = flow_dv_validate_action_modify_tcp_seq
5916                                                                 (action_flags,
5917                                                                  actions,
5918                                                                  item_flags,
5919                                                                  error);
5920                         if (ret < 0)
5921                                 return ret;
5922                         /* Count all modify-header actions as one action. */
5923                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5924                                 ++actions_n;
5925                         action_flags |= actions->type ==
5926                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5927                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5928                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5929                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5930                         break;
5931                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5932                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5933                         ret = flow_dv_validate_action_modify_tcp_ack
5934                                                                 (action_flags,
5935                                                                  actions,
5936                                                                  item_flags,
5937                                                                  error);
5938                         if (ret < 0)
5939                                 return ret;
5940                         /* Count all modify-header actions as one action. */
5941                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5942                                 ++actions_n;
5943                         action_flags |= actions->type ==
5944                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5945                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5946                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5947                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5948                         break;
5949                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5950                         break;
5951                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5952                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5953                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5954                         break;
5955                 case RTE_FLOW_ACTION_TYPE_METER:
5956                         ret = mlx5_flow_validate_action_meter(dev,
5957                                                               action_flags,
5958                                                               actions, attr,
5959                                                               error);
5960                         if (ret < 0)
5961                                 return ret;
5962                         action_flags |= MLX5_FLOW_ACTION_METER;
5963                         ++actions_n;
5964                         /* Meter action will add one more TAG action. */
5965                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5966                         break;
5967                 case RTE_FLOW_ACTION_TYPE_AGE:
5968                         ret = flow_dv_validate_action_age(action_flags,
5969                                                           actions, dev,
5970                                                           error);
5971                         if (ret < 0)
5972                                 return ret;
5973                         action_flags |= MLX5_FLOW_ACTION_AGE;
5974                         ++actions_n;
5975                         break;
5976                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5977                         ret = flow_dv_validate_action_modify_ipv4_dscp
5978                                                          (action_flags,
5979                                                           actions,
5980                                                           item_flags,
5981                                                           error);
5982                         if (ret < 0)
5983                                 return ret;
5984                         /* Count all modify-header actions as one action. */
5985                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5986                                 ++actions_n;
5987                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5988                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5989                         break;
5990                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5991                         ret = flow_dv_validate_action_modify_ipv6_dscp
5992                                                                 (action_flags,
5993                                                                  actions,
5994                                                                  item_flags,
5995                                                                  error);
5996                         if (ret < 0)
5997                                 return ret;
5998                         /* Count all modify-header actions as one action. */
5999                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6000                                 ++actions_n;
6001                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6002                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6003                         break;
6004                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6005                         ret = flow_dv_validate_action_sample(action_flags,
6006                                                              actions, dev,
6007                                                              attr, error);
6008                         if (ret < 0)
6009                                 return ret;
6010                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6011                         ++actions_n;
6012                         break;
6013                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6014                         if (actions[0].type != (typeof(actions[0].type))
6015                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6016                                 return rte_flow_error_set
6017                                                 (error, EINVAL,
6018                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6019                                                 NULL, "MLX5 private action "
6020                                                 "must be the first");
6021
6022                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6023                         break;
6024                 default:
6025                         return rte_flow_error_set(error, ENOTSUP,
6026                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6027                                                   actions,
6028                                                   "action not supported");
6029                 }
6030         }
6031         /*
6032          * Validate actions in flow rules
6033          * - Explicit decap action is prohibited by the tunnel offload API.
6034          * - Drop action in tunnel steer rule is prohibited by the API.
6035          * - Application cannot use MARK action because its value can mask
6036          *   tunnel default miss notification.
6037          * - JUMP in tunnel match rule has no support in current PMD
6038          *   implementation.
6039          * - TAG & META are reserved for future uses.
6040          */
6041         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6042                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6043                                             MLX5_FLOW_ACTION_MARK     |
6044                                             MLX5_FLOW_ACTION_SET_TAG  |
6045                                             MLX5_FLOW_ACTION_SET_META |
6046                                             MLX5_FLOW_ACTION_DROP;
6047
6048                 if (action_flags & bad_actions_mask)
6049                         return rte_flow_error_set
6050                                         (error, EINVAL,
6051                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6052                                         "Invalid RTE action in tunnel "
6053                                         "set decap rule");
6054                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6055                         return rte_flow_error_set
6056                                         (error, EINVAL,
6057                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6058                                         "tunnel set decap rule must terminate "
6059                                         "with JUMP");
6060                 if (!attr->ingress)
6061                         return rte_flow_error_set
6062                                         (error, EINVAL,
6063                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6064                                         "tunnel flows for ingress traffic only");
6065         }
6066         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6067                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6068                                             MLX5_FLOW_ACTION_MARK    |
6069                                             MLX5_FLOW_ACTION_SET_TAG |
6070                                             MLX5_FLOW_ACTION_SET_META;
6071
6072                 if (action_flags & bad_actions_mask)
6073                         return rte_flow_error_set
6074                                         (error, EINVAL,
6075                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6076                                         "Invalid RTE action in tunnel "
6077                                         "set match rule");
6078         }
6079         /*
6080          * Validate the drop action mutual exclusion with other actions.
6081          * Drop action is mutually-exclusive with any other action, except for
6082          * Count action.
6083          */
6084         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6085             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6086                 return rte_flow_error_set(error, EINVAL,
6087                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6088                                           "Drop action is mutually-exclusive "
6089                                           "with any other action, except for "
6090                                           "Count action");
6091         /* Eswitch has few restrictions on using items and actions */
6092         if (attr->transfer) {
6093                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6094                     action_flags & MLX5_FLOW_ACTION_FLAG)
6095                         return rte_flow_error_set(error, ENOTSUP,
6096                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6097                                                   NULL,
6098                                                   "unsupported action FLAG");
6099                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6100                     action_flags & MLX5_FLOW_ACTION_MARK)
6101                         return rte_flow_error_set(error, ENOTSUP,
6102                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6103                                                   NULL,
6104                                                   "unsupported action MARK");
6105                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6106                         return rte_flow_error_set(error, ENOTSUP,
6107                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6108                                                   NULL,
6109                                                   "unsupported action QUEUE");
6110                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6111                         return rte_flow_error_set(error, ENOTSUP,
6112                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6113                                                   NULL,
6114                                                   "unsupported action RSS");
6115                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6116                         return rte_flow_error_set(error, EINVAL,
6117                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6118                                                   actions,
6119                                                   "no fate action is found");
6120         } else {
6121                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6122                         return rte_flow_error_set(error, EINVAL,
6123                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6124                                                   actions,
6125                                                   "no fate action is found");
6126         }
6127         /*
6128          * Continue validation for Xcap and VLAN actions.
6129          * If hairpin is working in explicit TX rule mode, there is no actions
6130          * splitting and the validation of hairpin ingress flow should be the
6131          * same as other standard flows.
6132          */
6133         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6134                              MLX5_FLOW_VLAN_ACTIONS)) &&
6135             (queue_index == 0xFFFF ||
6136              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6137              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6138              conf->tx_explicit != 0))) {
6139                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6140                     MLX5_FLOW_XCAP_ACTIONS)
6141                         return rte_flow_error_set(error, ENOTSUP,
6142                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6143                                                   NULL, "encap and decap "
6144                                                   "combination aren't supported");
6145                 if (!attr->transfer && attr->ingress) {
6146                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6147                                 return rte_flow_error_set
6148                                                 (error, ENOTSUP,
6149                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6150                                                  NULL, "encap is not supported"
6151                                                  " for ingress traffic");
6152                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6153                                 return rte_flow_error_set
6154                                                 (error, ENOTSUP,
6155                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6156                                                  NULL, "push VLAN action not "
6157                                                  "supported for ingress");
6158                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6159                                         MLX5_FLOW_VLAN_ACTIONS)
6160                                 return rte_flow_error_set
6161                                                 (error, ENOTSUP,
6162                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6163                                                  NULL, "no support for "
6164                                                  "multiple VLAN actions");
6165                 }
6166         }
6167         /*
6168          * Hairpin flow will add one more TAG action in TX implicit mode.
6169          * In TX explicit mode, there will be no hairpin flow ID.
6170          */
6171         if (hairpin > 0)
6172                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6173         /* extra metadata enabled: one more TAG action will be add. */
6174         if (dev_conf->dv_flow_en &&
6175             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6176             mlx5_flow_ext_mreg_supported(dev))
6177                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6178         if ((uint32_t)rw_act_num >
6179                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6180                 return rte_flow_error_set(error, ENOTSUP,
6181                                           RTE_FLOW_ERROR_TYPE_ACTION,
6182                                           NULL, "too many header modify"
6183                                           " actions to support");
6184         }
6185         return 0;
6186 }
6187
6188 /**
6189  * Internal preparation function. Allocates the DV flow size,
6190  * this size is constant.
6191  *
6192  * @param[in] dev
6193  *   Pointer to the rte_eth_dev structure.
6194  * @param[in] attr
6195  *   Pointer to the flow attributes.
6196  * @param[in] items
6197  *   Pointer to the list of items.
6198  * @param[in] actions
6199  *   Pointer to the list of actions.
6200  * @param[out] error
6201  *   Pointer to the error structure.
6202  *
6203  * @return
6204  *   Pointer to mlx5_flow object on success,
6205  *   otherwise NULL and rte_errno is set.
6206  */
6207 static struct mlx5_flow *
6208 flow_dv_prepare(struct rte_eth_dev *dev,
6209                 const struct rte_flow_attr *attr __rte_unused,
6210                 const struct rte_flow_item items[] __rte_unused,
6211                 const struct rte_flow_action actions[] __rte_unused,
6212                 struct rte_flow_error *error)
6213 {
6214         uint32_t handle_idx = 0;
6215         struct mlx5_flow *dev_flow;
6216         struct mlx5_flow_handle *dev_handle;
6217         struct mlx5_priv *priv = dev->data->dev_private;
6218         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6219
6220         MLX5_ASSERT(wks);
6221         /* In case of corrupting the memory. */
6222         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6223                 rte_flow_error_set(error, ENOSPC,
6224                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6225                                    "not free temporary device flow");
6226                 return NULL;
6227         }
6228         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6229                                    &handle_idx);
6230         if (!dev_handle) {
6231                 rte_flow_error_set(error, ENOMEM,
6232                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6233                                    "not enough memory to create flow handle");
6234                 return NULL;
6235         }
6236         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6237         dev_flow = &wks->flows[wks->flow_idx++];
6238         dev_flow->handle = dev_handle;
6239         dev_flow->handle_idx = handle_idx;
6240         /*
6241          * In some old rdma-core releases, before continuing, a check of the
6242          * length of matching parameter will be done at first. It needs to use
6243          * the length without misc4 param. If the flow has misc4 support, then
6244          * the length needs to be adjusted accordingly. Each param member is
6245          * aligned with a 64B boundary naturally.
6246          */
6247         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6248                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6249         /*
6250          * The matching value needs to be cleared to 0 before using. In the
6251          * past, it will be automatically cleared when using rte_*alloc
6252          * API. The time consumption will be almost the same as before.
6253          */
6254         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6255         dev_flow->ingress = attr->ingress;
6256         dev_flow->dv.transfer = attr->transfer;
6257         return dev_flow;
6258 }
6259
6260 #ifdef RTE_LIBRTE_MLX5_DEBUG
6261 /**
6262  * Sanity check for match mask and value. Similar to check_valid_spec() in
6263  * kernel driver. If unmasked bit is present in value, it returns failure.
6264  *
6265  * @param match_mask
6266  *   pointer to match mask buffer.
6267  * @param match_value
6268  *   pointer to match value buffer.
6269  *
6270  * @return
6271  *   0 if valid, -EINVAL otherwise.
6272  */
6273 static int
6274 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6275 {
6276         uint8_t *m = match_mask;
6277         uint8_t *v = match_value;
6278         unsigned int i;
6279
6280         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6281                 if (v[i] & ~m[i]) {
6282                         DRV_LOG(ERR,
6283                                 "match_value differs from match_criteria"
6284                                 " %p[%u] != %p[%u]",
6285                                 match_value, i, match_mask, i);
6286                         return -EINVAL;
6287                 }
6288         }
6289         return 0;
6290 }
6291 #endif
6292
6293 /**
6294  * Add match of ip_version.
6295  *
6296  * @param[in] group
6297  *   Flow group.
6298  * @param[in] headers_v
6299  *   Values header pointer.
6300  * @param[in] headers_m
6301  *   Masks header pointer.
6302  * @param[in] ip_version
6303  *   The IP version to set.
6304  */
6305 static inline void
6306 flow_dv_set_match_ip_version(uint32_t group,
6307                              void *headers_v,
6308                              void *headers_m,
6309                              uint8_t ip_version)
6310 {
6311         if (group == 0)
6312                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6313         else
6314                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6315                          ip_version);
6316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6317         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6318         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6319 }
6320
6321 /**
6322  * Add Ethernet item to matcher and to the value.
6323  *
6324  * @param[in, out] matcher
6325  *   Flow matcher.
6326  * @param[in, out] key
6327  *   Flow matcher value.
6328  * @param[in] item
6329  *   Flow pattern to translate.
6330  * @param[in] inner
6331  *   Item is inner pattern.
6332  */
6333 static void
6334 flow_dv_translate_item_eth(void *matcher, void *key,
6335                            const struct rte_flow_item *item, int inner,
6336                            uint32_t group)
6337 {
6338         const struct rte_flow_item_eth *eth_m = item->mask;
6339         const struct rte_flow_item_eth *eth_v = item->spec;
6340         const struct rte_flow_item_eth nic_mask = {
6341                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6342                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6343                 .type = RTE_BE16(0xffff),
6344                 .has_vlan = 0,
6345         };
6346         void *hdrs_m;
6347         void *hdrs_v;
6348         char *l24_v;
6349         unsigned int i;
6350
6351         if (!eth_v)
6352                 return;
6353         if (!eth_m)
6354                 eth_m = &nic_mask;
6355         if (inner) {
6356                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6357                                          inner_headers);
6358                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6359         } else {
6360                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6361                                          outer_headers);
6362                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6363         }
6364         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6365                &eth_m->dst, sizeof(eth_m->dst));
6366         /* The value must be in the range of the mask. */
6367         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6368         for (i = 0; i < sizeof(eth_m->dst); ++i)
6369                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6370         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6371                &eth_m->src, sizeof(eth_m->src));
6372         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6373         /* The value must be in the range of the mask. */
6374         for (i = 0; i < sizeof(eth_m->dst); ++i)
6375                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6376         /*
6377          * HW supports match on one Ethertype, the Ethertype following the last
6378          * VLAN tag of the packet (see PRM).
6379          * Set match on ethertype only if ETH header is not followed by VLAN.
6380          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6381          * ethertype, and use ip_version field instead.
6382          * eCPRI over Ether layer will use type value 0xAEFE.
6383          */
6384         if (eth_m->type == 0xFFFF) {
6385                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
6386                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6387                 switch (eth_v->type) {
6388                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6389                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6390                         return;
6391                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6392                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6393                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6394                         return;
6395                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6396                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6397                         return;
6398                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6399                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6400                         return;
6401                 default:
6402                         break;
6403                 }
6404         }
6405         if (eth_m->has_vlan) {
6406                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6407                 if (eth_v->has_vlan) {
6408                         /*
6409                          * Here, when also has_more_vlan field in VLAN item is
6410                          * not set, only single-tagged packets will be matched.
6411                          */
6412                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6413                         return;
6414                 }
6415         }
6416         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6417                  rte_be_to_cpu_16(eth_m->type));
6418         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6419         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6420 }
6421
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
                            void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner, uint32_t group)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        void *hdrs_m;
        void *hdrs_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (inner) {
                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
                /*
                 * This is workaround, masks are not supported,
                 * and pre-validated.
                 */
                /* Record the VID (low 12 bits of TCI) for VF VLAN handling. */
                if (vlan_v)
                        dev_flow->handle->vf_vlan.tag =
                                        rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
        }
        /*
         * When VLAN item exists in flow, mark packet as tagged,
         * even if TCI is not specified.
         */
        if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
                MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
        }
        /* No spec: nothing more to match beyond "tagged". */
        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &rte_flow_item_vlan_mask;
        tci_m = rte_be_to_cpu_16(vlan_m->tci);
        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
        /* 802.1Q TCI layout: PCP bits 15:13, CFI/DEI bit 12, VID bits 11:0. */
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
        /*
         * HW is optimized for IPv4/IPv6. In such cases, avoid setting
         * ethertype, and use ip_version field instead.
         */
        if (vlan_m->inner_type == 0xFFFF) {
                switch (vlan_v->inner_type) {
                case RTE_BE16(RTE_ETHER_TYPE_VLAN):
                        /* Double-tagged: match svlan, clear cvlan set above. */
                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
                        return;
                case RTE_BE16(RTE_ETHER_TYPE_IPV4):
                        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
                        return;
                case RTE_BE16(RTE_ETHER_TYPE_IPV6):
                        flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
                        return;
                default:
                        break;
                }
        }
        if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
                MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
                /* Only one vlan_tag bit can be set. */
                MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
                return;
        }
        /* Fall back to matching the inner ethertype directly. */
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
                 rte_be_to_cpu_16(vlan_m->inner_type));
        MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
                 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
6518
/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner, uint32_t group)
{
        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
        /* Default mask used when the item carries a spec but no mask. */
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                        .time_to_live = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        char *l24_m;
        char *l24_v;
        uint8_t tos;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        /* ip_version is matched even when the item has no spec. */
        flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
        if (!ipv4_v)
                return;
        if (!ipv4_m)
                ipv4_m = &nic_mask;
        /* Destination address: mask copied verbatim, value masked to range. */
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
        /* Source address, same scheme. */
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                          src_ipv4_src_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                          src_ipv4_src_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
        /* TOS byte splits into ECN (bits 1:0) and DSCP (bits 7:2). */
        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
                 ipv4_m->hdr.type_of_service);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
                 ipv4_m->hdr.type_of_service >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
                 ipv4_m->hdr.time_to_live);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
                 ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
        /* Fragment match reduces to "any fragment bit set" (boolean). */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
                 !!(ipv4_m->hdr.fragment_offset));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                 !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
}
6601
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner, uint32_t group)
{
        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
        /* Default mask used when the item carries a spec but no mask. */
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        /* Flow label lives in the misc parameters, not the L2-L4 headers. */
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *l24_m;
        char *l24_v;
        uint32_t vtc_m;
        uint32_t vtc_v;
        int i;
        int size;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        /* ip_version is matched even when the item has no spec. */
        flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
        if (!ipv6_v)
                return;
        if (!ipv6_m)
                ipv6_m = &nic_mask;
        /* Destination address: mask copied verbatim, value masked to range. */
        size = sizeof(ipv6_m->hdr.dst_addr);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
        /* Source address, same scheme (src/dst addresses have equal size). */
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
        /* TOS. */
        /* vtc_flow layout: version 31:28, traffic class 27:20, label 19:0. */
        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
        /* ECN is TC bits 21:20 of vtc_flow, DSCP is TC bits 27:22. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
        /* Label. */
        if (inner) {
                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
                         vtc_v);
        } else {
                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
                         vtc_v);
        }
        /* Protocol. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv6_m->hdr.proto);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
        /* Hop limit. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
                 ipv6_m->hdr.hop_limits);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
                 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
        /* Fragmentation is signalled via the item's has_frag_ext flag. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
                 !!(ipv6_m->has_frag_ext));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
6710
6711 /**
6712  * Add IPV6 fragment extension item to matcher and to the value.
6713  *
6714  * @param[in, out] matcher
6715  *   Flow matcher.
6716  * @param[in, out] key
6717  *   Flow matcher value.
6718  * @param[in] item
6719  *   Flow pattern to translate.
6720  * @param[in] inner
6721  *   Item is inner pattern.
6722  */
static void
flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
				     const struct rte_flow_item *item,
				     int inner)
{
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
	/* Default mask: match the full next-header and frag-data fields. */
	const struct rte_flow_item_ipv6_frag_ext nic_mask = {
		.hdr = {
			.next_header = 0xff,
			.frag_data = RTE_BE16(0xffff),
		},
	};
	void *headers_m;
	void *headers_v;

	/* Select the inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IPv6 fragment extension item exists, so packet is IP fragment. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
	/* No spec: only the implicit "is fragment" match above applies. */
	if (!ipv6_frag_ext_v)
		return;
	if (!ipv6_frag_ext_m)
		ipv6_frag_ext_m = &nic_mask;
	/* Match on next-header; the value is ANDed with the mask. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_frag_ext_m->hdr.next_header);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_frag_ext_v->hdr.next_header &
		 ipv6_frag_ext_m->hdr.next_header);
}
6761
6762 /**
6763  * Add TCP item to matcher and to the value.
6764  *
6765  * @param[in, out] matcher
6766  *   Flow matcher.
6767  * @param[in, out] key
6768  *   Flow matcher value.
6769  * @param[in] item
6770  *   Flow pattern to translate.
6771  * @param[in] inner
6772  *   Item is inner pattern.
6773  */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_tcp *tcp_m = item->mask;
	const struct rte_flow_item_tcp *tcp_v = item->spec;
	void *headers_m;
	void *headers_v;

	/* Select the inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Item presence alone forces matching on IP protocol == TCP. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
	if (!tcp_v)
		return;
	if (!tcp_m)
		tcp_m = &rte_flow_item_tcp_mask;
	/*
	 * Ports arrive big-endian in the pattern; the match value is the
	 * spec ANDed with the mask, converted to CPU order for MLX5_SET.
	 */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
		 rte_be_to_cpu_16(tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
		 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
		 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
		 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
		 tcp_m->hdr.tcp_flags);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
		 (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
}
6812
6813 /**
6814  * Add UDP item to matcher and to the value.
6815  *
6816  * @param[in, out] matcher
6817  *   Flow matcher.
6818  * @param[in, out] key
6819  *   Flow matcher value.
6820  * @param[in] item
6821  *   Flow pattern to translate.
6822  * @param[in] inner
6823  *   Item is inner pattern.
6824  */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_udp *udp_m = item->mask;
	const struct rte_flow_item_udp *udp_v = item->spec;
	void *headers_m;
	void *headers_v;

	/* Select the inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Item presence alone forces matching on IP protocol == UDP. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
	if (!udp_v)
		return;
	if (!udp_m)
		udp_m = &rte_flow_item_udp_mask;
	/*
	 * Ports arrive big-endian in the pattern; the match value is the
	 * spec ANDed with the mask, converted to CPU order for MLX5_SET.
	 */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
		 rte_be_to_cpu_16(udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
		 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
		 rte_be_to_cpu_16(udp_m->hdr.dst_port));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
		 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}
6859
6860 /**
6861  * Add GRE optional Key item to matcher and to the value.
6862  *
6863  * @param[in, out] matcher
6864  *   Flow matcher.
6865  * @param[in, out] key
6866  *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
6871  */
static void
flow_dv_translate_item_gre_key(void *matcher, void *key,
				   const struct rte_flow_item *item)
{
	const rte_be32_t *key_m = item->mask;
	const rte_be32_t *key_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/* Default mask when the item carries none: match all 32 key bits. */
	rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);

	/* GRE K bit must be on and should already be validated */
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
	if (!key_v)
		return;
	if (!key_m)
		key_m = &gre_key_default_mask;
	/*
	 * The hardware splits the 32-bit GRE key into a 24-bit high part
	 * (gre_key_h) and an 8-bit low part (gre_key_l); value = spec & mask.
	 */
	MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
		 rte_be_to_cpu_32(*key_m) >> 8);
	MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
		 rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
	MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
		 rte_be_to_cpu_32(*key_m) & 0xFF);
	MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
		 rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
}
6898
6899 /**
6900  * Add GRE item to matcher and to the value.
6901  *
6902  * @param[in, out] matcher
6903  *   Flow matcher.
6904  * @param[in, out] key
6905  *   Flow matcher value.
6906  * @param[in] item
6907  *   Flow pattern to translate.
6908  * @param[in] inner
6909  *   Item is inner pattern.
6910  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Host-endian overlay of the GRE c_rsvd0_ver word, exposing the
	 * C/K/S presence bits and version as individual fields.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	/* Select the inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Item presence alone forces matching on IP protocol == GRE. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	/* Match the encapsulated protocol; value = spec & mask. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* Decompose the flag word and match each presence bit separately. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
6974
6975 /**
6976  * Add NVGRE item to matcher and to the value.
6977  *
6978  * @param[in, out] matcher
6979  *   Flow matcher.
6980  * @param[in, out] key
6981  *   Flow matcher value.
6982  * @param[in] item
6983  *   Flow pattern to translate.
6984  * @param[in] inner
6985  *   Item is inner pattern.
6986  */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_nvgre *nvgre_m = item->mask;
	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	const char *tni_flow_id_m;
	const char *tni_flow_id_v;
	char *gre_key_m;
	char *gre_key_v;
	int size;
	int i;

	/* For NVGRE, GRE header fields must be set with defined values. */
	const struct rte_flow_item_gre gre_spec = {
		.c_rsvd0_ver = RTE_BE16(0x2000),
		.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
	};
	const struct rte_flow_item_gre gre_mask = {
		.c_rsvd0_ver = RTE_BE16(0xB000),
		.protocol = RTE_BE16(UINT16_MAX),
	};
	const struct rte_flow_item gre_item = {
		.spec = &gre_spec,
		.mask = &gre_mask,
		.last = NULL,
	};
	/* Translate the implicit GRE encapsulation (K bit set, TEB proto). */
	flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
	if (!nvgre_v)
		return;
	if (!nvgre_m)
		nvgre_m = &rte_flow_item_nvgre_mask;
	/*
	 * TNI and flow-id are copied as a single byte run overlaying the
	 * GRE key field — assumes the two item fields are adjacent.
	 */
	tni_flow_id_m = (const char *)nvgre_m->tni;
	tni_flow_id_v = (const char *)nvgre_v->tni;
	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
	memcpy(gre_key_m, tni_flow_id_m, size);
	/* Match value is the spec ANDed with the mask, byte by byte. */
	for (i = 0; i < size; ++i)
		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
7031
7032 /**
7033  * Add VXLAN item to matcher and to the value.
7034  *
7035  * @param[in, out] matcher
7036  *   Flow matcher.
7037  * @param[in, out] key
7038  *   Flow matcher value.
7039  * @param[in] item
7040  *   Flow pattern to translate.
7041  * @param[in] inner
7042  *   Item is inner pattern.
7043  */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;

	/* Select the inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Pick the well-known UDP port for the tunnel flavor. */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/* Force the UDP dport only if the pattern did not set one already. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_mask;
	/* Match the 24-bit VNI; value = spec & mask, byte by byte. */
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
7087
7088 /**
7089  * Add VXLAN-GPE item to matcher and to the value.
7090  *
7091  * @param[in, out] matcher
7092  *   Flow matcher.
7093  * @param[in, out] key
7094  *   Flow matcher value.
7095  * @param[in] item
7096  *   Flow pattern to translate.
7097  * @param[in] inner
7098  *   Item is inner pattern.
7099  */
7100
7101 static void
7102 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7103                                  const struct rte_flow_item *item, int inner)
7104 {
7105         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7106         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7107         void *headers_m;
7108         void *headers_v;
7109         void *misc_m =
7110                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7111         void *misc_v =
7112                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7113         char *vni_m;
7114         char *vni_v;
7115         uint16_t dport;
7116         int size;
7117         int i;
7118         uint8_t flags_m = 0xff;
7119         uint8_t flags_v = 0xc;
7120
7121         if (inner) {
7122                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7123                                          inner_headers);
7124                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7125         } else {
7126                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7127                                          outer_headers);
7128                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7129         }
7130         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7131                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7132         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7133                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7134                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7135         }
7136         if (!vxlan_v)
7137                 return;
7138         if (!vxlan_m)
7139                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7140         size = sizeof(vxlan_m->vni);
7141         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7142         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7143         memcpy(vni_m, vxlan_m->vni, size);
7144         for (i = 0; i < size; ++i)
7145                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7146         if (vxlan_m->flags) {
7147                 flags_m = vxlan_m->flags;
7148                 flags_v = vxlan_v->flags;
7149         }
7150         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7151         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7152         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7153                  vxlan_m->protocol);
7154         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7155                  vxlan_v->protocol);
7156 }
7157
7158 /**
7159  * Add Geneve item to matcher and to the value.
7160  *
7161  * @param[in, out] matcher
7162  *   Flow matcher.
7163  * @param[in, out] key
7164  *   Flow matcher value.
7165  * @param[in] item
7166  *   Flow pattern to translate.
7167  * @param[in] inner
7168  *   Item is inner pattern.
7169  */
7170
static void
flow_dv_translate_item_geneve(void *matcher, void *key,
			      const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_geneve *geneve_m = item->mask;
	const struct rte_flow_item_geneve *geneve_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	uint16_t dport;
	uint16_t gbhdr_m;
	uint16_t gbhdr_v;
	char *vni_m;
	char *vni_v;
	size_t size, i;

	/* Select the inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Force the GENEVE UDP dport if the pattern did not set one. */
	dport = MLX5_UDP_PORT_GENEVE;
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!geneve_v)
		return;
	if (!geneve_m)
		geneve_m = &rte_flow_item_geneve_mask;
	/* Match the 24-bit VNI; value = spec & mask, byte by byte. */
	size = sizeof(geneve_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
	memcpy(vni_m, geneve_m->vni, size);
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & geneve_v->vni[i];
	/* Match the encapsulated protocol type; value = spec & mask. */
	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
	/* Extract OAM flag and option length from the base-header word. */
	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
}
7228
7229 /**
7230  * Add MPLS item to matcher and to the value.
7231  *
7232  * @param[in, out] matcher
7233  *   Flow matcher.
7234  * @param[in, out] key
7235  *   Flow matcher value.
7236  * @param[in] item
7237  *   Flow pattern to translate.
7238  * @param[in] prev_layer
7239  *   The protocol layer indicated in previous item.
7240  * @param[in] inner
7241  *   Item is inner pattern.
7242  */
7243 static void
7244 flow_dv_translate_item_mpls(void *matcher, void *key,
7245                             const struct rte_flow_item *item,
7246                             uint64_t prev_layer,
7247                             int inner)
7248 {
7249         const uint32_t *in_mpls_m = item->mask;
7250         const uint32_t *in_mpls_v = item->spec;
7251         uint32_t *out_mpls_m = 0;
7252         uint32_t *out_mpls_v = 0;
7253         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7254         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7255         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7256                                      misc_parameters_2);
7257         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7258         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7259         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7260
7261         switch (prev_layer) {
7262         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7263                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7264                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7265                          MLX5_UDP_PORT_MPLS);
7266                 break;
7267         case MLX5_FLOW_LAYER_GRE:
7268                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7269                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7270                          RTE_ETHER_TYPE_MPLS);
7271                 break;
7272         default:
7273                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7274                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7275                          IPPROTO_MPLS);
7276                 break;
7277         }
7278         if (!in_mpls_v)
7279                 return;
7280         if (!in_mpls_m)
7281                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7282         switch (prev_layer) {
7283         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7284                 out_mpls_m =
7285                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7286                                                  outer_first_mpls_over_udp);
7287                 out_mpls_v =
7288                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7289                                                  outer_first_mpls_over_udp);
7290                 break;
7291         case MLX5_FLOW_LAYER_GRE:
7292                 out_mpls_m =
7293                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7294                                                  outer_first_mpls_over_gre);
7295                 out_mpls_v =
7296                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7297                                                  outer_first_mpls_over_gre);
7298                 break;
7299         default:
7300                 /* Inner MPLS not over GRE is not supported. */
7301                 if (!inner) {
7302                         out_mpls_m =
7303                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7304                                                          misc2_m,
7305                                                          outer_first_mpls);
7306                         out_mpls_v =
7307                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7308                                                          misc2_v,
7309                                                          outer_first_mpls);
7310                 }
7311                 break;
7312         }
7313         if (out_mpls_m && out_mpls_v) {
7314                 *out_mpls_m = *in_mpls_m;
7315                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7316         }
7317 }
7318
7319 /**
7320  * Add metadata register item to matcher
7321  *
7322  * @param[in, out] matcher
7323  *   Flow matcher.
7324  * @param[in, out] key
7325  *   Flow matcher value.
7326  * @param[in] reg_type
7327  *   Type of device metadata register
7328  * @param[in] value
7329  *   Register value
7330  * @param[in] mask
7331  *   Register mask
7332  */
static void
flow_dv_match_meta_reg(void *matcher, void *key,
		       enum modify_reg reg_type,
		       uint32_t data, uint32_t mask)
{
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	uint32_t temp;

	/* Keep only the bits the mask actually matches on. */
	data &= mask;
	switch (reg_type) {
	case REG_A:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
		break;
	case REG_B:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
		break;
	case REG_C_0:
		/*
		 * The metadata register C0 field might be divided into
		 * source vport index and META item value, we should set
		 * this field according to specified mask, not as whole one.
		 */
		temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
		temp |= mask;
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
		temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
		temp &= ~mask;
		temp |= data;
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
		break;
	case REG_C_1:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
		break;
	case REG_C_2:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
		break;
	case REG_C_3:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
		break;
	case REG_C_4:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
		break;
	case REG_C_5:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
		break;
	case REG_C_6:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
		break;
	case REG_C_7:
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
		break;
	default:
		/* Unknown register: validation should have rejected it. */
		MLX5_ASSERT(false);
		break;
	}
}
7401
7402 /**
7403  * Add MARK item to matcher
7404  *
7405  * @param[in] dev
7406  *   The device to configure through.
7407  * @param[in, out] matcher
7408  *   Flow matcher.
7409  * @param[in, out] key
7410  *   Flow matcher value.
7411  * @param[in] item
7412  *   Flow pattern to translate.
7413  */
7414 static void
7415 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7416                             void *matcher, void *key,
7417                             const struct rte_flow_item *item)
7418 {
7419         struct mlx5_priv *priv = dev->data->dev_private;
7420         const struct rte_flow_item_mark *mark;
7421         uint32_t value;
7422         uint32_t mask;
7423
7424         mark = item->mask ? (const void *)item->mask :
7425                             &rte_flow_item_mark_mask;
7426         mask = mark->id & priv->sh->dv_mark_mask;
7427         mark = (const void *)item->spec;
7428         MLX5_ASSERT(mark);
7429         value = mark->id & priv->sh->dv_mark_mask & mask;
7430         if (mask) {
7431                 enum modify_reg reg;
7432
7433                 /* Get the metadata register index for the mark. */
7434                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7435                 MLX5_ASSERT(reg > 0);
7436                 if (reg == REG_C_0) {
7437                         struct mlx5_priv *priv = dev->data->dev_private;
7438                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7439                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7440
7441                         mask &= msk_c0;
7442                         mask <<= shl_c0;
7443                         value <<= shl_c0;
7444                 }
7445                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7446         }
7447 }
7448
7449 /**
7450  * Add META item to matcher
7451  *
7452  * @param[in] dev
 *   The device to configure through.
7454  * @param[in, out] matcher
7455  *   Flow matcher.
7456  * @param[in, out] key
7457  *   Flow matcher value.
7458  * @param[in] attr
7459  *   Attributes of flow that includes this item.
7460  * @param[in] item
7461  *   Flow pattern to translate.
7462  */
static void
flow_dv_translate_item_meta(struct rte_eth_dev *dev,
			    void *matcher, void *key,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;

	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		int reg;
		uint32_t value = meta_v->data;
		uint32_t mask = meta_m->data;

		/* Resolve which register carries META for these attributes. */
		reg = flow_dv_get_metadata_reg(dev, attr, NULL);
		if (reg < 0)
			return;
		/*
		 * In datapath code there are no endianness
		 * conversions for performance reasons, all
		 * pattern conversions are done in rte_flow.
		 */
		value = rte_cpu_to_be_32(value);
		mask = rte_cpu_to_be_32(mask);
		if (reg == REG_C_0) {
			/*
			 * REG_C_0 may be shared with vport metadata; shift
			 * the META value into the bits reserved for it.
			 */
			struct mlx5_priv *priv = dev->data->dev_private;
			uint32_t msk_c0 = priv->sh->dv_regc0_mask;
			uint32_t shl_c0 = rte_bsf32(msk_c0);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
			uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);

			value >>= shr_c0;
			mask >>= shr_c0;
#endif
			value <<= shl_c0;
			mask <<= shl_c0;
			MLX5_ASSERT(msk_c0);
			MLX5_ASSERT(!(~msk_c0 & mask));
		}
		flow_dv_match_meta_reg(matcher, key, reg, value, mask);
	}
}
7509
7510 /**
7511  * Add vport metadata Reg C0 item to matcher
7512  *
7513  * @param[in, out] matcher
7514  *   Flow matcher.
7515  * @param[in, out] key
7516  *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Register mask to match.
7519  */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	/*
	 * Vport metadata is always matched in register C0; the caller
	 * supplies an already-aligned value/mask pair.
	 */
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
7526
7527 /**
7528  * Add tag item to matcher
7529  *
7530  * @param[in] dev
 *   The device to configure through.
7532  * @param[in, out] matcher
7533  *   Flow matcher.
7534  * @param[in, out] key
7535  *   Flow matcher value.
7536  * @param[in] item
7537  *   Flow pattern to translate.
7538  */
7539 static void
7540 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7541                                 void *matcher, void *key,
7542                                 const struct rte_flow_item *item)
7543 {
7544         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7545         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7546         uint32_t mask, value;
7547
7548         MLX5_ASSERT(tag_v);
7549         value = tag_v->data;
7550         mask = tag_m ? tag_m->data : UINT32_MAX;
7551         if (tag_v->id == REG_C_0) {
7552                 struct mlx5_priv *priv = dev->data->dev_private;
7553                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7554                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7555
7556                 mask &= msk_c0;
7557                 mask <<= shl_c0;
7558                 value <<= shl_c0;
7559         }
7560         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7561 }
7562
7563 /**
7564  * Add TAG item to matcher
7565  *
7566  * @param[in] dev
 *   The device to configure through.
7568  * @param[in, out] matcher
7569  *   Flow matcher.
7570  * @param[in, out] key
7571  *   Flow matcher value.
7572  * @param[in] item
7573  *   Flow pattern to translate.
7574  */
7575 static void
7576 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7577                            void *matcher, void *key,
7578                            const struct rte_flow_item *item)
7579 {
7580         const struct rte_flow_item_tag *tag_v = item->spec;
7581         const struct rte_flow_item_tag *tag_m = item->mask;
7582         enum modify_reg reg;
7583
7584         MLX5_ASSERT(tag_v);
7585         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7586         /* Get the metadata register index for the tag. */
7587         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7588         MLX5_ASSERT(reg > 0);
7589         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7590 }
7591
7592 /**
7593  * Add source vport match to the specified matcher.
7594  *
7595  * @param[in, out] matcher
7596  *   Flow matcher.
7597  * @param[in, out] key
7598  *   Flow matcher value.
7599  * @param[in] port
7600  *   Source vport value to match
7601  * @param[in] mask
7602  *   Mask
7603  */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	/*
	 * Match on the source_port field of the misc parameters.
	 * NOTE(review): `port` is int16_t while `mask` is uint16_t - the
	 * signedness mismatch looks accidental; confirm against callers.
	 */
	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
7614
7615 /**
 * Translate port-id item to eswitch match on port-id.
7617  *
7618  * @param[in] dev
 *   The device to configure through.
7620  * @param[in, out] matcher
7621  *   Flow matcher.
7622  * @param[in, out] key
7623  *   Flow matcher value.
7624  * @param[in] item
7625  *   Flow pattern to translate.
7626  *
7627  * @return
7628  *   0 on success, a negative errno value otherwise.
7629  */
7630 static int
7631 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7632                                void *key, const struct rte_flow_item *item)
7633 {
7634         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7635         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7636         struct mlx5_priv *priv;
7637         uint16_t mask, id;
7638
7639         mask = pid_m ? pid_m->id : 0xffff;
7640         id = pid_v ? pid_v->id : dev->data->port_id;
7641         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7642         if (!priv)
7643                 return -rte_errno;
7644         /* Translate to vport field or to metadata, depending on mode. */
7645         if (priv->vport_meta_mask)
7646                 flow_dv_translate_item_meta_vport(matcher, key,
7647                                                   priv->vport_meta_tag,
7648                                                   priv->vport_meta_mask);
7649         else
7650                 flow_dv_translate_item_source_vport(matcher, key,
7651                                                     priv->vport_id, mask);
7652         return 0;
7653 }
7654
7655 /**
7656  * Add ICMP6 item to matcher and to the value.
7657  *
7658  * @param[in, out] matcher
7659  *   Flow matcher.
7660  * @param[in, out] key
7661  *   Flow matcher value.
7662  * @param[in] item
7663  *   Flow pattern to translate.
7664  * @param[in] inner
7665  *   Item is inner pattern.
7666  */
7667 static void
7668 flow_dv_translate_item_icmp6(void *matcher, void *key,
7669                               const struct rte_flow_item *item,
7670                               int inner)
7671 {
7672         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7673         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7674         void *headers_m;
7675         void *headers_v;
7676         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7677                                      misc_parameters_3);
7678         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7679         if (inner) {
7680                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7681                                          inner_headers);
7682                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7683         } else {
7684                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7685                                          outer_headers);
7686                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7687         }
7688         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7689         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7690         if (!icmp6_v)
7691                 return;
7692         if (!icmp6_m)
7693                 icmp6_m = &rte_flow_item_icmp6_mask;
7694         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7695         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7696                  icmp6_v->type & icmp6_m->type);
7697         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7698         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7699                  icmp6_v->code & icmp6_m->code);
7700 }
7701
7702 /**
7703  * Add ICMP item to matcher and to the value.
7704  *
7705  * @param[in, out] matcher
7706  *   Flow matcher.
7707  * @param[in, out] key
7708  *   Flow matcher value.
7709  * @param[in] item
7710  *   Flow pattern to translate.
7711  * @param[in] inner
7712  *   Item is inner pattern.
7713  */
7714 static void
7715 flow_dv_translate_item_icmp(void *matcher, void *key,
7716                             const struct rte_flow_item *item,
7717                             int inner)
7718 {
7719         const struct rte_flow_item_icmp *icmp_m = item->mask;
7720         const struct rte_flow_item_icmp *icmp_v = item->spec;
7721         uint32_t icmp_header_data_m = 0;
7722         uint32_t icmp_header_data_v = 0;
7723         void *headers_m;
7724         void *headers_v;
7725         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7726                                      misc_parameters_3);
7727         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7728         if (inner) {
7729                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7730                                          inner_headers);
7731                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7732         } else {
7733                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7734                                          outer_headers);
7735                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7736         }
7737         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7738         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7739         if (!icmp_v)
7740                 return;
7741         if (!icmp_m)
7742                 icmp_m = &rte_flow_item_icmp_mask;
7743         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7744                  icmp_m->hdr.icmp_type);
7745         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7746                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7747         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7748                  icmp_m->hdr.icmp_code);
7749         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7750                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7751         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7752         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7753         if (icmp_header_data_m) {
7754                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7755                 icmp_header_data_v |=
7756                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7757                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7758                          icmp_header_data_m);
7759                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7760                          icmp_header_data_v & icmp_header_data_m);
7761         }
7762 }
7763
7764 /**
7765  * Add GTP item to matcher and to the value.
7766  *
7767  * @param[in, out] matcher
7768  *   Flow matcher.
7769  * @param[in, out] key
7770  *   Flow matcher value.
7771  * @param[in] item
7772  *   Flow pattern to translate.
7773  * @param[in] inner
7774  *   Item is inner pattern.
7775  */
7776 static void
7777 flow_dv_translate_item_gtp(void *matcher, void *key,
7778                            const struct rte_flow_item *item, int inner)
7779 {
7780         const struct rte_flow_item_gtp *gtp_m = item->mask;
7781         const struct rte_flow_item_gtp *gtp_v = item->spec;
7782         void *headers_m;
7783         void *headers_v;
7784         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7785                                      misc_parameters_3);
7786         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7787         uint16_t dport = RTE_GTPU_UDP_PORT;
7788
7789         if (inner) {
7790                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7791                                          inner_headers);
7792                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7793         } else {
7794                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7795                                          outer_headers);
7796                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7797         }
7798         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7799                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7800                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7801         }
7802         if (!gtp_v)
7803                 return;
7804         if (!gtp_m)
7805                 gtp_m = &rte_flow_item_gtp_mask;
7806         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7807                  gtp_m->v_pt_rsv_flags);
7808         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7809                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7810         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7811         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7812                  gtp_v->msg_type & gtp_m->msg_type);
7813         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7814                  rte_be_to_cpu_32(gtp_m->teid));
7815         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7816                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7817 }
7818
7819 /**
7820  * Add eCPRI item to matcher and to the value.
7821  *
7822  * @param[in] dev
 *   The device to configure through.
7824  * @param[in, out] matcher
7825  *   Flow matcher.
7826  * @param[in, out] key
7827  *   Flow matcher value.
7828  * @param[in] item
7829  *   Flow pattern to translate.
 * The sample IDs used in the matching are taken from the device's
 * eCPRI flex parser profile, not passed by the caller.
7832  */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for an eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	/* Nothing to match in the common header - no fields requested. */
	if (!ecpri_m->hdr.common.u32)
		return;
	/* Sample IDs come from the device's eCPRI flex parser profile. */
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		/* Only these message types carry a matchable body word. */
		switch (ecpri_v->hdr.common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
7902
7903 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7904
7905 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7906         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7907                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7908
7909 /**
7910  * Calculate flow matcher enable bitmap.
7911  *
7912  * @param match_criteria
7913  *   Pointer to flow matcher criteria.
7914  *
7915  * @return
7916  *   Bitmap of enabled fields.
7917  */
7918 static uint8_t
7919 flow_dv_matcher_enable(uint32_t *match_criteria)
7920 {
7921         uint8_t match_criteria_enable;
7922
7923         match_criteria_enable =
7924                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7925                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7926         match_criteria_enable |=
7927                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7928                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7929         match_criteria_enable |=
7930                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7931                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7932         match_criteria_enable |=
7933                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7934                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7935         match_criteria_enable |=
7936                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7937                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7938         match_criteria_enable |=
7939                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7940                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7941         return match_criteria_enable;
7942 }
7943
/**
 * Hash-list creation callback: allocate a flow table data entry and the
 * underlying DV flow table object, plus a jump action for non-root tables.
 * On any failure all partially created resources are released and NULL is
 * returned with @p error set.
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	/* The 64-bit key packs table id, direction, domain and dummy flag. */
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.direction;
	tbl = &tbl_data->tbl;
	/*
	 * Dummy entries carry no DV objects; note the matchers cache list is
	 * not initialized on this path - presumably the zeroed ipool entry is
	 * safe for the remove callback, TODO confirm.
	 */
	if (key.dummy)
		return &tbl_data->entry;
	/* Select the steering domain: FDB, NIC Tx, or NIC Rx. */
	if (key.domain)
		domain = sh->fdb_domain;
	else if (key.direction)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* Non-root tables (id != 0) also get a jump-to-table action. */
	if (key.table_id) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	/* Per-table matcher cache, named after domain/direction/table id. */
	MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
	      key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
	      key.table_id);
	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
			     flow_dv_matcher_create_cb,
			     flow_dv_matcher_match_cb,
			     flow_dv_matcher_remove_cb);
	return &tbl_data->entry;
}
8012
8013 /**
8014  * Get a flow table.
8015  *
8016  * @param[in, out] dev
8017  *   Pointer to rte_eth_dev structure.
8018  * @param[in] table_id
8019  *   Table id to use.
8020  * @param[in] egress
8021  *   Direction of the table.
8022  * @param[in] transfer
8023  *   E-Switch or NIC flow.
8024  * @param[in] dummy
8025  *   Dummy entry for dv API.
8026  * @param[out] error
8027  *   pointer to error structure.
8028  *
8029  * @return
8030  *   Returns tables resource based on the index, NULL in case of failed.
8031  */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id, uint8_t dummy,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Pack the lookup attributes into the 64-bit hash-list key. */
	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = table_id,
			.dummy = dummy,
			.domain = !!transfer,
			.direction = !!egress,
		}
	};
	/* Creation parameters forwarded to flow_dv_tbl_create_cb on miss. */
	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
		.tunnel = tunnel,
		.group_id = group_id,
		.external = external,
	};
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &tt_prm,
	};
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Register takes a reference; release via flow_dv_tbl_resource_release. */
	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot get table");
		return NULL;
	}
	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
	return &tbl_data->tbl;
}
8073
/**
 * Hash-list removal callback: destroy the DV objects owned by a flow table
 * entry, drop the tunnel-group reference for external tunnel-offload tables,
 * and return the entry to its ipool.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	/* Destroy the jump action before the table object it points to. */
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		union mlx5_flow_tbl_key table_key = {
			.v64 = entry->key
		};
		uint32_t table_id = table_key.table_id;

		/* Tunnel-less tables are tracked in the hub-wide group hash. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"Table_id %#x tunnel %u group %u released.",
			table_id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_cache_list_destroy(&tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
8117
8118 /**
8119  * Release a flow table.
8120  *
8121  * @param[in] sh
8122  *   Pointer to device shared structure.
8123  * @param[in] tbl
8124  *   Table resource to be released.
8125  *
8126  * @return
8127  *   Returns 0 if table was released, else return 1;
8128  */
8129 static int
8130 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8131                              struct mlx5_flow_tbl_resource *tbl)
8132 {
8133         struct mlx5_flow_tbl_data_entry *tbl_data =
8134                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8135
8136         if (!tbl)
8137                 return 0;
8138         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8139 }
8140
8141 int
8142 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8143                          struct mlx5_cache_entry *entry, void *cb_ctx)
8144 {
8145         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8146         struct mlx5_flow_dv_matcher *ref = ctx->data;
8147         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8148                                                         entry);
8149
8150         return cur->crc != ref->crc ||
8151                cur->priority != ref->priority ||
8152                memcmp((const void *)cur->mask.buf,
8153                       (const void *)ref->mask.buf, ref->mask.size);
8154 }
8155
/**
 * Cache-list creation callback: allocate a matcher cache entry, copy the
 * reference matcher into it and create the DV matcher object on the table
 * referenced by the context data. Returns NULL with @p ctx->error set on
 * allocation or object-creation failure.
 */
struct mlx5_cache_entry *
flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
			  struct mlx5_cache_entry *entry __rte_unused,
			  void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5_flow_dv_matcher *cache;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	/* The reference matcher already points at its owning table. */
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
	if (!cache) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	*cache = *ref;
	/* Derive criteria-enable bits from the non-zero mask sections. */
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache->mask.buf);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
					       &cache->matcher_object);
	if (ret) {
		mlx5_free(cache);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &cache->entry;
}
8197
8198 /**
8199  * Register the flow matcher.
8200  *
8201  * @param[in, out] dev
8202  *   Pointer to rte_eth_dev structure.
8203  * @param[in, out] matcher
8204  *   Pointer to flow matcher.
8205  * @param[in, out] key
8206  *   Pointer to flow table key.
 * @param[in, out] dev_flow
8208  *   Pointer to the dev_flow.
8209  * @param[out] error
8210  *   pointer to error structure.
8211  *
8212  * @return
8213  *   0 on success otherwise -errno and errno is set.
8214  */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *ref,
			 union mlx5_flow_tbl_key *key,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_cache_entry *entry;
	struct mlx5_flow_dv_matcher *cache;
	struct mlx5_flow_tbl_resource *tbl;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_cb_ctx ctx = {
		.error = error,
		.data = ref,
	};

	/* Take a reference on the owning table (created on demand). */
	tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
				       key->domain, false, NULL, 0, 0, error);
	if (!tbl)
		return -rte_errno;	/* No need to refill the error info */
	tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
	ref->tbl = tbl;
	/* Look up or create the matcher in the table's matcher cache. */
	entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
	if (!entry) {
		/* Drop the table reference taken above on failure. */
		flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate ref memory");
	}
	cache = container_of(entry, typeof(*cache), entry);
	dev_flow->handle->dvh.matcher = cache;
	return 0;
}
8248
8249 struct mlx5_hlist_entry *
8250 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8251 {
8252         struct mlx5_dev_ctx_shared *sh = list->ctx;
8253         struct rte_flow_error *error = ctx;
8254         struct mlx5_flow_dv_tag_resource *entry;
8255         uint32_t idx = 0;
8256         int ret;
8257
8258         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8259         if (!entry) {
8260                 rte_flow_error_set(error, ENOMEM,
8261                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8262                                    "cannot allocate resource memory");
8263                 return NULL;
8264         }
8265         entry->idx = idx;
8266         ret = mlx5_flow_os_create_flow_action_tag(key,
8267                                                   &entry->action);
8268         if (ret) {
8269                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8270                 rte_flow_error_set(error, ENOMEM,
8271                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8272                                    NULL, "cannot create action");
8273                 return NULL;
8274         }
8275         return &entry->entry;
8276 }
8277
8278 /**
8279  * Find existing tag resource or create and register a new one.
8280  *
8281  * @param dev[in, out]
8282  *   Pointer to rte_eth_dev structure.
8283  * @param[in, out] tag_be24
8284  *   Tag value in big endian then R-shift 8.
8285  * @parm[in, out] dev_flow
8286  *   Pointer to the dev_flow.
8287  * @param[out] error
8288  *   pointer to error structure.
8289  *
8290  * @return
8291  *   0 on success otherwise -errno and errno is set.
8292  */
8293 static int
8294 flow_dv_tag_resource_register
8295                         (struct rte_eth_dev *dev,
8296                          uint32_t tag_be24,
8297                          struct mlx5_flow *dev_flow,
8298                          struct rte_flow_error *error)
8299 {
8300         struct mlx5_priv *priv = dev->data->dev_private;
8301         struct mlx5_flow_dv_tag_resource *cache_resource;
8302         struct mlx5_hlist_entry *entry;
8303
8304         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8305         if (entry) {
8306                 cache_resource = container_of
8307                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8308                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8309                 dev_flow->dv.tag_resource = cache_resource;
8310                 return 0;
8311         }
8312         return -rte_errno;
8313 }
8314
8315 void
8316 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8317                       struct mlx5_hlist_entry *entry)
8318 {
8319         struct mlx5_dev_ctx_shared *sh = list->ctx;
8320         struct mlx5_flow_dv_tag_resource *tag =
8321                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8322
8323         MLX5_ASSERT(tag && sh && tag->action);
8324         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8325         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8326         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8327 }
8328
8329 /**
8330  * Release the tag.
8331  *
8332  * @param dev
8333  *   Pointer to Ethernet device.
8334  * @param tag_idx
8335  *   Tag index.
8336  *
8337  * @return
8338  *   1 while a reference on it exists, 0 when freed.
8339  */
8340 static int
8341 flow_dv_tag_release(struct rte_eth_dev *dev,
8342                     uint32_t tag_idx)
8343 {
8344         struct mlx5_priv *priv = dev->data->dev_private;
8345         struct mlx5_flow_dv_tag_resource *tag;
8346
8347         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8348         if (!tag)
8349                 return 0;
8350         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8351                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8352         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8353 }
8354
8355 /**
8356  * Translate port ID action to vport.
8357  *
8358  * @param[in] dev
8359  *   Pointer to rte_eth_dev structure.
8360  * @param[in] action
8361  *   Pointer to the port ID action.
8362  * @param[out] dst_port_id
8363  *   The target port ID.
8364  * @param[out] error
8365  *   Pointer to the error structure.
8366  *
8367  * @return
8368  *   0 on success, a negative errno value otherwise and rte_errno is set.
8369  */
8370 static int
8371 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8372                                  const struct rte_flow_action *action,
8373                                  uint32_t *dst_port_id,
8374                                  struct rte_flow_error *error)
8375 {
8376         uint32_t port;
8377         struct mlx5_priv *priv;
8378         const struct rte_flow_action_port_id *conf =
8379                         (const struct rte_flow_action_port_id *)action->conf;
8380
8381         port = conf->original ? dev->data->port_id : conf->id;
8382         priv = mlx5_port_to_eswitch_info(port, false);
8383         if (!priv)
8384                 return rte_flow_error_set(error, -rte_errno,
8385                                           RTE_FLOW_ERROR_TYPE_ACTION,
8386                                           NULL,
8387                                           "No eswitch info was found for port");
8388 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8389         /*
8390          * This parameter is transferred to
8391          * mlx5dv_dr_action_create_dest_ib_port().
8392          */
8393         *dst_port_id = priv->dev_port;
8394 #else
8395         /*
8396          * Legacy mode, no LAG configurations is supported.
8397          * This parameter is transferred to
8398          * mlx5dv_dr_action_create_dest_vport().
8399          */
8400         *dst_port_id = priv->vport_id;
8401 #endif
8402         return 0;
8403 }
8404
8405 /**
8406  * Create a counter with aging configuration.
8407  *
8408  * @param[in] dev
8409  *   Pointer to rte_eth_dev structure.
8410  * @param[out] count
8411  *   Pointer to the counter action configuration.
8412  * @param[in] age
8413  *   Pointer to the aging action configuration.
8414  *
8415  * @return
8416  *   Index to flow counter on success, 0 otherwise.
8417  */
8418 static uint32_t
8419 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8420                                 struct mlx5_flow *dev_flow,
8421                                 const struct rte_flow_action_count *count,
8422                                 const struct rte_flow_action_age *age)
8423 {
8424         uint32_t counter;
8425         struct mlx5_age_param *age_param;
8426
8427         if (count && count->shared)
8428                 counter = flow_dv_counter_get_shared(dev, count->id);
8429         else
8430                 counter = flow_dv_counter_alloc(dev, !!age);
8431         if (!counter || age == NULL)
8432                 return counter;
8433         age_param  = flow_dv_counter_idx_get_age(dev, counter);
8434         age_param->context = age->context ? age->context :
8435                 (void *)(uintptr_t)(dev_flow->flow_idx);
8436         age_param->timeout = age->timeout;
8437         age_param->port_id = dev->data->port_id;
8438         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8439         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8440         return counter;
8441 }
8442 /**
8443  * Add Tx queue matcher
8444  *
8445  * @param[in] dev
8446  *   Pointer to the dev struct.
8447  * @param[in, out] matcher
8448  *   Flow matcher.
8449  * @param[in, out] key
8450  *   Flow matcher value.
8451  * @param[in] item
8452  *   Flow pattern to translate.
8453  * @param[in] inner
8454  *   Item is inner pattern.
8455  */
8456 static void
8457 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8458                                 void *matcher, void *key,
8459                                 const struct rte_flow_item *item)
8460 {
8461         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8462         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8463         void *misc_m =
8464                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8465         void *misc_v =
8466                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8467         struct mlx5_txq_ctrl *txq;
8468         uint32_t queue;
8469
8470
8471         queue_m = (const void *)item->mask;
8472         if (!queue_m)
8473                 return;
8474         queue_v = (const void *)item->spec;
8475         if (!queue_v)
8476                 return;
8477         txq = mlx5_txq_get(dev, queue_v->queue);
8478         if (!txq)
8479                 return;
8480         queue = txq->obj->sq->id;
8481         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8482         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8483                  queue & queue_m->queue);
8484         mlx5_txq_release(dev, queue_v->queue);
8485 }
8486
8487 /**
8488  * Set the hash fields according to the @p flow information.
8489  *
8490  * @param[in] dev_flow
8491  *   Pointer to the mlx5_flow.
8492  * @param[in] rss_desc
8493  *   Pointer to the mlx5_flow_rss_desc.
8494  */
8495 static void
8496 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8497                        struct mlx5_flow_rss_desc *rss_desc)
8498 {
8499         uint64_t items = dev_flow->handle->layers;
8500         int rss_inner = 0;
8501         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8502
8503         dev_flow->hash_fields = 0;
8504 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8505         if (rss_desc->level >= 2) {
8506                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8507                 rss_inner = 1;
8508         }
8509 #endif
8510         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8511             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8512                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8513                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8514                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8515                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8516                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8517                         else
8518                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8519                 }
8520         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8521                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8522                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8523                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8524                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8525                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8526                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8527                         else
8528                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8529                 }
8530         }
8531         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8532             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8533                 if (rss_types & ETH_RSS_UDP) {
8534                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8535                                 dev_flow->hash_fields |=
8536                                                 IBV_RX_HASH_SRC_PORT_UDP;
8537                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8538                                 dev_flow->hash_fields |=
8539                                                 IBV_RX_HASH_DST_PORT_UDP;
8540                         else
8541                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8542                 }
8543         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8544                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8545                 if (rss_types & ETH_RSS_TCP) {
8546                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8547                                 dev_flow->hash_fields |=
8548                                                 IBV_RX_HASH_SRC_PORT_TCP;
8549                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8550                                 dev_flow->hash_fields |=
8551                                                 IBV_RX_HASH_DST_PORT_TCP;
8552                         else
8553                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8554                 }
8555         }
8556 }
8557
8558 /**
8559  * Prepare an Rx Hash queue.
8560  *
8561  * @param dev
8562  *   Pointer to Ethernet device.
8563  * @param[in] dev_flow
8564  *   Pointer to the mlx5_flow.
8565  * @param[in] rss_desc
8566  *   Pointer to the mlx5_flow_rss_desc.
8567  * @param[out] hrxq_idx
8568  *   Hash Rx queue index.
8569  *
8570  * @return
8571  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8572  */
8573 static struct mlx5_hrxq *
8574 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8575                      struct mlx5_flow *dev_flow,
8576                      struct mlx5_flow_rss_desc *rss_desc,
8577                      uint32_t *hrxq_idx)
8578 {
8579         struct mlx5_priv *priv = dev->data->dev_private;
8580         struct mlx5_flow_handle *dh = dev_flow->handle;
8581         struct mlx5_hrxq *hrxq;
8582
8583         MLX5_ASSERT(rss_desc->queue_num);
8584         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8585         rss_desc->hash_fields = dev_flow->hash_fields;
8586         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8587         rss_desc->standalone = false;
8588         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8589         if (!*hrxq_idx)
8590                 return NULL;
8591         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8592                               *hrxq_idx);
8593         return hrxq;
8594 }
8595
8596 /**
8597  * Release sample sub action resource.
8598  *
8599  * @param[in, out] dev
8600  *   Pointer to rte_eth_dev structure.
8601  * @param[in] act_res
8602  *   Pointer to sample sub action resource.
8603  */
8604 static void
8605 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8606                                    struct mlx5_flow_sub_actions_idx *act_res)
8607 {
8608         if (act_res->rix_hrxq) {
8609                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8610                 act_res->rix_hrxq = 0;
8611         }
8612         if (act_res->rix_encap_decap) {
8613                 flow_dv_encap_decap_resource_release(dev,
8614                                                      act_res->rix_encap_decap);
8615                 act_res->rix_encap_decap = 0;
8616         }
8617         if (act_res->rix_port_id_action) {
8618                 flow_dv_port_id_action_resource_release(dev,
8619                                                 act_res->rix_port_id_action);
8620                 act_res->rix_port_id_action = 0;
8621         }
8622         if (act_res->rix_tag) {
8623                 flow_dv_tag_release(dev, act_res->rix_tag);
8624                 act_res->rix_tag = 0;
8625         }
8626         if (act_res->cnt) {
8627                 flow_dv_counter_release(dev, act_res->cnt);
8628                 act_res->cnt = 0;
8629         }
8630 }
8631
8632 int
8633 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8634                         struct mlx5_cache_entry *entry, void *cb_ctx)
8635 {
8636         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8637         struct rte_eth_dev *dev = ctx->dev;
8638         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8639         struct mlx5_flow_dv_sample_resource *cache_resource =
8640                         container_of(entry, typeof(*cache_resource), entry);
8641
8642         if (resource->ratio == cache_resource->ratio &&
8643             resource->ft_type == cache_resource->ft_type &&
8644             resource->ft_id == cache_resource->ft_id &&
8645             resource->set_action == cache_resource->set_action &&
8646             !memcmp((void *)&resource->sample_act,
8647                     (void *)&cache_resource->sample_act,
8648                     sizeof(struct mlx5_flow_sub_actions_list))) {
8649                 /*
8650                  * Existing sample action should release the prepared
8651                  * sub-actions reference counter.
8652                  */
8653                 flow_dv_sample_sub_actions_release(dev,
8654                                                 &resource->sample_idx);
8655                 return 0;
8656         }
8657         return 1;
8658 }
8659
/*
 * Cache creation callback for sample resources.
 *
 * Allocates a sample resource from the indexed pool, creates the normal
 * path flow table one level below the sample table, and builds the DR
 * sampler action from the prepared sub-actions. For FDB tables a default
 * miss action is created and appended to the sampled action list.
 * Returns the new cache entry, or NULL with @p cb_ctx error set.
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/* FDB sampling needs an explicit default miss action. */
		cache_resource->default_miss =
				mlx5_glue->dr_create_flow_action_default_miss();
		if (!cache_resource->default_miss) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"cannot create default miss "
						"action");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						cache_resource->default_miss;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	cache_resource->verbs_action =
		mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
	if (!cache_resource->verbs_action) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	return &cache_resource->entry;
error:
	/*
	 * NOTE(review): on the FDB path only the default miss action is
	 * destroyed here and the sub-action references are left to the
	 * caller, while non-FDB failures release the sub-actions directly -
	 * confirm this asymmetry is intentional.
	 */
	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
	    cache_resource->default_miss)
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->default_miss));
	else
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
8754
8755 /**
8756  * Find existing sample resource or create and register a new one.
8757  *
8758  * @param[in, out] dev
8759  *   Pointer to rte_eth_dev structure.
8760  * @param[in] resource
8761  *   Pointer to sample resource.
8762  * @parm[in, out] dev_flow
8763  *   Pointer to the dev_flow.
8764  * @param[out] error
8765  *   pointer to error structure.
8766  *
8767  * @return
8768  *   0 on success otherwise -errno and errno is set.
8769  */
8770 static int
8771 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8772                          struct mlx5_flow_dv_sample_resource *resource,
8773                          struct mlx5_flow *dev_flow,
8774                          struct rte_flow_error *error)
8775 {
8776         struct mlx5_flow_dv_sample_resource *cache_resource;
8777         struct mlx5_cache_entry *entry;
8778         struct mlx5_priv *priv = dev->data->dev_private;
8779         struct mlx5_flow_cb_ctx ctx = {
8780                 .dev = dev,
8781                 .error = error,
8782                 .data = resource,
8783         };
8784
8785         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8786         if (!entry)
8787                 return -rte_errno;
8788         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8789         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8790         dev_flow->dv.sample_res = cache_resource;
8791         return 0;
8792 }
8793
8794 int
8795 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8796                             struct mlx5_cache_entry *entry, void *cb_ctx)
8797 {
8798         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8799         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8800         struct rte_eth_dev *dev = ctx->dev;
8801         struct mlx5_flow_dv_dest_array_resource *cache_resource =
8802                         container_of(entry, typeof(*cache_resource), entry);
8803         uint32_t idx = 0;
8804
8805         if (resource->num_of_dest == cache_resource->num_of_dest &&
8806             resource->ft_type == cache_resource->ft_type &&
8807             !memcmp((void *)cache_resource->sample_act,
8808                     (void *)resource->sample_act,
8809                    (resource->num_of_dest *
8810                    sizeof(struct mlx5_flow_sub_actions_list)))) {
8811                 /*
8812                  * Existing sample action should release the prepared
8813                  * sub-actions reference counter.
8814                  */
8815                 for (idx = 0; idx < resource->num_of_dest; idx++)
8816                         flow_dv_sample_sub_actions_release(dev,
8817                                         &resource->sample_idx[idx]);
8818                 return 0;
8819         }
8820         return 1;
8821 }
8822
/*
 * Cache creation callback for destination array resources.
 *
 * Allocates the resource from the indexed pool, builds one
 * mlx5dv_dr_action_dest_attr per destination from the prepared
 * sub-actions (queue, port-id, or port-id + encap reformat), and creates
 * the DR destination array action in the domain matching the table type.
 * Returns the new cache entry, or NULL with @p cb_ctx error set.
 */
struct mlx5_cache_entry *
flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Pick the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
			dest_attr[idx]->dest = sample_act->dr_queue_action;
		} else if (sample_act->action_flags ==
			  (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
			/* Encap before forwarding needs a reformat dest. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
		} else if (sample_act->action_flags ==
			   MLX5_FLOW_ACTION_PORT_ID) {
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
		}
	}
	/* create a dest array action */
	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr);
	if (!cache_resource->action) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	cache_resource->idx = res_idx;
	/* The attrs were only needed for action creation; free them now. */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
error:
	/* Roll back per-destination references and temporary attrs. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		struct mlx5_flow_sub_actions_idx *act_res =
					&cache_resource->sample_idx[idx];
		if (act_res->rix_hrxq &&
		    !mlx5_hrxq_release(dev,
				act_res->rix_hrxq))
			act_res->rix_hrxq = 0;
		if (act_res->rix_encap_decap &&
			!flow_dv_encap_decap_resource_release(dev,
				act_res->rix_encap_decap))
			act_res->rix_encap_decap = 0;
		if (act_res->rix_port_id_action &&
			!flow_dv_port_id_action_resource_release(dev,
				act_res->rix_port_id_action))
			act_res->rix_port_id_action = 0;
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
8926
8927 /**
8928  * Find existing destination array resource or create and register a new one.
8929  *
8930  * @param[in, out] dev
8931  *   Pointer to rte_eth_dev structure.
8932  * @param[in] resource
8933  *   Pointer to destination array resource.
8934  * @parm[in, out] dev_flow
8935  *   Pointer to the dev_flow.
8936  * @param[out] error
8937  *   pointer to error structure.
8938  *
8939  * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
8941  */
8942 static int
8943 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8944                          struct mlx5_flow_dv_dest_array_resource *resource,
8945                          struct mlx5_flow *dev_flow,
8946                          struct rte_flow_error *error)
8947 {
8948         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8949         struct mlx5_priv *priv = dev->data->dev_private;
8950         struct mlx5_cache_entry *entry;
8951         struct mlx5_flow_cb_ctx ctx = {
8952                 .dev = dev,
8953                 .error = error,
8954                 .data = resource,
8955         };
8956
8957         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
8958         if (!entry)
8959                 return -rte_errno;
8960         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8961         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
8962         dev_flow->dv.dest_array_res = cache_resource;
8963         return 0;
8964 }
8965
8966 /**
8967  * Convert Sample action to DV specification.
8968  *
8969  * @param[in] dev
8970  *   Pointer to rte_eth_dev structure.
8971  * @param[in] action
8972  *   Pointer to action structure.
8973  * @param[in, out] dev_flow
8974  *   Pointer to the mlx5_flow.
8975  * @param[in] attr
8976  *   Pointer to the flow attributes.
8977  * @param[in, out] num_of_dest
8978  *   Pointer to the num of destination.
8979  * @param[in, out] sample_actions
8980  *   Pointer to sample actions list.
8981  * @param[in, out] res
8982  *   Pointer to sample resource.
8983  * @param[out] error
8984  *   Pointer to the error structure.
8985  *
8986  * @return
8987  *   0 on success, a negative errno value otherwise and rte_errno is set.
8988  */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
                                uint32_t *num_of_dest,
                                void **sample_actions,
                                struct mlx5_flow_dv_sample_resource *res,
                                struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_sample *sample_action;
        const struct rte_flow_action *sub_actions;
        const struct rte_flow_action_queue *queue;
        struct mlx5_flow_sub_actions_list *sample_act;
        struct mlx5_flow_sub_actions_idx *sample_idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc;
        uint64_t action_flags = 0;

        MLX5_ASSERT(wks);
        /* A nested sub-flow uses the second RSS descriptor of the workspace. */
        rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
        sample_act = &res->sample_act;
        sample_idx = &res->sample_idx;
        sample_action = (const struct rte_flow_action_sample *)action->conf;
        res->ratio = sample_action->ratio;
        sub_actions = sample_action->actions;
        /*
         * Translate every sub-action of the sample list, collecting the DR
         * action pointers into sample_actions[] and the resource indexes
         * into sample_idx for later release.
         */
        for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
                int type = sub_actions->type;
                uint32_t pre_rix = 0;
                void *pre_r;
                switch (type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                {
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;

                        /* Single destination queue for the sampled traffic. */
                        queue = sub_actions->conf;
                        rss_desc->queue_num = 1;
                        rss_desc->queue[0] = queue->index;
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
                                         "cannot create fate queue");
                        sample_act->dr_queue_action = hrxq->action;
                        sample_idx->rix_hrxq = hrxq_idx;
                        sample_actions[sample_act->actions_num++] =
                                                hrxq->action;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        /* Keep the queue index on the handle when a MARK
                         * sub-action was already translated.
                         */
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK:
                {
                        uint32_t tag_be = mlx5_flow_mark_set
                                (((const struct rte_flow_action_mark *)
                                (sub_actions->conf))->id);

                        dev_flow->handle->mark = 1;
                        pre_rix = dev_flow->handle->dvh.rix_tag;
                        /* Save the mark resource before sample */
                        pre_r = dev_flow->dv.tag_resource;
                        if (flow_dv_tag_resource_register(dev, tag_be,
                                                  dev_flow, error))
                                return -rte_errno;
                        MLX5_ASSERT(dev_flow->dv.tag_resource);
                        sample_act->dr_tag_action =
                                dev_flow->dv.tag_resource->action;
                        sample_idx->rix_tag =
                                dev_flow->handle->dvh.rix_tag;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_tag_action;
                        /* Recover the mark resource after sample */
                        dev_flow->dv.tag_resource = pre_r;
                        dev_flow->handle->dvh.rix_tag = pre_rix;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT:
                {
                        uint32_t counter;

                        /* Dedicated counter for the sampled packets. */
                        counter = flow_dv_translate_create_counter(dev,
                                        dev_flow, sub_actions->conf, 0);
                        if (!counter)
                                return rte_flow_error_set
                                                (error, rte_errno,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL,
                                                 "cannot create counter"
                                                 " object.");
                        sample_idx->cnt = counter;
                        sample_act->dr_cnt_action =
                                  (flow_dv_counter_get_by_idx(dev,
                                  counter, NULL))->action;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_cnt_action;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID:
                {
                        struct mlx5_flow_dv_port_id_action_resource
                                        port_id_resource;
                        uint32_t port_id = 0;

                        memset(&port_id_resource, 0, sizeof(port_id_resource));
                        /* Save the port id resource before sample */
                        pre_rix = dev_flow->handle->rix_port_id_action;
                        pre_r = dev_flow->dv.port_id_action;
                        if (flow_dv_translate_action_port_id(dev, sub_actions,
                                                             &port_id, error))
                                return -rte_errno;
                        port_id_resource.port_id = port_id;
                        if (flow_dv_port_id_action_resource_register
                            (dev, &port_id_resource, dev_flow, error))
                                return -rte_errno;
                        sample_act->dr_port_id_action =
                                dev_flow->dv.port_id_action->action;
                        sample_idx->rix_port_id_action =
                                dev_flow->handle->rix_port_id_action;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_port_id_action;
                        /* Recover the port id resource after sample */
                        dev_flow->dv.port_id_action = pre_r;
                        dev_flow->handle->rix_port_id_action = pre_rix;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        /* Save the encap resource before sample */
                        pre_rix = dev_flow->handle->dvh.rix_encap_decap;
                        pre_r = dev_flow->dv.encap_decap;
                        if (flow_dv_create_action_l2_encap(dev, sub_actions,
                                                           dev_flow,
                                                           attr->transfer,
                                                           error))
                                return -rte_errno;
                        sample_act->dr_encap_action =
                                dev_flow->dv.encap_decap->action;
                        sample_idx->rix_encap_decap =
                                dev_flow->handle->dvh.rix_encap_decap;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_encap_action;
                        /* Recover the encap resource after sample */
                        dev_flow->dv.encap_decap = pre_r;
                        dev_flow->handle->dvh.rix_encap_decap = pre_rix;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "Not support for sampler action");
                }
        }
        sample_act->action_flags = action_flags;
        res->ft_id = dev_flow->dv.group;
        if (attr->transfer) {
                union {
                        uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
                        uint64_t set_action;
                } action_ctx = { .set_action = 0 };

                /*
                 * In FDB, build a SET modification writing the source vport
                 * metadata tag into register C0 for the sampled packets.
                 */
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
                MLX5_SET(set_action_in, action_ctx.action_in, action_type,
                         MLX5_MODIFICATION_TYPE_SET);
                MLX5_SET(set_action_in, action_ctx.action_in, field,
                         MLX5_MODI_META_REG_C_0);
                MLX5_SET(set_action_in, action_ctx.action_in, data,
                         priv->vport_meta_tag);
                res->set_action = action_ctx.set_action;
        } else if (attr->ingress) {
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        } else {
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
        }
        return 0;
}
9177
9178 /**
 * Create and register the sample action (or, with multiple destinations,
 * the destination array for mirroring) from the translated resources.
9180  *
9181  * @param[in] dev
9182  *   Pointer to rte_eth_dev structure.
9183  * @param[in, out] dev_flow
9184  *   Pointer to the mlx5_flow.
9185  * @param[in] num_of_dest
9186  *   The num of destination.
9187  * @param[in, out] res
9188  *   Pointer to sample resource.
9189  * @param[in, out] mdest_res
9190  *   Pointer to destination array resource.
9191  * @param[in] sample_actions
9192  *   Pointer to sample path actions list.
9193  * @param[in] action_flags
9194  *   Holds the actions detected until now.
9195  * @param[out] error
9196  *   Pointer to the error structure.
9197  *
9198  * @return
9199  *   0 on success, a negative errno value otherwise and rte_errno is set.
9200  */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
                             struct mlx5_flow *dev_flow,
                             uint32_t num_of_dest,
                             struct mlx5_flow_dv_sample_resource *res,
                             struct mlx5_flow_dv_dest_array_resource *mdest_res,
                             void **sample_actions,
                             uint64_t action_flags,
                             struct rte_flow_error *error)
{
        /* update normal path action resource into last index of array */
        uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
        struct mlx5_flow_sub_actions_list *sample_act =
                                        &mdest_res->sample_act[dest_index];
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc;
        uint32_t normal_idx = 0;
        struct mlx5_hrxq *hrxq;
        uint32_t hrxq_idx;

        MLX5_ASSERT(wks);
        /* A nested sub-flow uses the second RSS descriptor of the workspace. */
        rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
        if (num_of_dest > 1) {
                /*
                 * Mirroring path: gather the normal-path fate actions into
                 * the last slot of the destination array resource.
                 */
                if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
                        /* Handle QP action for mirroring */
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                     (error, rte_errno,
                                      RTE_FLOW_ERROR_TYPE_ACTION,
                                      NULL,
                                      "cannot create rx queue");
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
                        sample_act->dr_queue_action = hrxq->action;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_encap_decap =
                                dev_flow->handle->dvh.rix_encap_decap;
                        sample_act->dr_encap_action =
                                dev_flow->dv.encap_decap->action;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_port_id_action =
                                dev_flow->handle->rix_port_id_action;
                        sample_act->dr_port_id_action =
                                dev_flow->dv.port_id_action->action;
                }
                sample_act->actions_num = normal_idx;
                /* update sample action resource into first index of array */
                mdest_res->ft_type = res->ft_type;
                memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
                                sizeof(struct mlx5_flow_sub_actions_idx));
                memcpy(&mdest_res->sample_act[0], &res->sample_act,
                                sizeof(struct mlx5_flow_sub_actions_list));
                mdest_res->num_of_dest = num_of_dest;
                if (flow_dv_dest_array_resource_register(dev, mdest_res,
                                                         dev_flow, error))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "can't create sample "
                                                  "action");
        } else {
                /* Single destination: register the plain sample resource. */
                res->sub_actions = sample_actions;
                if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "can't create sample action");
        }
        return 0;
}
9279
9280 /**
9281  * Fill the flow with DV spec, lock free
9282  * (mutex should be acquired by caller).
9283  *
9284  * @param[in] dev
9285  *   Pointer to rte_eth_dev structure.
9286  * @param[in, out] dev_flow
9287  *   Pointer to the sub flow.
9288  * @param[in] attr
9289  *   Pointer to the flow attributes.
9290  * @param[in] items
9291  *   Pointer to the list of items.
9292  * @param[in] actions
9293  *   Pointer to the list of actions.
9294  * @param[out] error
9295  *   Pointer to the error structure.
9296  *
9297  * @return
9298  *   0 on success, a negative errno value otherwise and rte_errno is set.
9299  */
9300 static int
9301 __flow_dv_translate(struct rte_eth_dev *dev,
9302                     struct mlx5_flow *dev_flow,
9303                     const struct rte_flow_attr *attr,
9304                     const struct rte_flow_item items[],
9305                     const struct rte_flow_action actions[],
9306                     struct rte_flow_error *error)
9307 {
9308         struct mlx5_priv *priv = dev->data->dev_private;
9309         struct mlx5_dev_config *dev_conf = &priv->config;
9310         struct rte_flow *flow = dev_flow->flow;
9311         struct mlx5_flow_handle *handle = dev_flow->handle;
9312         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9313         struct mlx5_flow_rss_desc *rss_desc;
9314         uint64_t item_flags = 0;
9315         uint64_t last_item = 0;
9316         uint64_t action_flags = 0;
9317         uint64_t priority = attr->priority;
9318         struct mlx5_flow_dv_matcher matcher = {
9319                 .mask = {
9320                         .size = sizeof(matcher.mask.buf) -
9321                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9322                 },
9323         };
9324         int actions_n = 0;
9325         bool actions_end = false;
9326         union {
9327                 struct mlx5_flow_dv_modify_hdr_resource res;
9328                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9329                             sizeof(struct mlx5_modification_cmd) *
9330                             (MLX5_MAX_MODIFY_NUM + 1)];
9331         } mhdr_dummy;
9332         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9333         const struct rte_flow_action_count *count = NULL;
9334         const struct rte_flow_action_age *age = NULL;
9335         union flow_dv_attr flow_attr = { .attr = 0 };
9336         uint32_t tag_be;
9337         union mlx5_flow_tbl_key tbl_key;
9338         uint32_t modify_action_position = UINT32_MAX;
9339         void *match_mask = matcher.mask.buf;
9340         void *match_value = dev_flow->dv.value.buf;
9341         uint8_t next_protocol = 0xff;
9342         struct rte_vlan_hdr vlan = { 0 };
9343         struct mlx5_flow_dv_dest_array_resource mdest_res;
9344         struct mlx5_flow_dv_sample_resource sample_res;
9345         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9346         struct mlx5_flow_sub_actions_list *sample_act;
9347         uint32_t sample_act_pos = UINT32_MAX;
9348         uint32_t num_of_dest = 0;
9349         int tmp_actions_n = 0;
9350         uint32_t table;
9351         int ret = 0;
9352         const struct mlx5_flow_tunnel *tunnel;
9353         struct flow_grp_info grp_info = {
9354                 .external = !!dev_flow->external,
9355                 .transfer = !!attr->transfer,
9356                 .fdb_def_rule = !!priv->fdb_def_rule,
9357         };
9358
9359         MLX5_ASSERT(wks);
9360         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9361         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9362         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9363         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9364                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9365         /* update normal path action resource into last index of array */
9366         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9367         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9368                  flow_items_to_tunnel(items) :
9369                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9370                  flow_actions_to_tunnel(actions) :
9371                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
9372         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9373                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9374         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9375                                 (dev, tunnel, attr, items, actions);
9376         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9377                                        grp_info, error);
9378         if (ret)
9379                 return ret;
9380         dev_flow->dv.group = table;
9381         if (attr->transfer)
9382                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9383         if (priority == MLX5_FLOW_PRIO_RSVD)
9384                 priority = dev_conf->flow_prio - 1;
9385         /* number of actions must be set to 0 in case of dirty stack. */
9386         mhdr_res->actions_num = 0;
9387         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9388                 /*
9389                  * do not add decap action if match rule drops packet
9390                  * HW rejects rules with decap & drop
9391                  */
9392                 bool add_decap = true;
9393                 const struct rte_flow_action *ptr = actions;
9394                 struct mlx5_flow_tbl_resource *tbl;
9395
9396                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9397                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9398                                 add_decap = false;
9399                                 break;
9400                         }
9401                 }
9402                 if (add_decap) {
9403                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9404                                                            attr->transfer,
9405                                                            error))
9406                                 return -rte_errno;
9407                         dev_flow->dv.actions[actions_n++] =
9408                                         dev_flow->dv.encap_decap->action;
9409                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9410                 }
9411                 /*
9412                  * bind table_id with <group, table> for tunnel match rule.
9413                  * Tunnel set rule establishes that bind in JUMP action handler.
9414                  * Required for scenario when application creates tunnel match
9415                  * rule before tunnel set rule.
9416                  */
9417                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9418                                                attr->transfer,
9419                                                !!dev_flow->external, tunnel,
9420                                                attr->group, 0, error);
9421                 if (!tbl)
9422                         return rte_flow_error_set
9423                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9424                                actions, "cannot register tunnel group");
9425         }
9426         for (; !actions_end ; actions++) {
9427                 const struct rte_flow_action_queue *queue;
9428                 const struct rte_flow_action_rss *rss;
9429                 const struct rte_flow_action *action = actions;
9430                 const uint8_t *rss_key;
9431                 const struct rte_flow_action_meter *mtr;
9432                 struct mlx5_flow_tbl_resource *tbl;
9433                 uint32_t port_id = 0;
9434                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9435                 int action_type = actions->type;
9436                 const struct rte_flow_action *found_action = NULL;
9437                 struct mlx5_flow_meter *fm = NULL;
9438                 uint32_t jump_group = 0;
9439
9440                 if (!mlx5_flow_os_action_supported(action_type))
9441                         return rte_flow_error_set(error, ENOTSUP,
9442                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9443                                                   actions,
9444                                                   "action not supported");
9445                 switch (action_type) {
9446                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9447                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9448                         break;
9449                 case RTE_FLOW_ACTION_TYPE_VOID:
9450                         break;
9451                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9452                         if (flow_dv_translate_action_port_id(dev, action,
9453                                                              &port_id, error))
9454                                 return -rte_errno;
9455                         port_id_resource.port_id = port_id;
9456                         MLX5_ASSERT(!handle->rix_port_id_action);
9457                         if (flow_dv_port_id_action_resource_register
9458                             (dev, &port_id_resource, dev_flow, error))
9459                                 return -rte_errno;
9460                         dev_flow->dv.actions[actions_n++] =
9461                                         dev_flow->dv.port_id_action->action;
9462                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9463                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9464                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9465                         num_of_dest++;
9466                         break;
9467                 case RTE_FLOW_ACTION_TYPE_FLAG:
9468                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9469                         dev_flow->handle->mark = 1;
9470                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9471                                 struct rte_flow_action_mark mark = {
9472                                         .id = MLX5_FLOW_MARK_DEFAULT,
9473                                 };
9474
9475                                 if (flow_dv_convert_action_mark(dev, &mark,
9476                                                                 mhdr_res,
9477                                                                 error))
9478                                         return -rte_errno;
9479                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9480                                 break;
9481                         }
9482                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9483                         /*
9484                          * Only one FLAG or MARK is supported per device flow
9485                          * right now. So the pointer to the tag resource must be
9486                          * zero before the register process.
9487                          */
9488                         MLX5_ASSERT(!handle->dvh.rix_tag);
9489                         if (flow_dv_tag_resource_register(dev, tag_be,
9490                                                           dev_flow, error))
9491                                 return -rte_errno;
9492                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9493                         dev_flow->dv.actions[actions_n++] =
9494                                         dev_flow->dv.tag_resource->action;
9495                         break;
9496                 case RTE_FLOW_ACTION_TYPE_MARK:
9497                         action_flags |= MLX5_FLOW_ACTION_MARK;
9498                         dev_flow->handle->mark = 1;
9499                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9500                                 const struct rte_flow_action_mark *mark =
9501                                         (const struct rte_flow_action_mark *)
9502                                                 actions->conf;
9503
9504                                 if (flow_dv_convert_action_mark(dev, mark,
9505                                                                 mhdr_res,
9506                                                                 error))
9507                                         return -rte_errno;
9508                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9509                                 break;
9510                         }
9511                         /* Fall-through */
9512                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9513                         /* Legacy (non-extensive) MARK action. */
9514                         tag_be = mlx5_flow_mark_set
9515                               (((const struct rte_flow_action_mark *)
9516                                (actions->conf))->id);
9517                         MLX5_ASSERT(!handle->dvh.rix_tag);
9518                         if (flow_dv_tag_resource_register(dev, tag_be,
9519                                                           dev_flow, error))
9520                                 return -rte_errno;
9521                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9522                         dev_flow->dv.actions[actions_n++] =
9523                                         dev_flow->dv.tag_resource->action;
9524                         break;
9525                 case RTE_FLOW_ACTION_TYPE_SET_META:
9526                         if (flow_dv_convert_action_set_meta
9527                                 (dev, mhdr_res, attr,
9528                                  (const struct rte_flow_action_set_meta *)
9529                                   actions->conf, error))
9530                                 return -rte_errno;
9531                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9532                         break;
9533                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9534                         if (flow_dv_convert_action_set_tag
9535                                 (dev, mhdr_res,
9536                                  (const struct rte_flow_action_set_tag *)
9537                                   actions->conf, error))
9538                                 return -rte_errno;
9539                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9540                         break;
9541                 case RTE_FLOW_ACTION_TYPE_DROP:
9542                         action_flags |= MLX5_FLOW_ACTION_DROP;
9543                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9544                         break;
9545                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9546                         queue = actions->conf;
9547                         rss_desc->queue_num = 1;
9548                         rss_desc->queue[0] = queue->index;
9549                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9550                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9551                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9552                         num_of_dest++;
9553                         break;
9554                 case RTE_FLOW_ACTION_TYPE_RSS:
9555                         rss = actions->conf;
9556                         memcpy(rss_desc->queue, rss->queue,
9557                                rss->queue_num * sizeof(uint16_t));
9558                         rss_desc->queue_num = rss->queue_num;
9559                         /* NULL RSS key indicates default RSS key. */
9560                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9561                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9562                         /*
9563                          * rss->level and rss.types should be set in advance
9564                          * when expanding items for RSS.
9565                          */
9566                         action_flags |= MLX5_FLOW_ACTION_RSS;
9567                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9568                         break;
9569                 case RTE_FLOW_ACTION_TYPE_AGE:
9570                 case RTE_FLOW_ACTION_TYPE_COUNT:
9571                         if (!dev_conf->devx) {
9572                                 return rte_flow_error_set
9573                                               (error, ENOTSUP,
9574                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9575                                                NULL,
9576                                                "count action not supported");
9577                         }
9578                         /* Save information first, will apply later. */
9579                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9580                                 count = action->conf;
9581                         else
9582                                 age = action->conf;
9583                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9584                         break;
9585                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9586                         dev_flow->dv.actions[actions_n++] =
9587                                                 priv->sh->pop_vlan_action;
9588                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9589                         break;
9590                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9591                         if (!(action_flags &
9592                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9593                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9594                         vlan.eth_proto = rte_be_to_cpu_16
9595                              ((((const struct rte_flow_action_of_push_vlan *)
9596                                                    actions->conf)->ethertype));
9597                         found_action = mlx5_flow_find_action
9598                                         (actions + 1,
9599                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9600                         if (found_action)
9601                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9602                         found_action = mlx5_flow_find_action
9603                                         (actions + 1,
9604                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9605                         if (found_action)
9606                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9607                         if (flow_dv_create_action_push_vlan
9608                                             (dev, attr, &vlan, dev_flow, error))
9609                                 return -rte_errno;
9610                         dev_flow->dv.actions[actions_n++] =
9611                                         dev_flow->dv.push_vlan_res->action;
9612                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9613                         break;
9614                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9615                         /* of_vlan_push action handled this action */
9616                         MLX5_ASSERT(action_flags &
9617                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9618                         break;
9619                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9620                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9621                                 break;
9622                         flow_dev_get_vlan_info_from_items(items, &vlan);
9623                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9624                         /* If no VLAN push - this is a modify header action */
9625                         if (flow_dv_convert_action_modify_vlan_vid
9626                                                 (mhdr_res, actions, error))
9627                                 return -rte_errno;
9628                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9629                         break;
9630                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9631                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9632                         if (flow_dv_create_action_l2_encap(dev, actions,
9633                                                            dev_flow,
9634                                                            attr->transfer,
9635                                                            error))
9636                                 return -rte_errno;
9637                         dev_flow->dv.actions[actions_n++] =
9638                                         dev_flow->dv.encap_decap->action;
9639                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9640                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9641                                 sample_act->action_flags |=
9642                                                         MLX5_FLOW_ACTION_ENCAP;
9643                         break;
9644                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9645                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9646                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9647                                                            attr->transfer,
9648                                                            error))
9649                                 return -rte_errno;
9650                         dev_flow->dv.actions[actions_n++] =
9651                                         dev_flow->dv.encap_decap->action;
9652                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9653                         break;
9654                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9655                         /* Handle encap with preceding decap. */
9656                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9657                                 if (flow_dv_create_action_raw_encap
9658                                         (dev, actions, dev_flow, attr, error))
9659                                         return -rte_errno;
9660                                 dev_flow->dv.actions[actions_n++] =
9661                                         dev_flow->dv.encap_decap->action;
9662                         } else {
9663                                 /* Handle encap without preceding decap. */
9664                                 if (flow_dv_create_action_l2_encap
9665                                     (dev, actions, dev_flow, attr->transfer,
9666                                      error))
9667                                         return -rte_errno;
9668                                 dev_flow->dv.actions[actions_n++] =
9669                                         dev_flow->dv.encap_decap->action;
9670                         }
9671                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9672                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9673                                 sample_act->action_flags |=
9674                                                         MLX5_FLOW_ACTION_ENCAP;
9675                         break;
9676                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9677                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9678                                 ;
9679                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9680                                 if (flow_dv_create_action_l2_decap
9681                                     (dev, dev_flow, attr->transfer, error))
9682                                         return -rte_errno;
9683                                 dev_flow->dv.actions[actions_n++] =
9684                                         dev_flow->dv.encap_decap->action;
9685                         }
9686                         /* If decap is followed by encap, handle it at encap. */
9687                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9688                         break;
9689                 case RTE_FLOW_ACTION_TYPE_JUMP:
9690                         jump_group = ((const struct rte_flow_action_jump *)
9691                                                         action->conf)->group;
9692                         grp_info.std_tbl_fix = 0;
9693                         ret = mlx5_flow_group_to_table(dev, tunnel,
9694                                                        jump_group,
9695                                                        &table,
9696                                                        grp_info, error);
9697                         if (ret)
9698                                 return ret;
9699                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9700                                                        attr->transfer,
9701                                                        !!dev_flow->external,
9702                                                        tunnel, jump_group, 0,
9703                                                        error);
9704                         if (!tbl)
9705                                 return rte_flow_error_set
9706                                                 (error, errno,
9707                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9708                                                  NULL,
9709                                                  "cannot create jump action.");
9710                         if (flow_dv_jump_tbl_resource_register
9711                             (dev, tbl, dev_flow, error)) {
9712                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9713                                 return rte_flow_error_set
9714                                                 (error, errno,
9715                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9716                                                  NULL,
9717                                                  "cannot create jump action.");
9718                         }
9719                         dev_flow->dv.actions[actions_n++] =
9720                                         dev_flow->dv.jump->action;
9721                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9722                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9723                         break;
9724                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9725                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9726                         if (flow_dv_convert_action_modify_mac
9727                                         (mhdr_res, actions, error))
9728                                 return -rte_errno;
9729                         action_flags |= actions->type ==
9730                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9731                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9732                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9733                         break;
9734                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9735                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9736                         if (flow_dv_convert_action_modify_ipv4
9737                                         (mhdr_res, actions, error))
9738                                 return -rte_errno;
9739                         action_flags |= actions->type ==
9740                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9741                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9742                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9743                         break;
9744                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9745                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9746                         if (flow_dv_convert_action_modify_ipv6
9747                                         (mhdr_res, actions, error))
9748                                 return -rte_errno;
9749                         action_flags |= actions->type ==
9750                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9751                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9752                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9753                         break;
9754                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9755                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9756                         if (flow_dv_convert_action_modify_tp
9757                                         (mhdr_res, actions, items,
9758                                          &flow_attr, dev_flow, !!(action_flags &
9759                                          MLX5_FLOW_ACTION_DECAP), error))
9760                                 return -rte_errno;
9761                         action_flags |= actions->type ==
9762                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
9763                                         MLX5_FLOW_ACTION_SET_TP_SRC :
9764                                         MLX5_FLOW_ACTION_SET_TP_DST;
9765                         break;
9766                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
9767                         if (flow_dv_convert_action_modify_dec_ttl
9768                                         (mhdr_res, items, &flow_attr, dev_flow,
9769                                          !!(action_flags &
9770                                          MLX5_FLOW_ACTION_DECAP), error))
9771                                 return -rte_errno;
9772                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
9773                         break;
9774                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
9775                         if (flow_dv_convert_action_modify_ttl
9776                                         (mhdr_res, actions, items, &flow_attr,
9777                                          dev_flow, !!(action_flags &
9778                                          MLX5_FLOW_ACTION_DECAP), error))
9779                                 return -rte_errno;
9780                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
9781                         break;
9782                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
9783                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
9784                         if (flow_dv_convert_action_modify_tcp_seq
9785                                         (mhdr_res, actions, error))
9786                                 return -rte_errno;
9787                         action_flags |= actions->type ==
9788                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
9789                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
9790                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
9791                         break;
9792
9793                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
9794                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
9795                         if (flow_dv_convert_action_modify_tcp_ack
9796                                         (mhdr_res, actions, error))
9797                                 return -rte_errno;
9798                         action_flags |= actions->type ==
9799                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
9800                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
9801                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
9802                         break;
9803                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
9804                         if (flow_dv_convert_action_set_reg
9805                                         (mhdr_res, actions, error))
9806                                 return -rte_errno;
9807                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9808                         break;
9809                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
9810                         if (flow_dv_convert_action_copy_mreg
9811                                         (dev, mhdr_res, actions, error))
9812                                 return -rte_errno;
9813                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9814                         break;
9815                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
9816                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
9817                         dev_flow->handle->fate_action =
9818                                         MLX5_FLOW_FATE_DEFAULT_MISS;
9819                         break;
9820                 case RTE_FLOW_ACTION_TYPE_METER:
9821                         mtr = actions->conf;
9822                         if (!flow->meter) {
9823                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
9824                                                             attr, error);
9825                                 if (!fm)
9826                                         return rte_flow_error_set(error,
9827                                                 rte_errno,
9828                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9829                                                 NULL,
9830                                                 "meter not found "
9831                                                 "or invalid parameters");
9832                                 flow->meter = fm->idx;
9833                         }
9834                         /* Set the meter action. */
9835                         if (!fm) {
9836                                 fm = mlx5_ipool_get(priv->sh->ipool
9837                                                 [MLX5_IPOOL_MTR], flow->meter);
9838                                 if (!fm)
9839                                         return rte_flow_error_set(error,
9840                                                 rte_errno,
9841                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9842                                                 NULL,
9843                                                 "meter not found "
9844                                                 "or invalid parameters");
9845                         }
9846                         dev_flow->dv.actions[actions_n++] =
9847                                 fm->mfts->meter_action;
9848                         action_flags |= MLX5_FLOW_ACTION_METER;
9849                         break;
9850                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
9851                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
9852                                                               actions, error))
9853                                 return -rte_errno;
9854                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
9855                         break;
9856                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
9857                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
9858                                                               actions, error))
9859                                 return -rte_errno;
9860                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
9861                         break;
9862                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
9863                         sample_act_pos = actions_n;
9864                         ret = flow_dv_translate_action_sample(dev,
9865                                                               actions,
9866                                                               dev_flow, attr,
9867                                                               &num_of_dest,
9868                                                               sample_actions,
9869                                                               &sample_res,
9870                                                               error);
9871                         if (ret < 0)
9872                                 return ret;
9873                         actions_n++;
9874                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
9875                         /* put encap action into group if work with port id */
9876                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
9877                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
9878                                 sample_act->action_flags |=
9879                                                         MLX5_FLOW_ACTION_ENCAP;
9880                         break;
9881                 case RTE_FLOW_ACTION_TYPE_END:
9882                         actions_end = true;
9883                         if (mhdr_res->actions_num) {
9884                                 /* create modify action if needed. */
9885                                 if (flow_dv_modify_hdr_resource_register
9886                                         (dev, mhdr_res, dev_flow, error))
9887                                         return -rte_errno;
9888                                 dev_flow->dv.actions[modify_action_position] =
9889                                         handle->dvh.modify_hdr->action;
9890                         }
9891                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
9892                                 flow->counter =
9893                                         flow_dv_translate_create_counter(dev,
9894                                                 dev_flow, count, age);
9895
9896                                 if (!flow->counter)
9897                                         return rte_flow_error_set
9898                                                 (error, rte_errno,
9899                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9900                                                 NULL,
9901                                                 "cannot create counter"
9902                                                 " object.");
9903                                 dev_flow->dv.actions[actions_n] =
9904                                           (flow_dv_counter_get_by_idx(dev,
9905                                           flow->counter, NULL))->action;
9906                                 actions_n++;
9907                         }
9908                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
9909                                 ret = flow_dv_create_action_sample(dev,
9910                                                           dev_flow,
9911                                                           num_of_dest,
9912                                                           &sample_res,
9913                                                           &mdest_res,
9914                                                           sample_actions,
9915                                                           action_flags,
9916                                                           error);
9917                                 if (ret < 0)
9918                                         return rte_flow_error_set
9919                                                 (error, rte_errno,
9920                                                 RTE_FLOW_ERROR_TYPE_ACTION,
9921                                                 NULL,
9922                                                 "cannot create sample action");
9923                                 if (num_of_dest > 1) {
9924                                         dev_flow->dv.actions[sample_act_pos] =
9925                                         dev_flow->dv.dest_array_res->action;
9926                                 } else {
9927                                         dev_flow->dv.actions[sample_act_pos] =
9928                                         dev_flow->dv.sample_res->verbs_action;
9929                                 }
9930                         }
9931                         break;
9932                 default:
9933                         break;
9934                 }
9935                 if (mhdr_res->actions_num &&
9936                     modify_action_position == UINT32_MAX)
9937                         modify_action_position = actions_n++;
9938         }
9939         /*
9940          * For multiple destination (sample action with ratio=1), the encap
9941          * action and port id action will be combined into group action.
9942          * So need remove the original these actions in the flow and only
9943          * use the sample action instead of.
9944          */
9945         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
9946                 int i;
9947                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9948
9949                 for (i = 0; i < actions_n; i++) {
9950                         if ((sample_act->dr_encap_action &&
9951                                 sample_act->dr_encap_action ==
9952                                 dev_flow->dv.actions[i]) ||
9953                                 (sample_act->dr_port_id_action &&
9954                                 sample_act->dr_port_id_action ==
9955                                 dev_flow->dv.actions[i]))
9956                                 continue;
9957                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
9958                 }
9959                 memcpy((void *)dev_flow->dv.actions,
9960                                 (void *)temp_actions,
9961                                 tmp_actions_n * sizeof(void *));
9962                 actions_n = tmp_actions_n;
9963         }
9964         dev_flow->dv.actions_n = actions_n;
9965         dev_flow->act_flags = action_flags;
9966         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
9967                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
9968                 int item_type = items->type;
9969
9970                 if (!mlx5_flow_os_item_supported(item_type))
9971                         return rte_flow_error_set(error, ENOTSUP,
9972                                                   RTE_FLOW_ERROR_TYPE_ITEM,
9973                                                   NULL, "item not supported");
9974                 switch (item_type) {
9975                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
9976                         flow_dv_translate_item_port_id(dev, match_mask,
9977                                                        match_value, items);
9978                         last_item = MLX5_FLOW_ITEM_PORT_ID;
9979                         break;
9980                 case RTE_FLOW_ITEM_TYPE_ETH:
9981                         flow_dv_translate_item_eth(match_mask, match_value,
9982                                                    items, tunnel,
9983                                                    dev_flow->dv.group);
9984                         matcher.priority = action_flags &
9985                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
9986                                         !dev_flow->external ?
9987                                         MLX5_PRIORITY_MAP_L3 :
9988                                         MLX5_PRIORITY_MAP_L2;
9989                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
9990                                              MLX5_FLOW_LAYER_OUTER_L2;
9991                         break;
9992                 case RTE_FLOW_ITEM_TYPE_VLAN:
9993                         flow_dv_translate_item_vlan(dev_flow,
9994                                                     match_mask, match_value,
9995                                                     items, tunnel,
9996                                                     dev_flow->dv.group);
9997                         matcher.priority = MLX5_PRIORITY_MAP_L2;
9998                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
9999                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10000                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10001                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10002                         break;
10003                 case RTE_FLOW_ITEM_TYPE_IPV4:
10004                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10005                                                   &item_flags, &tunnel);
10006                         flow_dv_translate_item_ipv4(match_mask, match_value,
10007                                                     items, tunnel,
10008                                                     dev_flow->dv.group);
10009                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10010                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10011                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10012                         if (items->mask != NULL &&
10013                             ((const struct rte_flow_item_ipv4 *)
10014                              items->mask)->hdr.next_proto_id) {
10015                                 next_protocol =
10016                                         ((const struct rte_flow_item_ipv4 *)
10017                                          (items->spec))->hdr.next_proto_id;
10018                                 next_protocol &=
10019                                         ((const struct rte_flow_item_ipv4 *)
10020                                          (items->mask))->hdr.next_proto_id;
10021                         } else {
10022                                 /* Reset for inner layer. */
10023                                 next_protocol = 0xff;
10024                         }
10025                         break;
10026                 case RTE_FLOW_ITEM_TYPE_IPV6:
10027                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10028                                                   &item_flags, &tunnel);
10029                         flow_dv_translate_item_ipv6(match_mask, match_value,
10030                                                     items, tunnel,
10031                                                     dev_flow->dv.group);
10032                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10033                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10034                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10035                         if (items->mask != NULL &&
10036                             ((const struct rte_flow_item_ipv6 *)
10037                              items->mask)->hdr.proto) {
10038                                 next_protocol =
10039                                         ((const struct rte_flow_item_ipv6 *)
10040                                          items->spec)->hdr.proto;
10041                                 next_protocol &=
10042                                         ((const struct rte_flow_item_ipv6 *)
10043                                          items->mask)->hdr.proto;
10044                         } else {
10045                                 /* Reset for inner layer. */
10046                                 next_protocol = 0xff;
10047                         }
10048                         break;
10049                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10050                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10051                                                              match_value,
10052                                                              items, tunnel);
10053                         last_item = tunnel ?
10054                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10055                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10056                         if (items->mask != NULL &&
10057                             ((const struct rte_flow_item_ipv6_frag_ext *)
10058                              items->mask)->hdr.next_header) {
10059                                 next_protocol =
10060                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10061                                  items->spec)->hdr.next_header;
10062                                 next_protocol &=
10063                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10064                                  items->mask)->hdr.next_header;
10065                         } else {
10066                                 /* Reset for inner layer. */
10067                                 next_protocol = 0xff;
10068                         }
10069                         break;
10070                 case RTE_FLOW_ITEM_TYPE_TCP:
10071                         flow_dv_translate_item_tcp(match_mask, match_value,
10072                                                    items, tunnel);
10073                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10074                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10075                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10076                         break;
10077                 case RTE_FLOW_ITEM_TYPE_UDP:
10078                         flow_dv_translate_item_udp(match_mask, match_value,
10079                                                    items, tunnel);
10080                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10081                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10082                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10083                         break;
10084                 case RTE_FLOW_ITEM_TYPE_GRE:
10085                         flow_dv_translate_item_gre(match_mask, match_value,
10086                                                    items, tunnel);
10087                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10088                         last_item = MLX5_FLOW_LAYER_GRE;
10089                         break;
10090                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10091                         flow_dv_translate_item_gre_key(match_mask,
10092                                                        match_value, items);
10093                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10094                         break;
10095                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10096                         flow_dv_translate_item_nvgre(match_mask, match_value,
10097                                                      items, tunnel);
10098                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10099                         last_item = MLX5_FLOW_LAYER_GRE;
10100                         break;
10101                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10102                         flow_dv_translate_item_vxlan(match_mask, match_value,
10103                                                      items, tunnel);
10104                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10105                         last_item = MLX5_FLOW_LAYER_VXLAN;
10106                         break;
10107                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10108                         flow_dv_translate_item_vxlan_gpe(match_mask,
10109                                                          match_value, items,
10110                                                          tunnel);
10111                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10112                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10113                         break;
10114                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10115                         flow_dv_translate_item_geneve(match_mask, match_value,
10116                                                       items, tunnel);
10117                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10118                         last_item = MLX5_FLOW_LAYER_GENEVE;
10119                         break;
10120                 case RTE_FLOW_ITEM_TYPE_MPLS:
10121                         flow_dv_translate_item_mpls(match_mask, match_value,
10122                                                     items, last_item, tunnel);
10123                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10124                         last_item = MLX5_FLOW_LAYER_MPLS;
10125                         break;
10126                 case RTE_FLOW_ITEM_TYPE_MARK:
10127                         flow_dv_translate_item_mark(dev, match_mask,
10128                                                     match_value, items);
10129                         last_item = MLX5_FLOW_ITEM_MARK;
10130                         break;
10131                 case RTE_FLOW_ITEM_TYPE_META:
10132                         flow_dv_translate_item_meta(dev, match_mask,
10133                                                     match_value, attr, items);
10134                         last_item = MLX5_FLOW_ITEM_METADATA;
10135                         break;
10136                 case RTE_FLOW_ITEM_TYPE_ICMP:
10137                         flow_dv_translate_item_icmp(match_mask, match_value,
10138                                                     items, tunnel);
10139                         last_item = MLX5_FLOW_LAYER_ICMP;
10140                         break;
10141                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10142                         flow_dv_translate_item_icmp6(match_mask, match_value,
10143                                                       items, tunnel);
10144                         last_item = MLX5_FLOW_LAYER_ICMP6;
10145                         break;
10146                 case RTE_FLOW_ITEM_TYPE_TAG:
10147                         flow_dv_translate_item_tag(dev, match_mask,
10148                                                    match_value, items);
10149                         last_item = MLX5_FLOW_ITEM_TAG;
10150                         break;
10151                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10152                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10153                                                         match_value, items);
10154                         last_item = MLX5_FLOW_ITEM_TAG;
10155                         break;
10156                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10157                         flow_dv_translate_item_tx_queue(dev, match_mask,
10158                                                         match_value,
10159                                                         items);
10160                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10161                         break;
10162                 case RTE_FLOW_ITEM_TYPE_GTP:
10163                         flow_dv_translate_item_gtp(match_mask, match_value,
10164                                                    items, tunnel);
10165                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10166                         last_item = MLX5_FLOW_LAYER_GTP;
10167                         break;
10168                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10169                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10170                                 /* Create it only the first time to be used. */
10171                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10172                                 if (ret)
10173                                         return rte_flow_error_set
10174                                                 (error, -ret,
10175                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10176                                                 NULL,
10177                                                 "cannot create eCPRI parser");
10178                         }
10179                         /* Adjust the length matcher and device flow value. */
10180                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10181                         dev_flow->dv.value.size =
10182                                         MLX5_ST_SZ_BYTES(fte_match_param);
10183                         flow_dv_translate_item_ecpri(dev, match_mask,
10184                                                      match_value, items);
10185                         /* No other protocol should follow eCPRI layer. */
10186                         last_item = MLX5_FLOW_LAYER_ECPRI;
10187                         break;
10188                 default:
10189                         break;
10190                 }
10191                 item_flags |= last_item;
10192         }
10193         /*
10194          * When E-Switch mode is enabled, we have two cases where we need to
10195          * set the source port manually.
10196          * The first one, is in case of Nic steering rule, and the second is
10197          * E-Switch rule where no port_id item was found. In both cases
10198          * the source port is set according the current port in use.
10199          */
10200         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10201             (priv->representor || priv->master)) {
10202                 if (flow_dv_translate_item_port_id(dev, match_mask,
10203                                                    match_value, NULL))
10204                         return -rte_errno;
10205         }
10206 #ifdef RTE_LIBRTE_MLX5_DEBUG
10207         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10208                                               dev_flow->dv.value.buf));
10209 #endif
10210         /*
10211          * Layers may be already initialized from prefix flow if this dev_flow
10212          * is the suffix flow.
10213          */
10214         handle->layers |= item_flags;
10215         if (action_flags & MLX5_FLOW_ACTION_RSS)
10216                 flow_dv_hashfields_set(dev_flow, rss_desc);
10217         /* Register matcher. */
10218         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10219                                     matcher.mask.size);
10220         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10221                                                      matcher.priority);
10222         /* reserved field no needs to be set to 0 here. */
10223         tbl_key.domain = attr->transfer;
10224         tbl_key.direction = attr->egress;
10225         tbl_key.table_id = dev_flow->dv.group;
10226         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10227                 return -rte_errno;
10228         return 0;
10229 }
10230
10231 /**
10232  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10233  * and tunnel.
10234  *
10235  * @param[in, out] action
10236  *   Shred RSS action holding hash RX queue objects.
10237  * @param[in] hash_fields
10238  *   Defines combination of packet fields to participate in RX hash.
10239  * @param[in] tunnel
10240  *   Tunnel type
10241  * @param[in] hrxq_idx
10242  *   Hash RX queue index to set.
10243  *
10244  * @return
10245  *   0 on success, otherwise negative errno value.
10246  */
10247 static int
10248 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10249                               const uint64_t hash_fields,
10250                               const int tunnel,
10251                               uint32_t hrxq_idx)
10252 {
10253         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10254
10255         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10256         case MLX5_RSS_HASH_IPV4:
10257                 hrxqs[0] = hrxq_idx;
10258                 return 0;
10259         case MLX5_RSS_HASH_IPV4_TCP:
10260                 hrxqs[1] = hrxq_idx;
10261                 return 0;
10262         case MLX5_RSS_HASH_IPV4_UDP:
10263                 hrxqs[2] = hrxq_idx;
10264                 return 0;
10265         case MLX5_RSS_HASH_IPV6:
10266                 hrxqs[3] = hrxq_idx;
10267                 return 0;
10268         case MLX5_RSS_HASH_IPV6_TCP:
10269                 hrxqs[4] = hrxq_idx;
10270                 return 0;
10271         case MLX5_RSS_HASH_IPV6_UDP:
10272                 hrxqs[5] = hrxq_idx;
10273                 return 0;
10274         case MLX5_RSS_HASH_NONE:
10275                 hrxqs[6] = hrxq_idx;
10276                 return 0;
10277         default:
10278                 return -1;
10279         }
10280 }
10281
10282 /**
10283  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10284  * and tunnel.
10285  *
10286  * @param[in] action
10287  *   Shred RSS action holding hash RX queue objects.
10288  * @param[in] hash_fields
10289  *   Defines combination of packet fields to participate in RX hash.
10290  * @param[in] tunnel
10291  *   Tunnel type
10292  *
10293  * @return
10294  *   Valid hash RX queue index, otherwise 0.
10295  */
10296 static uint32_t
10297 __flow_dv_action_rss_hrxq_lookup(const struct mlx5_shared_action_rss *action,
10298                                  const uint64_t hash_fields,
10299                                  const int tunnel)
10300 {
10301         const uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10302
10303         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10304         case MLX5_RSS_HASH_IPV4:
10305                 return hrxqs[0];
10306         case MLX5_RSS_HASH_IPV4_TCP:
10307                 return hrxqs[1];
10308         case MLX5_RSS_HASH_IPV4_UDP:
10309                 return hrxqs[2];
10310         case MLX5_RSS_HASH_IPV6:
10311                 return hrxqs[3];
10312         case MLX5_RSS_HASH_IPV6_TCP:
10313                 return hrxqs[4];
10314         case MLX5_RSS_HASH_IPV6_UDP:
10315                 return hrxqs[5];
10316         case MLX5_RSS_HASH_NONE:
10317                 return hrxqs[6];
10318         default:
10319                 return 0;
10320         }
10321 }
10322
10323 /**
10324  * Retrieves hash RX queue suitable for the *flow*.
10325  * If shared action configured for *flow* suitable hash RX queue will be
10326  * retrieved from attached shared action.
10327  *
10328  * @param[in] flow
10329  *   Shred RSS action holding hash RX queue objects.
10330  * @param[in] dev_flow
10331  *   Pointer to the sub flow.
10332  * @param[out] hrxq
10333  *   Pointer to retrieved hash RX queue object.
10334  *
10335  * @return
10336  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10337  */
10338 static uint32_t
10339 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10340                            struct mlx5_flow *dev_flow,
10341                            struct mlx5_hrxq **hrxq)
10342 {
10343         struct mlx5_priv *priv = dev->data->dev_private;
10344         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10345         uint32_t hrxq_idx;
10346
10347         if (flow->shared_rss) {
10348                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10349                                 (flow->shared_rss, dev_flow->hash_fields,
10350                                  !!(dev_flow->handle->layers &
10351                                     MLX5_FLOW_LAYER_TUNNEL));
10352                 if (hrxq_idx) {
10353                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10354                                                hrxq_idx);
10355                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10356                                            __ATOMIC_RELAXED);
10357                 }
10358         } else {
10359                 struct mlx5_flow_rss_desc *rss_desc =
10360                                 &wks->rss_desc[!!wks->flow_nested_idx];
10361
10362                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10363                                              &hrxq_idx);
10364         }
10365         return hrxq_idx;
10366 }
10367
/**
 * Apply the flow to the NIC, lock free,
 * (mutex should be acquired by caller).
 *
 * Walks every sub (dev) flow recorded in the thread workspace, appends the
 * fate action (drop / hash RX queue / default miss) to the already-translated
 * DV action array, and asks the kernel/rdma-core to create the flow rule.
 * On failure, all hash RX queue references and VF VLAN interfaces acquired
 * by earlier iterations are rolled back.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();

	MLX5_ASSERT(wks);
	/* Iterate the sub flows of the current (possibly nested) flow. */
	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		/* Fate action is appended after the translated actions. */
		n = dv->actions_n;
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				/* E-Switch rules use the shared drop action. */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
			}
		} else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array) {
			/*
			 * Queue fate without sample/dest-array: resolve the
			 * hash RX queue (shared RSS or freshly prepared).
			 */
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
						(dev, flow, dev_flow, &hrxq);
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back resources taken by already-created handles. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
10473
10474 void
10475 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10476                           struct mlx5_cache_entry *entry)
10477 {
10478         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10479                                                           entry);
10480
10481         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10482         mlx5_free(cache);
10483 }
10484
10485 /**
10486  * Release the flow matcher.
10487  *
10488  * @param dev
10489  *   Pointer to Ethernet device.
10490  * @param handle
10491  *   Pointer to mlx5_flow_handle.
10492  *
10493  * @return
10494  *   1 while a reference on it exists, 0 when freed.
10495  */
10496 static int
10497 flow_dv_matcher_release(struct rte_eth_dev *dev,
10498                         struct mlx5_flow_handle *handle)
10499 {
10500         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10501         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10502                                                             typeof(*tbl), tbl);
10503         int ret;
10504
10505         MLX5_ASSERT(matcher->matcher_object);
10506         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10507         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10508         return ret;
10509 }
10510
10511 /**
10512  * Release encap_decap resource.
10513  *
10514  * @param list
10515  *   Pointer to the hash list.
10516  * @param entry
10517  *   Pointer to exist resource entry object.
10518  */
10519 void
10520 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10521                               struct mlx5_hlist_entry *entry)
10522 {
10523         struct mlx5_dev_ctx_shared *sh = list->ctx;
10524         struct mlx5_flow_dv_encap_decap_resource *res =
10525                 container_of(entry, typeof(*res), entry);
10526
10527         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10528         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10529 }
10530
10531 /**
10532  * Release an encap/decap resource.
10533  *
10534  * @param dev
10535  *   Pointer to Ethernet device.
10536  * @param encap_decap_idx
10537  *   Index of encap decap resource.
10538  *
10539  * @return
10540  *   1 while a reference on it exists, 0 when freed.
10541  */
10542 static int
10543 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10544                                      uint32_t encap_decap_idx)
10545 {
10546         struct mlx5_priv *priv = dev->data->dev_private;
10547         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10548
10549         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10550                                         encap_decap_idx);
10551         if (!cache_resource)
10552                 return 0;
10553         MLX5_ASSERT(cache_resource->action);
10554         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10555                                      &cache_resource->entry);
10556 }
10557
10558 /**
10559  * Release an jump to table action resource.
10560  *
10561  * @param dev
10562  *   Pointer to Ethernet device.
10563  * @param handle
10564  *   Pointer to mlx5_flow_handle.
10565  *
10566  * @return
10567  *   1 while a reference on it exists, 0 when freed.
10568  */
10569 static int
10570 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10571                                   struct mlx5_flow_handle *handle)
10572 {
10573         struct mlx5_priv *priv = dev->data->dev_private;
10574         struct mlx5_flow_tbl_data_entry *tbl_data;
10575
10576         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10577                              handle->rix_jump);
10578         if (!tbl_data)
10579                 return 0;
10580         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10581 }
10582
10583 void
10584 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10585                          struct mlx5_hlist_entry *entry)
10586 {
10587         struct mlx5_flow_dv_modify_hdr_resource *res =
10588                 container_of(entry, typeof(*res), entry);
10589
10590         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10591         mlx5_free(entry);
10592 }
10593
10594 /**
10595  * Release a modify-header resource.
10596  *
10597  * @param dev
10598  *   Pointer to Ethernet device.
10599  * @param handle
10600  *   Pointer to mlx5_flow_handle.
10601  *
10602  * @return
10603  *   1 while a reference on it exists, 0 when freed.
10604  */
10605 static int
10606 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10607                                     struct mlx5_flow_handle *handle)
10608 {
10609         struct mlx5_priv *priv = dev->data->dev_private;
10610         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10611
10612         MLX5_ASSERT(entry->action);
10613         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10614 }
10615
10616 void
10617 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10618                           struct mlx5_cache_entry *entry)
10619 {
10620         struct mlx5_dev_ctx_shared *sh = list->ctx;
10621         struct mlx5_flow_dv_port_id_action_resource *cache =
10622                         container_of(entry, typeof(*cache), entry);
10623
10624         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10625         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10626 }
10627
10628 /**
10629  * Release port ID action resource.
10630  *
10631  * @param dev
10632  *   Pointer to Ethernet device.
10633  * @param handle
10634  *   Pointer to mlx5_flow_handle.
10635  *
10636  * @return
10637  *   1 while a reference on it exists, 0 when freed.
10638  */
10639 static int
10640 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10641                                         uint32_t port_id)
10642 {
10643         struct mlx5_priv *priv = dev->data->dev_private;
10644         struct mlx5_flow_dv_port_id_action_resource *cache;
10645
10646         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10647         if (!cache)
10648                 return 0;
10649         MLX5_ASSERT(cache->action);
10650         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10651                                      &cache->entry);
10652 }
10653
10654 void
10655 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10656                             struct mlx5_cache_entry *entry)
10657 {
10658         struct mlx5_dev_ctx_shared *sh = list->ctx;
10659         struct mlx5_flow_dv_push_vlan_action_resource *cache =
10660                         container_of(entry, typeof(*cache), entry);
10661
10662         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10663         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10664 }
10665
10666 /**
10667  * Release push vlan action resource.
10668  *
10669  * @param dev
10670  *   Pointer to Ethernet device.
10671  * @param handle
10672  *   Pointer to mlx5_flow_handle.
10673  *
10674  * @return
10675  *   1 while a reference on it exists, 0 when freed.
10676  */
10677 static int
10678 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10679                                           struct mlx5_flow_handle *handle)
10680 {
10681         struct mlx5_priv *priv = dev->data->dev_private;
10682         struct mlx5_flow_dv_push_vlan_action_resource *cache;
10683         uint32_t idx = handle->dvh.rix_push_vlan;
10684
10685         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10686         if (!cache)
10687                 return 0;
10688         MLX5_ASSERT(cache->action);
10689         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10690                                      &cache->entry);
10691 }
10692
/**
 * Release the fate resource.
 *
 * Releases whichever fate object (hash Rx queue, jump table or port id
 * action) the handle currently references, then clears the reference
 * index. The rix_hrxq/rix_jump/rix_port_id_action/rix_fate fields are
 * presumably overlapping members selected by fate_action -- TODO confirm
 * against struct mlx5_flow_handle.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to mlx5_flow_handle.
 */
static void
flow_dv_fate_resource_release(struct rte_eth_dev *dev,
			       struct mlx5_flow_handle *handle)
{
	/* Nothing referenced - nothing to release. */
	if (!handle->rix_fate)
		return;
	switch (handle->fate_action) {
	case MLX5_FLOW_FATE_QUEUE:
		/* Drop the reference on the hash Rx queue object. */
		mlx5_hrxq_release(dev, handle->rix_hrxq);
		break;
	case MLX5_FLOW_FATE_JUMP:
		flow_dv_jump_tbl_resource_release(dev, handle);
		break;
	case MLX5_FLOW_FATE_PORT_ID:
		flow_dv_port_id_action_resource_release(dev,
				handle->rix_port_id_action);
		break;
	default:
		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
		break;
	}
	/* Mark the fate as released regardless of the action type. */
	handle->rix_fate = 0;
}
10724
10725 void
10726 flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
10727                          struct mlx5_cache_entry *entry)
10728 {
10729         struct rte_eth_dev *dev = list->ctx;
10730         struct mlx5_priv *priv = dev->data->dev_private;
10731         struct mlx5_flow_dv_sample_resource *cache_resource =
10732                         container_of(entry, typeof(*cache_resource), entry);
10733
10734         if (cache_resource->verbs_action)
10735                 claim_zero(mlx5_glue->destroy_flow_action
10736                                 (cache_resource->verbs_action));
10737         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10738                 if (cache_resource->default_miss)
10739                         claim_zero(mlx5_glue->destroy_flow_action
10740                           (cache_resource->default_miss));
10741         }
10742         if (cache_resource->normal_path_tbl)
10743                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10744                         cache_resource->normal_path_tbl);
10745         flow_dv_sample_sub_actions_release(dev,
10746                                 &cache_resource->sample_idx);
10747         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10748                         cache_resource->idx);
10749         DRV_LOG(DEBUG, "sample resource %p: removed",
10750                 (void *)cache_resource);
10751 }
10752
10753 /**
10754  * Release an sample resource.
10755  *
10756  * @param dev
10757  *   Pointer to Ethernet device.
10758  * @param handle
10759  *   Pointer to mlx5_flow_handle.
10760  *
10761  * @return
10762  *   1 while a reference on it exists, 0 when freed.
10763  */
10764 static int
10765 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
10766                                      struct mlx5_flow_handle *handle)
10767 {
10768         struct mlx5_priv *priv = dev->data->dev_private;
10769         struct mlx5_flow_dv_sample_resource *cache_resource;
10770
10771         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10772                          handle->dvh.rix_sample);
10773         if (!cache_resource)
10774                 return 0;
10775         MLX5_ASSERT(cache_resource->verbs_action);
10776         return mlx5_cache_unregister(&priv->sh->sample_action_list,
10777                                      &cache_resource->entry);
10778 }
10779
10780 void
10781 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
10782                              struct mlx5_cache_entry *entry)
10783 {
10784         struct rte_eth_dev *dev = list->ctx;
10785         struct mlx5_priv *priv = dev->data->dev_private;
10786         struct mlx5_flow_dv_dest_array_resource *cache_resource =
10787                         container_of(entry, typeof(*cache_resource), entry);
10788         uint32_t i = 0;
10789
10790         MLX5_ASSERT(cache_resource->action);
10791         if (cache_resource->action)
10792                 claim_zero(mlx5_glue->destroy_flow_action
10793                                         (cache_resource->action));
10794         for (; i < cache_resource->num_of_dest; i++)
10795                 flow_dv_sample_sub_actions_release(dev,
10796                                 &cache_resource->sample_idx[i]);
10797         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10798                         cache_resource->idx);
10799         DRV_LOG(DEBUG, "destination array resource %p: removed",
10800                 (void *)cache_resource);
10801 }
10802
10803 /**
10804  * Release an destination array resource.
10805  *
10806  * @param dev
10807  *   Pointer to Ethernet device.
10808  * @param handle
10809  *   Pointer to mlx5_flow_handle.
10810  *
10811  * @return
10812  *   1 while a reference on it exists, 0 when freed.
10813  */
10814 static int
10815 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
10816                                     struct mlx5_flow_handle *handle)
10817 {
10818         struct mlx5_priv *priv = dev->data->dev_private;
10819         struct mlx5_flow_dv_dest_array_resource *cache;
10820
10821         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
10822                                handle->dvh.rix_dest_array);
10823         if (!cache)
10824                 return 0;
10825         MLX5_ASSERT(cache->action);
10826         return mlx5_cache_unregister(&priv->sh->dest_array_list,
10827                                      &cache->entry);
10828 }
10829
10830 /**
10831  * Remove the flow from the NIC but keeps it in memory.
10832  * Lock free, (mutex should be acquired by caller).
10833  *
10834  * @param[in] dev
10835  *   Pointer to Ethernet device.
10836  * @param[in, out] flow
10837  *   Pointer to flow structure.
10838  */
10839 static void
10840 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10841 {
10842         struct mlx5_flow_handle *dh;
10843         uint32_t handle_idx;
10844         struct mlx5_priv *priv = dev->data->dev_private;
10845
10846         if (!flow)
10847                 return;
10848         handle_idx = flow->dev_handles;
10849         while (handle_idx) {
10850                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
10851                                     handle_idx);
10852                 if (!dh)
10853                         return;
10854                 if (dh->drv_flow) {
10855                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
10856                         dh->drv_flow = NULL;
10857                 }
10858                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10859                         flow_dv_fate_resource_release(dev, dh);
10860                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10861                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10862                 handle_idx = dh->next.next;
10863         }
10864 }
10865
/**
 * Remove the flow from the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct rte_flow_shared_action *shared;
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow)
		return;
	/* First detach the flow rules from hardware. */
	__flow_dv_remove(dev, flow);
	/* Drop the reference taken on the shared RSS action, if any. */
	shared = mlx5_flow_get_shared_rss(flow);
	if (shared)
		__atomic_sub_fetch(&shared->refcnt, 1, __ATOMIC_RELAXED);
	if (flow->counter) {
		flow_dv_counter_release(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		struct mlx5_flow_meter *fm;

		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
				    flow->meter);
		if (fm)
			mlx5_flow_meter_detach(fm);
		flow->meter = 0;
	}
	/* Release every device handle and all resources it references. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		/* Unlink before releasing so the list stays consistent. */
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		/* Releases the remaining (non-queue) fate object, if any. */
		flow_dv_fate_resource_release(dev, dev_handle);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
}
10931
10932 /**
10933  * Release array of hash RX queue objects.
10934  * Helper function.
10935  *
10936  * @param[in] dev
10937  *   Pointer to the Ethernet device structure.
10938  * @param[in, out] hrxqs
10939  *   Array of hash RX queue objects.
10940  *
10941  * @return
10942  *   Total number of references to hash RX queue objects in *hrxqs* array
10943  *   after this operation.
10944  */
10945 static int
10946 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
10947                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
10948 {
10949         size_t i;
10950         int remaining = 0;
10951
10952         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
10953                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
10954
10955                 if (!ret)
10956                         (*hrxqs)[i] = 0;
10957                 remaining += ret;
10958         }
10959         return remaining;
10960 }
10961
10962 /**
10963  * Release all hash RX queue objects representing shared RSS action.
10964  *
10965  * @param[in] dev
10966  *   Pointer to the Ethernet device structure.
10967  * @param[in, out] action
10968  *   Shared RSS action to remove hash RX queue objects from.
10969  *
10970  * @return
10971  *   Total number of references to hash RX queue objects stored in *action*
10972  *   after this operation.
10973  *   Expected to be 0 if no external references held.
10974  */
10975 static int
10976 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
10977                                  struct mlx5_shared_action_rss *action)
10978 {
10979         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
10980                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
10981 }
10982
/**
 * Setup shared RSS action.
 * Prepare set of hash RX queue objects sufficient to handle all valid
 * hash_fields combinations (see enum ibv_rx_hash_fields).
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] action
 *   Partially initialized shared RSS action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			struct mlx5_shared_action_rss *action,
			struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	/* Seed the descriptor from the user-provided origin action. */
	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = action->origin.queue;
	rss_desc.queue_num = action->origin.queue_num;
	rss_desc.standalone = true;
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel;

		/* Create both the plain and the tunneled hrxq variants. */
		for (tunnel = 0; tunnel < 2; tunnel++) {
			rss_desc.tunnel = tunnel;
			rss_desc.hash_fields = hash_fields;
			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
			if (!hrxq_idx) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error_hrxq_new;
			}
			err = __flow_dv_action_rss_hrxq_set
				(action, hash_fields, tunnel, hrxq_idx);
			MLX5_ASSERT(!err);
		}
	}
	return 0;
error_hrxq_new:
	/* Preserve rte_errno across the release calls below. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, action);
	rte_errno = err;
	return -rte_errno;
}
11041
/**
 * Create shared RSS action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] rss
 *   RSS action specification used to create shared action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   A valid shared action handle in case of success, NULL otherwise and
 *   rte_errno is set.
 */
static struct rte_flow_shared_action *
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
			    const struct rte_flow_shared_action_conf *conf,
			    const struct rte_flow_action_rss *rss,
			    struct rte_flow_error *error)
{
	struct rte_flow_shared_action *shared_action = NULL;
	void *queue = NULL;
	struct mlx5_shared_action_rss *shared_rss;
	struct rte_flow_action_rss *origin;
	const uint8_t *rss_key;
	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);

	RTE_SET_USED(conf);
	/* The action owns a private copy of the queue array. */
	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	shared_action = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*shared_action), 0,
				    SOCKET_ID_ANY);
	if (!shared_action || !queue) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	shared_rss = &shared_action->rss;
	shared_rss->queue = queue;
	origin = &shared_rss->origin;
	origin->func = rss->func;
	origin->level = rss->level;
	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(shared_rss->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	origin->key = &shared_rss->key[0];
	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
	memcpy(shared_rss->queue, rss->queue, queue_size);
	origin->queue = shared_rss->queue;
	origin->queue_num = rss->queue_num;
	if (__flow_dv_action_rss_setup(dev, shared_rss, error))
		goto error_rss_init;
	shared_action->type = MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS;
	return shared_action;
error_rss_init:
	/* Either pointer may be NULL here; mlx5_free() presumably accepts
	 * NULL like free() -- TODO confirm against mlx5_malloc.h.
	 */
	mlx5_free(shared_action);
	mlx5_free(queue);
	return NULL;
}
11107
11108 /**
11109  * Destroy the shared RSS action.
11110  * Release related hash RX queue objects.
11111  *
11112  * @param[in] dev
11113  *   Pointer to the Ethernet device structure.
11114  * @param[in] shared_rss
11115  *   The shared RSS action object to be removed.
11116  * @param[out] error
11117  *   Perform verbose error reporting if not NULL. Initialized in case of
11118  *   error only.
11119  *
11120  * @return
11121  *   0 on success, otherwise negative errno value.
11122  */
11123 static int
11124 __flow_dv_action_rss_release(struct rte_eth_dev *dev,
11125                          struct mlx5_shared_action_rss *shared_rss,
11126                          struct rte_flow_error *error)
11127 {
11128         struct rte_flow_shared_action *shared_action = NULL;
11129         uint32_t old_refcnt = 1;
11130         int remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11131
11132         if (remaining) {
11133                 return rte_flow_error_set(error, ETOOMANYREFS,
11134                                           RTE_FLOW_ERROR_TYPE_ACTION,
11135                                           NULL,
11136                                           "shared rss hrxq has references");
11137         }
11138         shared_action = container_of(shared_rss,
11139                                      struct rte_flow_shared_action, rss);
11140         if (!__atomic_compare_exchange_n(&shared_action->refcnt, &old_refcnt,
11141                                          0, 0,
11142                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
11143                 return rte_flow_error_set(error, ETOOMANYREFS,
11144                                           RTE_FLOW_ERROR_TYPE_ACTION,
11145                                           NULL,
11146                                           "shared rss has references");
11147         }
11148         rte_free(shared_rss->queue);
11149         return 0;
11150 }
11151
11152 /**
11153  * Create shared action, lock free,
11154  * (mutex should be acquired by caller).
11155  * Dispatcher for action type specific call.
11156  *
11157  * @param[in] dev
11158  *   Pointer to the Ethernet device structure.
11159  * @param[in] conf
11160  *   Shared action configuration.
11161  * @param[in] action
11162  *   Action specification used to create shared action.
11163  * @param[out] error
11164  *   Perform verbose error reporting if not NULL. Initialized in case of
11165  *   error only.
11166  *
11167  * @return
11168  *   A valid shared action handle in case of success, NULL otherwise and
11169  *   rte_errno is set.
11170  */
11171 static struct rte_flow_shared_action *
11172 __flow_dv_action_create(struct rte_eth_dev *dev,
11173                         const struct rte_flow_shared_action_conf *conf,
11174                         const struct rte_flow_action *action,
11175                         struct rte_flow_error *error)
11176 {
11177         struct rte_flow_shared_action *shared_action = NULL;
11178         struct mlx5_priv *priv = dev->data->dev_private;
11179
11180         switch (action->type) {
11181         case RTE_FLOW_ACTION_TYPE_RSS:
11182                 shared_action = __flow_dv_action_rss_create(dev, conf,
11183                                                             action->conf,
11184                                                             error);
11185                 break;
11186         default:
11187                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11188                                    NULL, "action type not supported");
11189                 break;
11190         }
11191         if (shared_action) {
11192                 __atomic_add_fetch(&shared_action->refcnt, 1,
11193                                    __ATOMIC_RELAXED);
11194                 LIST_INSERT_HEAD(&priv->shared_actions, shared_action, next);
11195         }
11196         return shared_action;
11197 }
11198
11199 /**
11200  * Destroy the shared action.
11201  * Release action related resources on the NIC and the memory.
11202  * Lock free, (mutex should be acquired by caller).
11203  * Dispatcher for action type specific call.
11204  *
11205  * @param[in] dev
11206  *   Pointer to the Ethernet device structure.
11207  * @param[in] action
11208  *   The shared action object to be removed.
11209  * @param[out] error
11210  *   Perform verbose error reporting if not NULL. Initialized in case of
11211  *   error only.
11212  *
11213  * @return
11214  *   0 on success, otherwise negative errno value.
11215  */
11216 static int
11217 __flow_dv_action_destroy(struct rte_eth_dev *dev,
11218                          struct rte_flow_shared_action *action,
11219                          struct rte_flow_error *error)
11220 {
11221         int ret;
11222
11223         switch (action->type) {
11224         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11225                 ret = __flow_dv_action_rss_release(dev, &action->rss, error);
11226                 break;
11227         default:
11228                 return rte_flow_error_set(error, ENOTSUP,
11229                                           RTE_FLOW_ERROR_TYPE_ACTION,
11230                                           NULL,
11231                                           "action type not supported");
11232         }
11233         if (ret)
11234                 return ret;
11235         LIST_REMOVE(action, next);
11236         rte_free(action);
11237         return 0;
11238 }
11239
/**
 * Updates in place shared RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] shared_rss
 *   The shared RSS action object to be updated.
 * @param[in] action_conf
 *   RSS action specification used to modify *shared_rss*.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 * @note: currently only support update of RSS queues.
 *
 * NOTE(review): if mlx5_hrxq_modify() fails partway through the loop,
 * hrxqs already modified keep the new configuration while the rest keep
 * the old one - confirm this partial-update behavior is acceptable.
 */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev,
			    struct mlx5_shared_action_rss *shared_rss,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	size_t i;
	int ret;
	void *queue = NULL;
	const uint8_t *rss_key;
	uint32_t rss_key_len;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	/* Allocate the replacement queue array up front. */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	/* NULL key in the update request means the default RSS key. */
	if (action_conf->key) {
		rss_key = action_conf->key;
		rss_key_len = action_conf->key_len;
	} else {
		rss_key = rss_hash_default_key;
		rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	}
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel;

		/* Re-configure both plain and tunneled hrxq variants. */
		for (tunnel = 0; tunnel < 2; tunnel++) {
			hrxq_idx = __flow_dv_action_rss_hrxq_lookup
					(shared_rss, hash_fields, tunnel);
			MLX5_ASSERT(hrxq_idx);
			ret = mlx5_hrxq_modify
				(dev, hrxq_idx,
				 rss_key, rss_key_len,
				 hash_fields,
				 action_conf->queue, action_conf->queue_num);
			if (ret) {
				mlx5_free(queue);
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "cannot update hash queue");
			}
		}
	}
	/* All hrxqs updated - swap in the new queue array. */
	mlx5_free(shared_rss->queue);
	shared_rss->queue = queue;
	memcpy(shared_rss->queue, action_conf->queue, queue_size);
	shared_rss->origin.queue = shared_rss->queue;
	shared_rss->origin.queue_num = action_conf->queue_num;
	return 0;
}
11315
11316 /**
11317  * Updates in place shared action configuration, lock free,
11318  * (mutex should be acquired by caller).
11319  *
11320  * @param[in] dev
11321  *   Pointer to the Ethernet device structure.
11322  * @param[in] action
11323  *   The shared action object to be updated.
11324  * @param[in] action_conf
11325  *   Action specification used to modify *action*.
11326  *   *action_conf* should be of type correlating with type of the *action*,
11327  *   otherwise considered as invalid.
11328  * @param[out] error
11329  *   Perform verbose error reporting if not NULL. Initialized in case of
11330  *   error only.
11331  *
11332  * @return
11333  *   0 on success, otherwise negative errno value.
11334  */
11335 static int
11336 __flow_dv_action_update(struct rte_eth_dev *dev,
11337                         struct rte_flow_shared_action *action,
11338                         const void *action_conf,
11339                         struct rte_flow_error *error)
11340 {
11341         switch (action->type) {
11342         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
11343                 return __flow_dv_action_rss_update(dev, &action->rss,
11344                                                    action_conf, error);
11345         default:
11346                 return rte_flow_error_set(error, ENOTSUP,
11347                                           RTE_FLOW_ERROR_TYPE_ACTION,
11348                                           NULL,
11349                                           "action type not supported");
11350         }
11351 }
/**
 * Query a dv flow rule for its statistics via devx.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
		    void *data, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;

	/* Counter statistics require DevX support. */
	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (flow->counter) {
		uint64_t pkts, bytes;
		struct mlx5_flow_counter *cnt;

		cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
						 NULL);
		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
					       &bytes);

		if (err)
			return rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot read counters");
		qc->hits_set = 1;
		qc->bytes_set = 1;
		/* Report deltas relative to the last reset baseline. */
		qc->hits = pkts - cnt->hits;
		qc->bytes = bytes - cnt->bytes;
		/* On reset, the current raw values become the new baseline. */
		if (qc->reset) {
			cnt->hits = pkts;
			cnt->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}
11407
11408 /**
11409  * Query a flow rule AGE action for aging information.
11410  *
11411  * @param[in] dev
11412  *   Pointer to Ethernet device.
11413  * @param[in] flow
11414  *   Pointer to the sub flow.
11415  * @param[out] data
11416  *   data retrieved by the query.
11417  * @param[out] error
11418  *   Perform verbose error reporting if not NULL.
11419  *
11420  * @return
11421  *   0 on success, a negative errno value otherwise and rte_errno is set.
11422  */
11423 static int
11424 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11425                   void *data, struct rte_flow_error *error)
11426 {
11427         struct rte_flow_query_age *resp = data;
11428
11429         if (flow->counter) {
11430                 struct mlx5_age_param *age_param =
11431                                 flow_dv_counter_idx_get_age(dev, flow->counter);
11432
11433                 if (!age_param || !age_param->timeout)
11434                         return rte_flow_error_set
11435                                         (error, EINVAL,
11436                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11437                                          NULL, "cannot read age data");
11438                 resp->aged = __atomic_load_n(&age_param->state,
11439                                              __ATOMIC_RELAXED) ==
11440                                                         AGE_TMOUT ? 1 : 0;
11441                 resp->sec_since_last_hit_valid = !resp->aged;
11442                 if (resp->sec_since_last_hit_valid)
11443                         resp->sec_since_last_hit =
11444                                 __atomic_load_n(&age_param->sec_since_last_hit,
11445                                                 __ATOMIC_RELAXED);
11446                 return 0;
11447         }
11448         return rte_flow_error_set(error, EINVAL,
11449                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11450                                   NULL,
11451                                   "age data not available");
11452 }
11453
11454 /**
11455  * Query a flow.
11456  *
11457  * @see rte_flow_query()
11458  * @see rte_flow_ops
11459  */
11460 static int
11461 flow_dv_query(struct rte_eth_dev *dev,
11462               struct rte_flow *flow __rte_unused,
11463               const struct rte_flow_action *actions __rte_unused,
11464               void *data __rte_unused,
11465               struct rte_flow_error *error __rte_unused)
11466 {
11467         int ret = -EINVAL;
11468
11469         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11470                 switch (actions->type) {
11471                 case RTE_FLOW_ACTION_TYPE_VOID:
11472                         break;
11473                 case RTE_FLOW_ACTION_TYPE_COUNT:
11474                         ret = flow_dv_query_count(dev, flow, data, error);
11475                         break;
11476                 case RTE_FLOW_ACTION_TYPE_AGE:
11477                         ret = flow_dv_query_age(dev, flow, data, error);
11478                         break;
11479                 default:
11480                         return rte_flow_error_set(error, ENOTSUP,
11481                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11482                                                   actions,
11483                                                   "action not supported");
11484                 }
11485         }
11486         return ret;
11487 }
11488
/**
 * Destroy the meter table set.
 * Lock free, (mutex should be acquired by caller).
 *
 * Releases, in order: the per-domain drop policer rules, then each
 * domain's matchers and tables (egress, ingress, transfer), then the
 * shared drop action, and finally the container itself.  Every release
 * is guarded by a NULL check, so a partially-built table set (as left
 * by a failed flow_dv_create_mtr_tbl()) can be passed here safely.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] tbl
 *   Pointer to the meter table set.  May be NULL or partially populated.
 *
 * @return
 *   Always 0.
 */
static int
flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
			struct mlx5_meter_domains_infos *tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtd =
				(struct mlx5_meter_domains_infos *)tbl;

	/* Nothing to do without a table set or with DV flows disabled. */
	if (!mtd || !priv->config.dv_flow_en)
		return 0;
	/* Destroy the default drop rules before their matchers/tables. */
	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_flow_os_destroy_flow
			   (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_flow_os_destroy_flow
			   (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_flow_os_destroy_flow
			   (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
	/* Egress domain: matchers first, then meter and suffix tables. */
	if (mtd->egress.color_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->egress.color_matcher));
	if (mtd->egress.any_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->egress.any_matcher));
	if (mtd->egress.tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
	if (mtd->egress.sfx_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
	/* Ingress domain. */
	if (mtd->ingress.color_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->ingress.color_matcher));
	if (mtd->ingress.any_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->ingress.any_matcher));
	if (mtd->ingress.tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
	if (mtd->ingress.sfx_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     mtd->ingress.sfx_tbl);
	/* Transfer (FDB) domain. */
	if (mtd->transfer.color_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->transfer.color_matcher));
	if (mtd->transfer.any_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->transfer.any_matcher));
	if (mtd->transfer.tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
	if (mtd->transfer.sfx_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     mtd->transfer.sfx_tbl);
	/* The drop action is shared by all domains; release it last. */
	if (mtd->drop_actn)
		claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
	mlx5_free(mtd);
	return 0;
}
11557
11558 /* Number of meter flow actions, count and jump or count and drop. */
11559 #define METER_ACTIONS 2
11560
/**
 * Create specify domain meter table and suffix table.
 *
 * For the selected domain (transfer, egress or ingress) this creates:
 * the METER-level table, the SUFFIX-level table, a lowest-priority
 * "any" matcher, a color matcher on the given REG_C, and a default
 * catch-all rule whose actions are (optional drop counter + drop).
 *
 * On failure, resources already stored in @p mtb are NOT released here;
 * the caller (flow_dv_create_mtr_tbl) cleans up the whole set via
 * flow_dv_destroy_mtr_tbl().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in,out] mtb
 *   Pointer to DV meter table set.
 * @param[in] egress
 *   Table attribute.
 * @param[in] transfer
 *   Table attribute.
 * @param[in] color_reg_c_idx
 *   Reg C index for color match.
 *
 * @return
 *   0 on success, -1 otherwise.
 *   NOTE(review): despite the original doc, rte_errno is not set on
 *   every failure path here - confirm before relying on it.
 */
static int
flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
			   struct mlx5_meter_domains_infos *mtb,
			   uint8_t egress, uint8_t transfer,
			   uint32_t color_reg_c_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[METER_ACTIONS];
	struct mlx5_meter_domain_info *dtb;
	struct rte_flow_error error;
	int i = 0;
	int ret;

	/* Transfer takes precedence over egress/ingress selection. */
	if (transfer)
		dtb = &mtb->transfer;
	else if (egress)
		dtb = &mtb->egress;
	else
		dtb = &mtb->ingress;
	/* Create the meter table with METER level. */
	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
					    egress, transfer, false, NULL, 0,
					    0, &error);
	if (!dtb->tbl) {
		DRV_LOG(ERR, "Failed to create meter policer table.");
		return -1;
	}
	/* Create the meter suffix table with SUFFIX level. */
	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
					    MLX5_FLOW_TABLE_LEVEL_SUFFIX,
					    egress, transfer, false, NULL, 0,
					    0, &error);
	if (!dtb->sfx_tbl) {
		DRV_LOG(ERR, "Failed to create meter suffix table.");
		return -1;
	}
	/* Create matchers, Any and Color. */
	/* "Any" matcher: priority 3 (lowest), no match criteria. */
	dv_attr.priority = 3;
	dv_attr.match_criteria_enable = 0;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->any_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter"
			     " policer default matcher.");
		goto error_exit;
	}
	/* Color matcher: highest priority, matches the color REG_C value. */
	dv_attr.priority = 0;
	dv_attr.match_criteria_enable =
				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->color_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
		goto error_exit;
	}
	/* The drop counter action is optional; drop action is mandatory. */
	if (mtb->count_actns[RTE_MTR_DROPPED])
		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
	actions[i++] = mtb->drop_actn;
	/* Default rule: lowest priority, match any, actions: drop. */
	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
				       actions,
				       &dtb->policer_rules[RTE_MTR_DROPPED]);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
		goto error_exit;
	}
	return 0;
error_exit:
	/* Partially created objects are released by the caller's cleanup. */
	return -1;
}
11663
/**
 * Create the needed meter and suffix tables.
 * Lock free, (mutex should be acquired by caller).
 *
 * Allocates the meter domain container, collects the per-color counter
 * actions from the meter's policer stats, creates the shared drop
 * action, and prepares the egress, ingress and (if E-Switch is enabled)
 * FDB domain tables.  On any failure, everything created so far is
 * released via flow_dv_destroy_mtr_tbl().
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to the flow meter.
 *
 * @return
 *   Pointer to table set on success, NULL otherwise and rte_errno is set.
 */
static struct mlx5_meter_domains_infos *
flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
		       const struct mlx5_flow_meter *fm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb;
	int ret;
	int i;

	/* Metering must be enabled on this port. */
	if (!priv->mtr_en) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
	if (!mtb) {
		DRV_LOG(ERR, "Failed to allocate memory for meter.");
		return NULL;
	}
	/* Create meter count actions */
	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
		struct mlx5_flow_counter *cnt;
		/* Colors without a configured counter are skipped. */
		if (!fm->policer_stats.cnt[i])
			continue;
		cnt = flow_dv_counter_get_by_idx(dev,
		      fm->policer_stats.cnt[i], NULL);
		mtb->count_actns[i] = cnt->action;
	}
	/* Create drop action. */
	ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create drop action.");
		goto error_exit;
	}
	/* Egress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare egress meter table.");
		goto error_exit;
	}
	/* Ingress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
		goto error_exit;
	}
	/* FDB meter table. */
	if (priv->config.dv_esw_en) {
		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
						 priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
			goto error_exit;
		}
	}
	return mtb;
error_exit:
	/* Releases everything created so far; tolerates partial state. */
	flow_dv_destroy_mtr_tbl(dev, mtb);
	return NULL;
}
11735
11736 /**
11737  * Destroy domain policer rule.
11738  *
11739  * @param[in] dt
11740  *   Pointer to domain table.
11741  */
11742 static void
11743 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
11744 {
11745         int i;
11746
11747         for (i = 0; i < RTE_MTR_DROPPED; i++) {
11748                 if (dt->policer_rules[i]) {
11749                         claim_zero(mlx5_flow_os_destroy_flow
11750                                    (dt->policer_rules[i]));
11751                         dt->policer_rules[i] = NULL;
11752                 }
11753         }
11754         if (dt->jump_actn) {
11755                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
11756                 dt->jump_actn = NULL;
11757         }
11758 }
11759
11760 /**
11761  * Destroy policer rules.
11762  *
11763  * @param[in] dev
11764  *   Pointer to Ethernet device.
11765  * @param[in] fm
11766  *   Pointer to flow meter structure.
11767  * @param[in] attr
11768  *   Pointer to flow attributes.
11769  *
11770  * @return
11771  *   Always 0.
11772  */
11773 static int
11774 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
11775                               const struct mlx5_flow_meter *fm,
11776                               const struct rte_flow_attr *attr)
11777 {
11778         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
11779
11780         if (!mtb)
11781                 return 0;
11782         if (attr->egress)
11783                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
11784         if (attr->ingress)
11785                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
11786         if (attr->transfer)
11787                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
11788         return 0;
11789 }
11790
/**
 * Create specify domain meter policer rule.
 *
 * Creates (once) the jump-to-suffix-table action for the domain, then
 * one rule per meter color matching the color value in the given REG_C:
 * the rule counts (if a counter action exists for that color) and then
 * either drops or jumps to the suffix table, per the meter's policy.
 *
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] dtb
 *   Pointer to the meter domain table (within fm->mfts).
 * @param[in] mtr_reg_c
 *   Color match REG_C.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set from errno.
 */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;
	int ret = 0;

	/* Create jump action. */
	/* Only once per domain; reused on subsequent calls. */
	if (!dtb->jump_actn)
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(dtb->sfx_tbl->obj, &dtb->jump_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
	/* One rule per color (RTE_MTR_DROPPED itself is excluded). */
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0;

		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		/* Policy decides: drop the packet or continue to suffix. */
		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
					       (void *)&value, j, actions,
					       &dtb->policer_rules[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	rte_errno = errno;
	return -1;
}
11852
11853 /**
11854  * Create policer rules.
11855  *
11856  * @param[in] dev
11857  *   Pointer to Ethernet device.
11858  * @param[in] fm
11859  *   Pointer to flow meter structure.
11860  * @param[in] attr
11861  *   Pointer to flow attributes.
11862  *
11863  * @return
11864  *   0 on success, -1 otherwise.
11865  */
11866 static int
11867 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
11868                              struct mlx5_flow_meter *fm,
11869                              const struct rte_flow_attr *attr)
11870 {
11871         struct mlx5_priv *priv = dev->data->dev_private;
11872         struct mlx5_meter_domains_infos *mtb = fm->mfts;
11873         int ret;
11874
11875         if (attr->egress) {
11876                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
11877                                                 priv->mtr_color_reg);
11878                 if (ret) {
11879                         DRV_LOG(ERR, "Failed to create egress policer.");
11880                         goto error;
11881                 }
11882         }
11883         if (attr->ingress) {
11884                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
11885                                                 priv->mtr_color_reg);
11886                 if (ret) {
11887                         DRV_LOG(ERR, "Failed to create ingress policer.");
11888                         goto error;
11889                 }
11890         }
11891         if (attr->transfer) {
11892                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
11893                                                 priv->mtr_color_reg);
11894                 if (ret) {
11895                         DRV_LOG(ERR, "Failed to create transfer policer.");
11896                         goto error;
11897                 }
11898         }
11899         return 0;
11900 error:
11901         flow_dv_destroy_policer_rules(dev, fm, attr);
11902         return -1;
11903 }
11904
11905 /**
11906  * Validate the batch counter support in root table.
11907  *
11908  * Create a simple flow with invalid counter and drop action on root table to
11909  * validate if batch counter with offset on root table is supported or not.
11910  *
11911  * @param[in] dev
11912  *   Pointer to rte_eth_dev structure.
11913  *
11914  * @return
11915  *   0 on success, a negative errno value otherwise and rte_errno is set.
11916  */
11917 int
11918 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
11919 {
11920         struct mlx5_priv *priv = dev->data->dev_private;
11921         struct mlx5_dev_ctx_shared *sh = priv->sh;
11922         struct mlx5_flow_dv_match_params mask = {
11923                 .size = sizeof(mask.buf),
11924         };
11925         struct mlx5_flow_dv_match_params value = {
11926                 .size = sizeof(value.buf),
11927         };
11928         struct mlx5dv_flow_matcher_attr dv_attr = {
11929                 .type = IBV_FLOW_ATTR_NORMAL,
11930                 .priority = 0,
11931                 .match_criteria_enable = 0,
11932                 .match_mask = (void *)&mask,
11933         };
11934         void *actions[2] = { 0 };
11935         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
11936         struct mlx5_devx_obj *dcs = NULL;
11937         void *matcher = NULL;
11938         void *flow = NULL;
11939         int i, ret = -1;
11940
11941         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
11942         if (!tbl)
11943                 goto err;
11944         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
11945                                             NULL, 0, 0, NULL);
11946         if (!dest_tbl)
11947                 goto err;
11948         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
11949         if (!dcs)
11950                 goto err;
11951         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
11952                                                     &actions[0]);
11953         if (ret)
11954                 goto err;
11955         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
11956                                 (dest_tbl->obj, &actions[1]);
11957         if (ret)
11958                 goto err;
11959         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
11960         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
11961                                                &matcher);
11962         if (ret)
11963                 goto err;
11964         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
11965                                        actions, &flow);
11966 err:
11967         /*
11968          * If batch counter with offset is not supported, the driver will not
11969          * validate the invalid offset value, flow create should success.
11970          * In this case, it means batch counter is not supported in root table.
11971          *
11972          * Otherwise, if flow create is failed, counter offset is supported.
11973          */
11974         if (flow) {
11975                 DRV_LOG(INFO, "Batch counter is not supported in root "
11976                               "table. Switch to fallback mode.");
11977                 rte_errno = ENOTSUP;
11978                 ret = -rte_errno;
11979                 claim_zero(mlx5_flow_os_destroy_flow(flow));
11980         } else {
11981                 /* Check matcher to make sure validate fail at flow create. */
11982                 if (!matcher || (matcher && errno != EINVAL))
11983                         DRV_LOG(ERR, "Unexpected error in counter offset "
11984                                      "support detection");
11985                 ret = 0;
11986         }
11987         for (i = 0; i < 2; i++) {
11988                 if (actions[i])
11989                         claim_zero(mlx5_flow_os_destroy_flow_action
11990                                    (actions[i]));
11991         }
11992         if (matcher)
11993                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
11994         if (tbl)
11995                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
11996         if (dest_tbl)
11997                 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
11998         if (dcs)
11999                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12000         return ret;
12001 }
12002
/**
 * Query a devx counter.
 *
 * Reads the hardware counter and reports values relative to the last
 * clear (cached in cnt->hits / cnt->bytes); on @p clear the cache is
 * advanced to the current hardware values.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise return -1.
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
		      uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;
	uint64_t inn_pkts, inn_bytes;
	int ret;

	/* DevX counters require DevX support. */
	if (!priv->config.devx)
		return -1;

	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
	if (ret)
		return -1;
	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
	/* Report deltas since the last clear. */
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
	if (clear) {
		cnt->hits = inn_pkts;
		cnt->bytes = inn_bytes;
	}
	return 0;
}
12044
/**
 * Get aged-out flows.
 *
 * Walks the port's aged-counter list under its spinlock, optionally
 * copying up to @p nb_contexts user contexts, then re-arms the aging
 * event trigger.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   how many contexts get in success, otherwise negative errno value.
 *   if nb_contexts is 0, return the amount of all aged contexts.
 *   if nb_contexts is not 0 , return the amount of aged flows reported
 *   in the context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	int nb_flows = 0;

	/* A non-zero capacity requires a destination array. */
	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "Should assign at least one flow or"
					  " context to get if nb_contexts != 0");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			/* Stop once the caller's array is full. */
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* Re-arm the aging event trigger for this port. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
12098
12099 /*
12100  * Mutex-protected thunk to lock-free  __flow_dv_translate().
12101  */
12102 static int
12103 flow_dv_translate(struct rte_eth_dev *dev,
12104                   struct mlx5_flow *dev_flow,
12105                   const struct rte_flow_attr *attr,
12106                   const struct rte_flow_item items[],
12107                   const struct rte_flow_action actions[],
12108                   struct rte_flow_error *error)
12109 {
12110         int ret;
12111
12112         flow_dv_shared_lock(dev);
12113         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
12114         flow_dv_shared_unlock(dev);
12115         return ret;
12116 }
12117
/*
 * Mutex-protected thunk to lock-free  __flow_dv_apply().
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int rc;

	/* Serialize against other shared-resource users. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12133
/*
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize against other shared-resource users. */
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}
12144
/*
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Serialize against other shared-resource users. */
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}
12155
12156 /*
12157  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12158  */
12159 static uint32_t
12160 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12161 {
12162         uint32_t cnt;
12163
12164         flow_dv_shared_lock(dev);
12165         cnt = flow_dv_counter_alloc(dev, 0);
12166         flow_dv_shared_unlock(dev);
12167         return cnt;
12168 }
12169
12170 /*
12171  * Mutex-protected thunk to lock-free flow_dv_counter_release().
12172  */
12173 static void
12174 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
12175 {
12176         flow_dv_shared_lock(dev);
12177         flow_dv_counter_release(dev, cnt);
12178         flow_dv_shared_unlock(dev);
12179 }
12180
12181 /**
12182  * Validate shared action.
12183  * Dispatcher for action type specific validation.
12184  *
12185  * @param[in] dev
12186  *   Pointer to the Ethernet device structure.
12187  * @param[in] conf
12188  *   Shared action configuration.
12189  * @param[in] action
12190  *   The shared action object to validate.
12191  * @param[out] error
12192  *   Perform verbose error reporting if not NULL. Initialized in case of
12193  *   error only.
12194  *
12195  * @return
12196  *   0 on success, otherwise negative errno value.
12197  */
12198 static int
12199 flow_dv_action_validate(struct rte_eth_dev *dev,
12200                         const struct rte_flow_shared_action_conf *conf,
12201                         const struct rte_flow_action *action,
12202                         struct rte_flow_error *error)
12203 {
12204         RTE_SET_USED(conf);
12205         switch (action->type) {
12206         case RTE_FLOW_ACTION_TYPE_RSS:
12207                 return mlx5_validate_action_rss(dev, action, error);
12208         default:
12209                 return rte_flow_error_set(error, ENOTSUP,
12210                                           RTE_FLOW_ERROR_TYPE_ACTION,
12211                                           NULL,
12212                                           "action type not supported");
12213         }
12214 }
12215
/*
 * Mutex-protected thunk to lock-free  __flow_dv_action_create().
 */
static struct rte_flow_shared_action *
flow_dv_action_create(struct rte_eth_dev *dev,
		      const struct rte_flow_shared_action_conf *conf,
		      const struct rte_flow_action *action,
		      struct rte_flow_error *error)
{
	struct rte_flow_shared_action *created;

	/* Serialize against other shared-resource users. */
	flow_dv_shared_lock(dev);
	created = __flow_dv_action_create(dev, conf, action, error);
	flow_dv_shared_unlock(dev);
	return created;
}
12232
/*
 * Mutex-protected thunk to lock-free  __flow_dv_action_destroy().
 */
static int
flow_dv_action_destroy(struct rte_eth_dev *dev,
		       struct rte_flow_shared_action *action,
		       struct rte_flow_error *error)
{
	int rc;

	/* Serialize against other shared-resource users. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_action_destroy(dev, action, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12248
/*
 * Mutex-protected thunk to lock-free  __flow_dv_action_update().
 */
static int
flow_dv_action_update(struct rte_eth_dev *dev,
		      struct rte_flow_shared_action *action,
		      const void *action_conf,
		      struct rte_flow_error *error)
{
	int rc;

	/* Serialize against other shared-resource users. */
	flow_dv_shared_lock(dev);
	rc = __flow_dv_action_update(dev, action, action_conf, error);
	flow_dv_shared_unlock(dev);
	return rc;
}
12266
12267 static int
12268 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12269 {
12270         struct mlx5_priv *priv = dev->data->dev_private;
12271         int ret = 0;
12272
12273         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12274                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12275                                                 flags);
12276                 if (ret != 0)
12277                         return ret;
12278         }
12279         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12280                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12281                 if (ret != 0)
12282                         return ret;
12283         }
12284         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12285                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12286                 if (ret != 0)
12287                         return ret;
12288         }
12289         return 0;
12290 }
12291
/*
 * Driver-ops vtable registering the DV/DR flow engine callbacks with the
 * generic mlx5 flow layer.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	/* Flow rule lifecycle. */
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	/* Meter table and policer rule management. */
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	/* Flow counters and aged-flow reporting. */
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	/* Shared actions: create/destroy/update are mutex-protected thunks. */
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	/* Explicit steering-domain synchronization. */
	.sync_domain = flow_dv_sync_domain,
};
12314
12315 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12316