net/mlx5: fix tunnel flow priority
dpdk.git: drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};
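
/*
 * Illustration (not driver code): because the anonymous bit-field struct
 * aliases the single 32-bit "attr" word, every layer flag can be cleared
 * in one store, which is what the tunnel cases in flow_dv_attr_init()
 * rely on:
 *
 *	union flow_dv_attr a = { .attr = 0 };
 *
 *	a.ipv4 = 1;
 *	a.udp = 1;
 *	a.attr = 0;	<- resets ipv4, udp and valid together
 */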

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
 * mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, this dev_flow is the suffix
	 * flow and the layer flags were set by the prefix flow. The layer
	 * flags from the prefix flow must be used because the suffix flow
	 * may not carry the user-defined items after the flow is split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Locking occurs only if the context is actually
 * shared, i.e. we have a multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		MLX5_ASSERT(!ret);
		(void)ret;
	}
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		MLX5_ASSERT(!ret);
		(void)ret;
	}
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;

	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}
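
/*
 * Sketch (illustration of the standard 802.1Q layout, not driver code):
 * the 16-bit TCI is PCP(3 bits) | DEI(1 bit) | VID(12 bits), which is
 * why PCP is shifted by 13 above and VID is masked with 0x0fff:
 *
 *	tci = (uint16_t)(pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) |
 *	      (uint16_t)(dei << 12) | (vid & MLX5DV_FLOW_VLAN_VID_MASK);
 */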

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}
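
/*
 * Worked example (illustration only): for data = {0x12, 0x34, 0x56} and
 * size = 3, the unaligned 16-bit load converts to 0x1234 in host order
 * and the third byte is appended, so the function returns 0x123456
 * regardless of host endianness.
 */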

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are deduced from the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * A mask must always be present; it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
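
/*
 * Worked example (illustration only): a field mask that fetches as
 * 0x00fff000 yields off_b = rte_bsf32(0x00fff000) = 12 and
 * size_b = 32 - 12 - __builtin_clz(0x00fff000) = 32 - 12 - 8 = 12,
 * i.e. a 12-bit modification starting at bit offset 12. A full 32-bit
 * mask is encoded as length 0, presumably interpreted by the hardware
 * as the whole 32-bit word.
 */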

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X using the increment
		 * operation, UINT32_MAX is added X times to Y; each
		 * addition of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
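
/*
 * Worked example (illustration only): 32-bit additions wrap modulo 2^32
 * and UINT32_MAX == 2^32 - 1 == -1 (mod 2^32), so adding
 * value * UINT32_MAX is equivalent to subtracting value. E.g. to
 * decrement the sequence number by 3, the command adds 3 * UINT32_MAX,
 * which truncates to 0xfffffffd, i.e. -3.
 */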

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X using the increment
		 * operation, UINT32_MAX is added X times to Y; each
		 * addition of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
	[REG_NONE] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NONE);
	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NONE);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores the endianness because there
			 * is no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
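
/*
 * Illustration (hypothetical mask value): if only the upper 16 bits of
 * reg_c[0] are usable (dv_regc0_mask == 0xffff0000), a copy into
 * REG_C_0 starts at dst_offset = rte_bsf32(0xffff0000) = 16 and the
 * source mask is narrowed accordingly, so the reserved lower bits of
 * the register are never overwritten.
 */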
1121
1122 /**
1123  * Convert MARK action to DV specification. This routine is used
1124  * in extensive metadata only and requires metadata register to be
1125  * handled. In legacy mode hardware tag resource is engaged.
1126  *
1127  * @param[in] dev
1128  *   Pointer to the rte_eth_dev structure.
1129  * @param[in] conf
1130  *   Pointer to MARK action specification.
1131  * @param[in,out] resource
1132  *   Pointer to the modify-header resource.
1133  * @param[out] error
1134  *   Pointer to the error structure.
1135  *
1136  * @return
1137  *   0 on success, a negative errno value otherwise and rte_errno is set.
1138  */
1139 static int
1140 flow_dv_convert_action_mark(struct rte_eth_dev *dev,
1141                             const struct rte_flow_action_mark *conf,
1142                             struct mlx5_flow_dv_modify_hdr_resource *resource,
1143                             struct rte_flow_error *error)
1144 {
1145         struct mlx5_priv *priv = dev->data->dev_private;
1146         rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
1147                                            priv->sh->dv_mark_mask);
1148         rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
1149         struct rte_flow_item item = {
1150                 .spec = &data,
1151                 .mask = &mask,
1152         };
1153         struct field_modify_info reg_c_x[] = {
1154                 {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
1155                 {0, 0, 0},
1156         };
1157         int reg;
1158
1159         if (!mask)
1160                 return rte_flow_error_set(error, EINVAL,
1161                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1162                                           NULL, "zero mark action mask");
1163         reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1164         if (reg < 0)
1165                 return reg;
1166         MLX5_ASSERT(reg > 0);
1167         if (reg == REG_C_0) {
1168                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1169                 uint32_t shl_c0 = rte_bsf32(msk_c0);
1170
1171                 data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
1172                 mask = rte_cpu_to_be_32(mask) & msk_c0;
1173                 mask = rte_cpu_to_be_32(mask << shl_c0);
1174         }
1175         reg_c_x[0].id = reg_to_field[reg];
1176         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1177                                              MLX5_MODIFICATION_TYPE_SET, error);
1178 }
1179
1180 /**
1181  * Get metadata register index for specified steering domain.
1182  *
1183  * @param[in] dev
1184  *   Pointer to the rte_eth_dev structure.
1185  * @param[in] attr
1186  *   Attributes of flow to determine steering domain.
1187  * @param[out] error
1188  *   Pointer to the error structure.
1189  *
1190  * @return
1191  *   positive index on success, a negative errno value otherwise
1192  *   and rte_errno is set.
1193  */
1194 static enum modify_reg
1195 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1196                          const struct rte_flow_attr *attr,
1197                          struct rte_flow_error *error)
1198 {
1199         int reg =
1200                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1201                                           MLX5_METADATA_FDB :
1202                                             attr->egress ?
1203                                             MLX5_METADATA_TX :
1204                                             MLX5_METADATA_RX, 0, error);
1205         if (reg < 0)
1206                 return rte_flow_error_set(error,
1207                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1208                                           NULL, "unavailable "
1209                                           "metadata register");
1210         return reg;
1211 }
1212
1213 /**
1214  * Convert SET_META action to DV specification.
1215  *
1216  * @param[in] dev
1217  *   Pointer to the rte_eth_dev structure.
1218  * @param[in,out] resource
1219  *   Pointer to the modify-header resource.
1220  * @param[in] attr
1221  *   Attributes of flow that includes this item.
1222  * @param[in] conf
1223  *   Pointer to action specification.
1224  * @param[out] error
1225  *   Pointer to the error structure.
1226  *
1227  * @return
1228  *   0 on success, a negative errno value otherwise and rte_errno is set.
1229  */
1230 static int
1231 flow_dv_convert_action_set_meta
1232                         (struct rte_eth_dev *dev,
1233                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1234                          const struct rte_flow_attr *attr,
1235                          const struct rte_flow_action_set_meta *conf,
1236                          struct rte_flow_error *error)
1237 {
1238         uint32_t data = conf->data;
1239         uint32_t mask = conf->mask;
1240         struct rte_flow_item item = {
1241                 .spec = &data,
1242                 .mask = &mask,
1243         };
1244         struct field_modify_info reg_c_x[] = {
1245                 [1] = {0, 0, 0},
1246         };
1247         int reg = flow_dv_get_metadata_reg(dev, attr, error);
1248
1249         if (reg < 0)
1250                 return reg;
1251         /*
1252          * In datapath code there is no endianness
1253          * coversions for perfromance reasons, all
1254          * pattern conversions are done in rte_flow.
1255          */
1256         if (reg == REG_C_0) {
1257                 struct mlx5_priv *priv = dev->data->dev_private;
1258                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
1259                 uint32_t shl_c0;
1260
1261                 MLX5_ASSERT(msk_c0);
1262 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
1263                 shl_c0 = rte_bsf32(msk_c0);
1264 #else
1265                 shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
1266 #endif
1267                 mask <<= shl_c0;
1268                 data <<= shl_c0;
1269                 MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
1270         }
1271         reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
1272         /* The routine expects parameters in memory as big-endian ones. */
1273         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1274                                              MLX5_MODIFICATION_TYPE_SET, error);
1275 }
1276
1277 /**
1278  * Convert modify-header set IPv4 DSCP action to DV specification.
1279  *
1280  * @param[in,out] resource
1281  *   Pointer to the modify-header resource.
1282  * @param[in] action
1283  *   Pointer to action specification.
1284  * @param[out] error
1285  *   Pointer to the error structure.
1286  *
1287  * @return
1288  *   0 on success, a negative errno value otherwise and rte_errno is set.
1289  */
1290 static int
1291 flow_dv_convert_action_modify_ipv4_dscp
1292                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1293                          const struct rte_flow_action *action,
1294                          struct rte_flow_error *error)
1295 {
1296         const struct rte_flow_action_set_dscp *conf =
1297                 (const struct rte_flow_action_set_dscp *)(action->conf);
1298         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1299         struct rte_flow_item_ipv4 ipv4;
1300         struct rte_flow_item_ipv4 ipv4_mask;
1301
1302         memset(&ipv4, 0, sizeof(ipv4));
1303         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1304         ipv4.hdr.type_of_service = conf->dscp;
1305         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1306         item.spec = &ipv4;
1307         item.mask = &ipv4_mask;
1308         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1309                                              MLX5_MODIFICATION_TYPE_SET, error);
1310 }
1311
1312 /**
1313  * Convert modify-header set IPv6 DSCP action to DV specification.
1314  *
1315  * @param[in,out] resource
1316  *   Pointer to the modify-header resource.
1317  * @param[in] action
1318  *   Pointer to action specification.
1319  * @param[out] error
1320  *   Pointer to the error structure.
1321  *
1322  * @return
1323  *   0 on success, a negative errno value otherwise and rte_errno is set.
1324  */
1325 static int
1326 flow_dv_convert_action_modify_ipv6_dscp
1327                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1328                          const struct rte_flow_action *action,
1329                          struct rte_flow_error *error)
1330 {
1331         const struct rte_flow_action_set_dscp *conf =
1332                 (const struct rte_flow_action_set_dscp *)(action->conf);
1333         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1334         struct rte_flow_item_ipv6 ipv6;
1335         struct rte_flow_item_ipv6 ipv6_mask;
1336
1337         memset(&ipv6, 0, sizeof(ipv6));
1338         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1339         /*
1340          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1341          * rdma-core only accept the DSCP bits byte aligned start from
1342          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1343          * bits in IPv6 case as rdma-core requires byte aligned value.
1344          */
1345         ipv6.hdr.vtc_flow = conf->dscp;
1346         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1347         item.spec = &ipv6;
1348         item.mask = &ipv6_mask;
1349         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1350                                              MLX5_MODIFICATION_TYPE_SET, error);
1351 }
1352
1353 /**
1354  * Validate MARK item.
1355  *
1356  * @param[in] dev
1357  *   Pointer to the rte_eth_dev structure.
1358  * @param[in] item
1359  *   Item specification.
1360  * @param[in] attr
1361  *   Attributes of flow that includes this item.
1362  * @param[out] error
1363  *   Pointer to error structure.
1364  *
1365  * @return
1366  *   0 on success, a negative errno value otherwise and rte_errno is set.
1367  */
1368 static int
1369 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1370                            const struct rte_flow_item *item,
1371                            const struct rte_flow_attr *attr __rte_unused,
1372                            struct rte_flow_error *error)
1373 {
1374         struct mlx5_priv *priv = dev->data->dev_private;
1375         struct mlx5_dev_config *config = &priv->config;
1376         const struct rte_flow_item_mark *spec = item->spec;
1377         const struct rte_flow_item_mark *mask = item->mask;
1378         const struct rte_flow_item_mark nic_mask = {
1379                 .id = priv->sh->dv_mark_mask,
1380         };
1381         int ret;
1382
1383         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1384                 return rte_flow_error_set(error, ENOTSUP,
1385                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1386                                           "extended metadata feature"
1387                                           " isn't enabled");
1388         if (!mlx5_flow_ext_mreg_supported(dev))
1389                 return rte_flow_error_set(error, ENOTSUP,
1390                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1391                                           "extended metadata register"
1392                                           " isn't supported");
1393         if (!nic_mask.id)
1394                 return rte_flow_error_set(error, ENOTSUP,
1395                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1396                                           "extended metadata register"
1397                                           " isn't available");
1398         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1399         if (ret < 0)
1400                 return ret;
1401         if (!spec)
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1404                                           item->spec,
1405                                           "data cannot be empty");
1406         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1407                 return rte_flow_error_set(error, EINVAL,
1408                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1409                                           &spec->id,
1410                                           "mark id exceeds the limit");
1411         if (!mask)
1412                 mask = &nic_mask;
1413         if (!mask->id)
1414                 return rte_flow_error_set(error, EINVAL,
1415                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1416                                         "mask cannot be zero");
1417
1418         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1419                                         (const uint8_t *)&nic_mask,
1420                                         sizeof(struct rte_flow_item_mark),
1421                                         error);
1422         if (ret < 0)
1423                 return ret;
1424         return 0;
1425 }
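
/*
 * Illustrative sketch (not part of the driver): a MARK item that passes the
 * validation above, assuming extended metadata is enabled (dv_xmeta_en is
 * not MLX5_XMETA_MODE_LEGACY); the id 42 is arbitrary and must fit within
 * both MLX5_FLOW_MARK_MAX and the register mask. A NULL mask defaults to
 * the register-wide nic_mask above:
 *
 *	struct rte_flow_item_mark mark_spec = { .id = 42 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_MARK,
 *		.spec = &mark_spec,
 *		.mask = NULL,
 *	};
 */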
1426
1427 /**
1428  * Validate META item.
1429  *
1430  * @param[in] dev
1431  *   Pointer to the rte_eth_dev structure.
1432  * @param[in] item
1433  *   Item specification.
1434  * @param[in] attr
1435  *   Attributes of flow that includes this item.
1436  * @param[out] error
1437  *   Pointer to error structure.
1438  *
1439  * @return
1440  *   0 on success, a negative errno value otherwise and rte_errno is set.
1441  */
1442 static int
1443 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1444                            const struct rte_flow_item *item,
1445                            const struct rte_flow_attr *attr,
1446                            struct rte_flow_error *error)
1447 {
1448         struct mlx5_priv *priv = dev->data->dev_private;
1449         struct mlx5_dev_config *config = &priv->config;
1450         const struct rte_flow_item_meta *spec = item->spec;
1451         const struct rte_flow_item_meta *mask = item->mask;
1452         struct rte_flow_item_meta nic_mask = {
1453                 .data = UINT32_MAX
1454         };
1455         int reg;
1456         int ret;
1457
1458         if (!spec)
1459                 return rte_flow_error_set(error, EINVAL,
1460                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1461                                           item->spec,
1462                                           "data cannot be empty");
1463         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1464                 if (!mlx5_flow_ext_mreg_supported(dev))
1465                         return rte_flow_error_set(error, ENOTSUP,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1467                                           "extended metadata register"
1468                                           " isn't supported");
1469                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1470                 if (reg < 0)
1471                         return reg;
1472                 if (reg == REG_B)
1473                         return rte_flow_error_set(error, ENOTSUP,
1474                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1475                                           "match on reg_b "
1476                                           "isn't supported");
1477                 if (reg != REG_A)
1478                         nic_mask.data = priv->sh->dv_meta_mask;
1479         } else if (attr->transfer) {
1480                 return rte_flow_error_set(error, ENOTSUP,
1481                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1482                                         "extended metadata feature "
1483                                         "should be enabled when "
1484                                         "meta item is requested "
1485                                         "with e-switch mode");
1486         }
1487         if (!mask)
1488                 mask = &rte_flow_item_meta_mask;
1489         if (!mask->data)
1490                 return rte_flow_error_set(error, EINVAL,
1491                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1492                                         "mask cannot be zero");
1493
1494         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1495                                         (const uint8_t *)&nic_mask,
1496                                         sizeof(struct rte_flow_item_meta),
1497                                         error);
1498         return ret;
1499 }
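
/*
 * Illustrative sketch (not part of the driver): a META item matching 32-bit
 * metadata previously attached by a SET_META action or via the mbuf dynamic
 * metadata field on Tx; the values are arbitrary examples and must stay
 * within the register mask resolved above:
 *
 *	struct rte_flow_item_meta meta_spec = { .data = 0x1234 };
 *	struct rte_flow_item_meta meta_mask = { .data = 0xffff };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_META,
 *		.spec = &meta_spec,
 *		.mask = &meta_mask,
 *	};
 */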
1500
1501 /**
1502  * Validate TAG item.
1503  *
1504  * @param[in] dev
1505  *   Pointer to the rte_eth_dev structure.
1506  * @param[in] item
1507  *   Item specification.
1508  * @param[in] attr
1509  *   Attributes of flow that includes this item.
1510  * @param[out] error
1511  *   Pointer to error structure.
1512  *
1513  * @return
1514  *   0 on success, a negative errno value otherwise and rte_errno is set.
1515  */
1516 static int
1517 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1518                           const struct rte_flow_item *item,
1519                           const struct rte_flow_attr *attr __rte_unused,
1520                           struct rte_flow_error *error)
1521 {
1522         const struct rte_flow_item_tag *spec = item->spec;
1523         const struct rte_flow_item_tag *mask = item->mask;
1524         const struct rte_flow_item_tag nic_mask = {
1525                 .data = RTE_BE32(UINT32_MAX),
1526                 .index = 0xff,
1527         };
1528         int ret;
1529
1530         if (!mlx5_flow_ext_mreg_supported(dev))
1531                 return rte_flow_error_set(error, ENOTSUP,
1532                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1533                                           "extensive metadata register"
1534                                           " isn't supported");
1535         if (!spec)
1536                 return rte_flow_error_set(error, EINVAL,
1537                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1538                                           item->spec,
1539                                           "data cannot be empty");
1540         if (!mask)
1541                 mask = &rte_flow_item_tag_mask;
1542         if (!mask->data)
1543                 return rte_flow_error_set(error, EINVAL,
1544                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1545                                         "mask cannot be zero");
1546
1547         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1548                                         (const uint8_t *)&nic_mask,
1549                                         sizeof(struct rte_flow_item_tag),
1550                                         error);
1551         if (ret < 0)
1552                 return ret;
1553         if (mask->index != 0xff)
1554                 return rte_flow_error_set(error, EINVAL,
1555                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1556                                           "partial mask for tag index"
1557                                           " is not supported");
1558         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1559         if (ret < 0)
1560                 return ret;
1561         MLX5_ASSERT(ret != REG_NONE);
1562         return 0;
1563 }
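
/*
 * Illustrative sketch (not part of the driver): a TAG item reading back a
 * value stored by an earlier SET_TAG action; note the index mask must be
 * exactly 0xff per the check above (data and index values are arbitrary):
 *
 *	struct rte_flow_item_tag tag_spec = { .data = 0xbeef, .index = 0 };
 *	struct rte_flow_item_tag tag_mask = { .data = UINT32_MAX, .index = 0xff };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &tag_spec,
 *		.mask = &tag_mask,
 *	};
 */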
1564
1565 /**
1566  * Validate port ID (vport) item.
1567  *
1568  * @param[in] dev
1569  *   Pointer to the rte_eth_dev structure.
1570  * @param[in] item
1571  *   Item specification.
1572  * @param[in] attr
1573  *   Attributes of flow that includes this item.
1574  * @param[in] item_flags
1575  *   Bit-fields that holds the items detected until now.
1576  * @param[out] error
1577  *   Pointer to error structure.
1578  *
1579  * @return
1580  *   0 on success, a negative errno value otherwise and rte_errno is set.
1581  */
1582 static int
1583 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1584                               const struct rte_flow_item *item,
1585                               const struct rte_flow_attr *attr,
1586                               uint64_t item_flags,
1587                               struct rte_flow_error *error)
1588 {
1589         const struct rte_flow_item_port_id *spec = item->spec;
1590         const struct rte_flow_item_port_id *mask = item->mask;
1591         const struct rte_flow_item_port_id switch_mask = {
1592                         .id = 0xffffffff,
1593         };
1594         struct mlx5_priv *esw_priv;
1595         struct mlx5_priv *dev_priv;
1596         int ret;
1597
1598         if (!attr->transfer)
1599                 return rte_flow_error_set(error, EINVAL,
1600                                           RTE_FLOW_ERROR_TYPE_ITEM,
1601                                           NULL,
1602                                           "match on port id is valid only"
1603                                           " when transfer flag is enabled");
1604         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1605                 return rte_flow_error_set(error, ENOTSUP,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1607                                           "multiple source ports are not"
1608                                           " supported");
1609         if (!mask)
1610                 mask = &switch_mask;
1611         if (mask->id != 0xffffffff)
1612                 return rte_flow_error_set(error, ENOTSUP,
1613                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1614                                            mask,
1615                                            "no support for partial mask on"
1616                                            " \"id\" field");
1617         ret = mlx5_flow_item_acceptable
1618                                 (item, (const uint8_t *)mask,
1619                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1620                                  sizeof(struct rte_flow_item_port_id),
1621                                  error);
1622         if (ret)
1623                 return ret;
1624         if (!spec)
1625                 return 0;
1626         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1627         if (!esw_priv)
1628                 return rte_flow_error_set(error, rte_errno,
1629                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1630                                           "failed to obtain E-Switch info for"
1631                                           " port");
1632         dev_priv = mlx5_dev_to_eswitch_info(dev);
1633         if (!dev_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1636                                           NULL,
1637                                           "failed to obtain E-Switch info");
1638         if (esw_priv->domain_id != dev_priv->domain_id)
1639                 return rte_flow_error_set(error, EINVAL,
1640                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1641                                           "cannot match on a port from a"
1642                                           " different E-Switch");
1643         return 0;
1644 }
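
/*
 * Illustrative sketch (not part of the driver): matching traffic from a
 * given DPDK port in a transfer (E-Switch) rule; only a full mask on the
 * id is accepted by the check above, so the default mask (all ones) or a
 * NULL mask is used. Port id 1 is an arbitrary example:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .transfer = 1 };
 *	struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid_spec,
 *		.mask = NULL,
 *	};
 */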
1645
1646 /**
1647  * Validate VLAN item.
1648  *
1649  * @param[in] item
1650  *   Item specification.
1651  * @param[in] item_flags
1652  *   Bit-fields that holds the items detected until now.
1653  * @param[in] dev
1654  *   Ethernet device flow is being created on.
1655  * @param[out] error
1656  *   Pointer to error structure.
1657  *
1658  * @return
1659  *   0 on success, a negative errno value otherwise and rte_errno is set.
1660  */
1661 static int
1662 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1663                            uint64_t item_flags,
1664                            struct rte_eth_dev *dev,
1665                            struct rte_flow_error *error)
1666 {
1667         const struct rte_flow_item_vlan *mask = item->mask;
1668         const struct rte_flow_item_vlan nic_mask = {
1669                 .tci = RTE_BE16(UINT16_MAX),
1670                 .inner_type = RTE_BE16(UINT16_MAX),
1671         };
1672         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1673         int ret;
1674         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1675                                         MLX5_FLOW_LAYER_INNER_L4) :
1676                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1677                                         MLX5_FLOW_LAYER_OUTER_L4);
1678         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1679                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1680
1681         if (item_flags & vlanm)
1682                 return rte_flow_error_set(error, EINVAL,
1683                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1684                                           "multiple VLAN layers not supported");
1685         else if ((item_flags & l34m) != 0)
1686                 return rte_flow_error_set(error, EINVAL,
1687                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1688                                           "VLAN cannot follow L3/L4 layer");
1689         if (!mask)
1690                 mask = &rte_flow_item_vlan_mask;
1691         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1692                                         (const uint8_t *)&nic_mask,
1693                                         sizeof(struct rte_flow_item_vlan),
1694                                         error);
1695         if (ret)
1696                 return ret;
1697         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1698                 struct mlx5_priv *priv = dev->data->dev_private;
1699
1700                 if (priv->vmwa_context) {
1701                         /*
1702                          * A non-NULL context means a virtual machine with
1703                          * SR-IOV is in use; a VLAN interface must be
1704                          * created so the hypervisor sets up the E-Switch
1705                          * vport context correctly. We avoid creating
1706                          * multiple VLAN interfaces; VLAN tag masks are unsupported.
1707                          */
1708                         return rte_flow_error_set(error, EINVAL,
1709                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1710                                                   item,
1711                                                   "VLAN tag mask is not"
1712                                                   " supported in virtual"
1713                                                   " environment");
1714                 }
1715         }
1716         return 0;
1717 }
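
/*
 * Illustrative sketch (not part of the driver): an outer VLAN item; a TCI
 * mask of exactly 0x0fff matches the VID only, which also satisfies the
 * hypervisor workaround check above (VID 100 is an arbitrary example):
 *
 *	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		.spec = &vlan_spec,
 *		.mask = &vlan_mask,
 *	};
 */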
1718
1719 /*
1720  * GTP flags are contained in 1 byte of the format:
1721  * -------------------------------------------
1722  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1723  * |-----------------------------------------|
1724  * | value | Version | PT | Res | E | S | PN |
1725  * -------------------------------------------
1726  *
1727  * Matching is supported only for GTP flags E, S, PN.
1728  */
1729 #define MLX5_GTP_FLAGS_MASK     0x07
1730
1731 /**
1732  * Validate GTP item.
1733  *
1734  * @param[in] dev
1735  *   Pointer to the rte_eth_dev structure.
1736  * @param[in] item
1737  *   Item specification.
1738  * @param[in] item_flags
1739  *   Bit-fields that holds the items detected until now.
1740  * @param[out] error
1741  *   Pointer to error structure.
1742  *
1743  * @return
1744  *   0 on success, a negative errno value otherwise and rte_errno is set.
1745  */
1746 static int
1747 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1748                           const struct rte_flow_item *item,
1749                           uint64_t item_flags,
1750                           struct rte_flow_error *error)
1751 {
1752         struct mlx5_priv *priv = dev->data->dev_private;
1753         const struct rte_flow_item_gtp *spec = item->spec;
1754         const struct rte_flow_item_gtp *mask = item->mask;
1755         const struct rte_flow_item_gtp nic_mask = {
1756                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1757                 .msg_type = 0xff,
1758                 .teid = RTE_BE32(0xffffffff),
1759         };
1760
1761         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1762                 return rte_flow_error_set(error, ENOTSUP,
1763                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1764                                           "GTP support is not enabled");
1765         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1766                 return rte_flow_error_set(error, ENOTSUP,
1767                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1768                                           "multiple tunnel layers not"
1769                                           " supported");
1770         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1771                 return rte_flow_error_set(error, EINVAL,
1772                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1773                                           "no outer UDP layer found");
1774         if (!mask)
1775                 mask = &rte_flow_item_gtp_mask;
1776         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1777                 return rte_flow_error_set(error, ENOTSUP,
1778                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1779                                           "Match is supported for GTP"
1780                                           " flags only");
1781         return mlx5_flow_item_acceptable
1782                 (item, (const uint8_t *)mask,
1783                  (const uint8_t *)&nic_mask,
1784                  sizeof(struct rte_flow_item_gtp),
1785                  error);
1786 }
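
/*
 * Illustrative sketch (not part of the driver): a GTP item placed after an
 * outer UDP item as required above; the default mask matches on the TEID
 * only, so the restriction on the version/PT/reserved flag bits is not
 * triggered (TEID 1234 is an arbitrary example):
 *
 *	struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(1234) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_GTP,
 *		.spec = &gtp_spec,
 *		.mask = &rte_flow_item_gtp_mask,
 *	};
 */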
1787
1788 /**
1789  * Validate the pop VLAN action.
1790  *
1791  * @param[in] dev
1792  *   Pointer to the rte_eth_dev structure.
1793  * @param[in] action_flags
1794  *   Holds the actions detected until now.
1795  * @param[in] action
1796  *   Pointer to the pop vlan action.
1797  * @param[in] item_flags
1798  *   The items found in this flow rule.
1799  * @param[in] attr
1800  *   Pointer to flow attributes.
1801  * @param[out] error
1802  *   Pointer to error structure.
1803  *
1804  * @return
1805  *   0 on success, a negative errno value otherwise and rte_errno is set.
1806  */
1807 static int
1808 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1809                                  uint64_t action_flags,
1810                                  const struct rte_flow_action *action,
1811                                  uint64_t item_flags,
1812                                  const struct rte_flow_attr *attr,
1813                                  struct rte_flow_error *error)
1814 {
1815         const struct mlx5_priv *priv = dev->data->dev_private;
1816
1819         if (!priv->sh->pop_vlan_action)
1820                 return rte_flow_error_set(error, ENOTSUP,
1821                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1822                                           NULL,
1823                                           "pop vlan action is not supported");
1824         if (attr->egress)
1825                 return rte_flow_error_set(error, ENOTSUP,
1826                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1827                                           NULL,
1828                                           "pop vlan action not supported for "
1829                                           "egress");
1830         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1831                 return rte_flow_error_set(error, ENOTSUP,
1832                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1833                                           "no support for multiple VLAN "
1834                                           "actions");
1835         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
1836         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
1837             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
1838                 return rte_flow_error_set(error, ENOTSUP,
1839                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1840                                           NULL,
1841                                           "cannot pop vlan after decap without "
1842                                           "match on inner vlan in the flow");
1843         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
1844         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
1845             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1846                 return rte_flow_error_set(error, ENOTSUP,
1847                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1848                                           NULL,
1849                                           "cannot pop vlan without a "
1850                                           "match on (outer) vlan in the flow");
1851         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1852                 return rte_flow_error_set(error, EINVAL,
1853                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1854                                           "wrong action order, port_id should "
1855                                           "be after pop VLAN action");
1856         if (!attr->transfer && priv->representor)
1857                 return rte_flow_error_set(error, ENOTSUP,
1858                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1859                                           "pop vlan action for VF representor "
1860                                           "not supported on NIC table");
1861         return 0;
1862 }
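
/*
 * Illustrative sketch (not part of the driver): a minimal ingress rule that
 * satisfies the ordering rules above; the pattern carries an outer VLAN
 * (eth / vlan / end) and the pop action precedes the fate action. The
 * queue conf is assumed to be defined elsewhere:
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */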
1863
1864 /**
1865  * Get default VLAN info from the VLAN match info in the pattern.
1866  *
1867  * @param[in] items
1868  *   The list of item specifications.
1869  * @param[out] vlan
1870  *   Pointer to the VLAN info to fill.
1871  */
1875 static void
1876 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1877                                   struct rte_vlan_hdr *vlan)
1878 {
1879         const struct rte_flow_item_vlan nic_mask = {
1880                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1881                                 MLX5DV_FLOW_VLAN_VID_MASK),
1882                 .inner_type = RTE_BE16(0xffff),
1883         };
1884
1885         if (items == NULL)
1886                 return;
1887         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1888                 int type = items->type;
1889
1890                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1891                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1892                         break;
1893         }
1894         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1895                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1896                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1897
1898                 /* If VLAN item in pattern doesn't contain data, return here. */
1899                 if (!vlan_v)
1900                         return;
1901                 if (!vlan_m)
1902                         vlan_m = &nic_mask;
1903                 /* Only full match values are accepted. */
1904                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1905                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1906                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1907                         vlan->vlan_tci |=
1908                                 rte_be_to_cpu_16(vlan_v->tci &
1909                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1910                 }
1911                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1912                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1913                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1914                         vlan->vlan_tci |=
1915                                 rte_be_to_cpu_16(vlan_v->tci &
1916                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1917                 }
1918                 if (vlan_m->inner_type == nic_mask.inner_type)
1919                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1920                                                            vlan_m->inner_type);
1921         }
1922 }
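
/*
 * Worked example (illustrative): a fully masked TCI of 0xe064 decomposes as
 * PCP = 0xe064 >> 13 = 7 and VID = 0xe064 & 0x0fff = 100, so the helper
 * above fills both fields of vlan->vlan_tci when the PCP and VID mask bits
 * are fully set in the pattern.
 */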
1923
1924 /**
1925  * Validate the push VLAN action.
1926  *
1927  * @param[in] dev
1928  *   Pointer to the rte_eth_dev structure.
1929  * @param[in] action_flags
1930  *   Holds the actions detected until now.
1931  * @param[in] vlan_m
1932  *   VLAN item mask from the flow pattern, or NULL if not present.
1933  * @param[in] action
1934  *   Pointer to the action structure.
1935  * @param[in] attr
1936  *   Pointer to flow attributes
1937  * @param[out] error
1938  *   Pointer to error structure.
1939  *
1940  * @return
1941  *   0 on success, a negative errno value otherwise and rte_errno is set.
1942  */
1943 static int
1944 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
1945                                   uint64_t action_flags,
1946                                   const struct rte_flow_item_vlan *vlan_m,
1947                                   const struct rte_flow_action *action,
1948                                   const struct rte_flow_attr *attr,
1949                                   struct rte_flow_error *error)
1950 {
1951         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1952         const struct mlx5_priv *priv = dev->data->dev_private;
1953
1954         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1955             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1956                 return rte_flow_error_set(error, EINVAL,
1957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1958                                           "invalid vlan ethertype");
1959         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1962                                           "wrong action order, port_id should "
1963                                           "be after push VLAN");
1964         if (!attr->transfer && priv->representor)
1965                 return rte_flow_error_set(error, ENOTSUP,
1966                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1967                                           "push vlan action for VF representor "
1968                                           "not supported on NIC table");
1969         if (vlan_m &&
1970             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
1971             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
1972                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
1973             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
1974             !(mlx5_flow_find_action
1975                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
1976                 return rte_flow_error_set(error, EINVAL,
1977                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1978                                           "not full match mask on VLAN PCP and "
1979                                           "there is no of_set_vlan_pcp action, "
1980                                           "push VLAN action cannot figure out "
1981                                           "PCP value");
1982         if (vlan_m &&
1983             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
1984             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
1985                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
1986             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
1987             !(mlx5_flow_find_action
1988                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
1989                 return rte_flow_error_set(error, EINVAL,
1990                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1991                                           "not full match mask on VLAN VID and "
1992                                           "there is no of_set_vlan_vid action, "
1993                                           "push VLAN action cannot figure out "
1994                                           "VID value");
1996         return 0;
1997 }
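
/*
 * Illustrative sketch (not part of the driver): a rule pushing a VLAN
 * header; since the pattern is assumed to carry no fully masked VLAN item,
 * PCP and VID are supplied explicitly, satisfying the checks above (PCP 3
 * and VID 100 are arbitrary examples):
 *
 *	struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */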
1998
1999 /**
2000  * Validate the set VLAN PCP.
2001  *
2002  * @param[in] action_flags
2003  *   Holds the actions detected until now.
2004  * @param[in] actions
2005  *   Pointer to the list of actions remaining in the flow rule.
2006  * @param[out] error
2007  *   Pointer to error structure.
2008  *
2009  * @return
2010  *   0 on success, a negative errno value otherwise and rte_errno is set.
2011  */
2012 static int
2013 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2014                                      const struct rte_flow_action actions[],
2015                                      struct rte_flow_error *error)
2016 {
2017         const struct rte_flow_action *action = actions;
2018         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2019
2020         if (conf->vlan_pcp > 7)
2021                 return rte_flow_error_set(error, EINVAL,
2022                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2023                                           "VLAN PCP value is too big");
2024         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2025                 return rte_flow_error_set(error, ENOTSUP,
2026                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2027                                           "set VLAN PCP action must follow "
2028                                           "the push VLAN action");
2029         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2030                 return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2032                                           "Multiple VLAN PCP modifications are "
2033                                           "not supported");
2034         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2035                 return rte_flow_error_set(error, EINVAL,
2036                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2037                                           "wrong action order, port_id should "
2038                                           "be after set VLAN PCP");
2039         return 0;
2040 }
2041
2042 /**
2043  * Validate the set VLAN VID.
2044  *
2045  * @param[in] item_flags
2046  *   Holds the items detected in this rule.
2047  * @param[in] action_flags
2048  *   Holds the actions detected until now.
2049  * @param[in] actions
2050  *   Pointer to the list of actions remaining in the flow rule.
2051  * @param[out] error
2052  *   Pointer to error structure.
2053  *
2054  * @return
2055  *   0 on success, a negative errno value otherwise and rte_errno is set.
2056  */
2057 static int
2058 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2059                                      uint64_t action_flags,
2060                                      const struct rte_flow_action actions[],
2061                                      struct rte_flow_error *error)
2062 {
2063         const struct rte_flow_action *action = actions;
2064         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2065
2066         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2067                 return rte_flow_error_set(error, EINVAL,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "VLAN VID value is too big");
2070         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2071             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2072                 return rte_flow_error_set(error, ENOTSUP,
2073                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2074                                           "set VLAN VID action must follow push"
2075                                           " VLAN action or match on VLAN item");
2076         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2077                 return rte_flow_error_set(error, ENOTSUP,
2078                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2079                                           "Multiple VLAN VID modifications are "
2080                                           "not supported");
2081         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2082                 return rte_flow_error_set(error, EINVAL,
2083                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2084                                           "wrong action order, port_id should "
2085                                           "be after set VLAN VID");
2086         return 0;
2087 }
2088
2089 /**
2090  * Validate the FLAG action.
2091  *
2092  * @param[in] dev
2093  *   Pointer to the rte_eth_dev structure.
2094  * @param[in] action_flags
2095  *   Holds the actions detected until now.
2096  * @param[in] attr
2097  *   Pointer to flow attributes
2098  * @param[out] error
2099  *   Pointer to error structure.
2100  *
2101  * @return
2102  *   0 on success, a negative errno value otherwise and rte_errno is set.
2103  */
2104 static int
2105 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2106                              uint64_t action_flags,
2107                              const struct rte_flow_attr *attr,
2108                              struct rte_flow_error *error)
2109 {
2110         struct mlx5_priv *priv = dev->data->dev_private;
2111         struct mlx5_dev_config *config = &priv->config;
2112         int ret;
2113
2114         /* Fall back if no extended metadata register support. */
2115         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2116                 return mlx5_flow_validate_action_flag(action_flags, attr,
2117                                                       error);
2118         /* Extensive metadata mode requires registers. */
2119         if (!mlx5_flow_ext_mreg_supported(dev))
2120                 return rte_flow_error_set(error, ENOTSUP,
2121                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2122                                           "no metadata registers "
2123                                           "to support flag action");
2124         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2125                 return rte_flow_error_set(error, ENOTSUP,
2126                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2127                                           "extended metadata register"
2128                                           " isn't available");
2129         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2130         if (ret < 0)
2131                 return ret;
2132         MLX5_ASSERT(ret > 0);
2133         if (action_flags & MLX5_FLOW_ACTION_MARK)
2134                 return rte_flow_error_set(error, EINVAL,
2135                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2136                                           "can't mark and flag in same flow");
2137         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2138                 return rte_flow_error_set(error, EINVAL,
2139                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2140                                           "can't have 2 flag"
2141                                           " actions in same flow");
2142         return 0;
2143 }
2144
2145 /**
2146  * Validate MARK action.
2147  *
2148  * @param[in] dev
2149  *   Pointer to the rte_eth_dev structure.
2150  * @param[in] action
2151  *   Pointer to action.
2152  * @param[in] action_flags
2153  *   Holds the actions detected until now.
2154  * @param[in] attr
2155  *   Pointer to flow attributes
2156  * @param[out] error
2157  *   Pointer to error structure.
2158  *
2159  * @return
2160  *   0 on success, a negative errno value otherwise and rte_errno is set.
2161  */
2162 static int
2163 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2164                              const struct rte_flow_action *action,
2165                              uint64_t action_flags,
2166                              const struct rte_flow_attr *attr,
2167                              struct rte_flow_error *error)
2168 {
2169         struct mlx5_priv *priv = dev->data->dev_private;
2170         struct mlx5_dev_config *config = &priv->config;
2171         const struct rte_flow_action_mark *mark = action->conf;
2172         int ret;
2173
2174         /* Fall back if no extended metadata register support. */
2175         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2176                 return mlx5_flow_validate_action_mark(action, action_flags,
2177                                                       attr, error);
2178         /* Extensive metadata mode requires registers. */
2179         if (!mlx5_flow_ext_mreg_supported(dev))
2180                 return rte_flow_error_set(error, ENOTSUP,
2181                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2182                                           "no metadata registers "
2183                                           "to support mark action");
2184         if (!priv->sh->dv_mark_mask)
2185                 return rte_flow_error_set(error, ENOTSUP,
2186                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2187                                           "extended metadata register"
2188                                           " isn't available");
2189         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2190         if (ret < 0)
2191                 return ret;
2192         MLX5_ASSERT(ret > 0);
2193         if (!mark)
2194                 return rte_flow_error_set(error, EINVAL,
2195                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2196                                           "configuration cannot be null");
2197         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2198                 return rte_flow_error_set(error, EINVAL,
2199                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2200                                           &mark->id,
2201                                           "mark id exceeds the limit");
2202         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2203                 return rte_flow_error_set(error, EINVAL,
2204                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2205                                           "can't flag and mark in same flow");
2206         if (action_flags & MLX5_FLOW_ACTION_MARK)
2207                 return rte_flow_error_set(error, EINVAL,
2208                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2209                                           "can't have 2 mark actions in same"
2210                                           " flow");
2211         return 0;
2212 }
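
/*
 * Illustrative sketch (not part of the driver): a MARK action; the id is
 * delivered to the application in mbuf->hash.fdir.hi with PKT_RX_FDIR_ID
 * set in ol_flags (id 42 is an arbitrary example within the register mask):
 *
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_MARK,
 *		.conf = &mark,
 *	};
 */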
2213
2214 /**
2215  * Validate SET_META action.
2216  *
2217  * @param[in] dev
2218  *   Pointer to the rte_eth_dev structure.
2219  * @param[in] action
2220  *   Pointer to the action structure.
2221  * @param[in] action_flags
2222  *   Holds the actions detected until now.
2223  * @param[in] attr
2224  *   Pointer to flow attributes
2225  * @param[out] error
2226  *   Pointer to error structure.
2227  *
2228  * @return
2229  *   0 on success, a negative errno value otherwise and rte_errno is set.
2230  */
2231 static int
2232 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2233                                  const struct rte_flow_action *action,
2234                                  uint64_t action_flags __rte_unused,
2235                                  const struct rte_flow_attr *attr,
2236                                  struct rte_flow_error *error)
2237 {
2238         const struct rte_flow_action_set_meta *conf;
2239         uint32_t nic_mask = UINT32_MAX;
2240         int reg;
2241
2242         if (!mlx5_flow_ext_mreg_supported(dev))
2243                 return rte_flow_error_set(error, ENOTSUP,
2244                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2245                                           "extended metadata register"
2246                                           " isn't supported");
2247         reg = flow_dv_get_metadata_reg(dev, attr, error);
2248         if (reg < 0)
2249                 return reg;
2250         if (reg != REG_A && reg != REG_B) {
2251                 struct mlx5_priv *priv = dev->data->dev_private;
2252
2253                 nic_mask = priv->sh->dv_meta_mask;
2254         }
2255         if (!(action->conf))
2256                 return rte_flow_error_set(error, EINVAL,
2257                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2258                                           "configuration cannot be null");
2259         conf = (const struct rte_flow_action_set_meta *)action->conf;
2260         if (!conf->mask)
2261                 return rte_flow_error_set(error, EINVAL,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "zero mask doesn't have any effect");
2264         if (conf->mask & ~nic_mask)
2265                 return rte_flow_error_set(error, EINVAL,
2266                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2267                                           "meta data must be within reg C0");
2268         return 0;
2269 }
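
/*
 * Illustrative sketch (not part of the driver): a SET_META action; the
 * non-zero mask selects which bits of data are written and must stay
 * within the bits backed by the metadata register (values are arbitrary
 * examples):
 *
 *	struct rte_flow_action_set_meta set_meta = {
 *		.data = 0x1234,
 *		.mask = 0xffff,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_META,
 *		.conf = &set_meta,
 *	};
 */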
2270
2271 /**
2272  * Validate SET_TAG action.
2273  *
2274  * @param[in] dev
2275  *   Pointer to the rte_eth_dev structure.
2276  * @param[in] action
2277  *   Pointer to the action structure.
2278  * @param[in] action_flags
2279  *   Holds the actions detected until now.
2280  * @param[in] attr
2281  *   Pointer to flow attributes
2282  * @param[out] error
2283  *   Pointer to error structure.
2284  *
2285  * @return
2286  *   0 on success, a negative errno value otherwise and rte_errno is set.
2287  */
2288 static int
2289 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2290                                 const struct rte_flow_action *action,
2291                                 uint64_t action_flags,
2292                                 const struct rte_flow_attr *attr,
2293                                 struct rte_flow_error *error)
2294 {
2295         const struct rte_flow_action_set_tag *conf;
2296         const uint64_t terminal_action_flags =
2297                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2298                 MLX5_FLOW_ACTION_RSS;
2299         int ret;
2300
2301         if (!mlx5_flow_ext_mreg_supported(dev))
2302                 return rte_flow_error_set(error, ENOTSUP,
2303                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2304                                           "extensive metadata register"
2305                                           " isn't supported");
2306         if (!(action->conf))
2307                 return rte_flow_error_set(error, EINVAL,
2308                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2309                                           "configuration cannot be null");
2310         conf = (const struct rte_flow_action_set_tag *)action->conf;
2311         if (!conf->mask)
2312                 return rte_flow_error_set(error, EINVAL,
2313                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2314                                           "zero mask doesn't have any effect");
2315         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2316         if (ret < 0)
2317                 return ret;
2318         if (!attr->transfer && attr->ingress &&
2319             (action_flags & terminal_action_flags))
2320                 return rte_flow_error_set(error, EINVAL,
2321                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2322                                           "set_tag has no effect"
2323                                           " with terminal actions");
2324         return 0;
2325 }
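
/*
 * Illustrative sketch (not part of the driver): a SET_TAG action storing a
 * value in tag register index 0, typically in the first table of a
 * multi-table pipeline and read back later with a TAG item (values are
 * arbitrary examples):
 *
 *	struct rte_flow_action_set_tag set_tag = {
 *		.data = 0xbeef,
 *		.mask = UINT32_MAX,
 *		.index = 0,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_TAG,
 *		.conf = &set_tag,
 *	};
 */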
2326
2327 /**
2328  * Validate count action.
2329  *
2330  * @param[in] dev
2331  *   Pointer to rte_eth_dev structure.
2332  * @param[out] error
2333  *   Pointer to error structure.
2334  *
2335  * @return
2336  *   0 on success, a negative errno value otherwise and rte_errno is set.
2337  */
2338 static int
2339 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2340                               struct rte_flow_error *error)
2341 {
2342         struct mlx5_priv *priv = dev->data->dev_private;
2343
2344         if (!priv->config.devx)
2345                 goto notsup_err;
2346 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2347         return 0;
2348 #endif
2349 notsup_err:
2350         return rte_flow_error_set
2351                       (error, ENOTSUP,
2352                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2353                        NULL,
2354                        "count action not supported");
2355 }
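
/*
 * Illustrative sketch (not part of the driver): once a flow carrying a
 * COUNT action has been created, the application reads the counter through
 * rte_flow_query(); port_id and flow are assumed to exist:
 *
 *	struct rte_flow_query_count count = { .reset = 0 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error qerr;
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &count, &qerr) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       count.hits, count.bytes);
 */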
2356
2357 /**
2358  * Validate the L2 encap action.
2359  *
2360  * @param[in] dev
2361  *   Pointer to the rte_eth_dev structure.
2362  * @param[in] action_flags
2363  *   Holds the actions detected until now.
2364  * @param[in] action
2365  *   Pointer to the action structure.
2366  * @param[in] attr
2367  *   Pointer to flow attributes.
2368  * @param[out] error
2369  *   Pointer to error structure.
2370  *
2371  * @return
2372  *   0 on success, a negative errno value otherwise and rte_errno is set.
2373  */
2374 static int
2375 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2376                                  uint64_t action_flags,
2377                                  const struct rte_flow_action *action,
2378                                  const struct rte_flow_attr *attr,
2379                                  struct rte_flow_error *error)
2380 {
2381         const struct mlx5_priv *priv = dev->data->dev_private;
2382
2383         if (!(action->conf))
2384                 return rte_flow_error_set(error, EINVAL,
2385                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2386                                           "configuration cannot be null");
2387         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2388                 return rte_flow_error_set(error, EINVAL,
2389                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2390                                           "can only have a single encap action "
2391                                           "in a flow");
2392         if (!attr->transfer && priv->representor)
2393                 return rte_flow_error_set(error, ENOTSUP,
2394                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2395                                           "encap action for VF representor "
2396                                           "not supported on NIC table");
2397         return 0;
2398 }
2399
2400 /**
2401  * Validate a decap action.
2402  *
2403  * @param[in] dev
2404  *   Pointer to the rte_eth_dev structure.
2405  * @param[in] action_flags
2406  *   Holds the actions detected until now.
2407  * @param[in] attr
2408  *   Pointer to flow attributes
2409  * @param[out] error
2410  *   Pointer to error structure.
2411  *
2412  * @return
2413  *   0 on success, a negative errno value otherwise and rte_errno is set.
2414  */
2415 static int
2416 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2417                               uint64_t action_flags,
2418                               const struct rte_flow_attr *attr,
2419                               struct rte_flow_error *error)
2420 {
2421         const struct mlx5_priv *priv = dev->data->dev_private;
2422
2423         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2424             !priv->config.decap_en)
2425                 return rte_flow_error_set(error, ENOTSUP,
2426                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2427                                           "decap is not enabled");
2428         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2429                 return rte_flow_error_set(error, ENOTSUP,
2430                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2431                                           action_flags &
2432                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2433                                           "have a single decap action" : "decap "
2434                                           "after encap is not supported");
2435         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2436                 return rte_flow_error_set(error, EINVAL,
2437                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2438                                           "can't have decap action after"
2439                                           " modify action");
2440         if (attr->egress)
2441                 return rte_flow_error_set(error, ENOTSUP,
2442                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2443                                           NULL,
2444                                           "decap action not supported for "
2445                                           "egress");
2446         if (!attr->transfer && priv->representor)
2447                 return rte_flow_error_set(error, ENOTSUP,
2448                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2449                                           "decap action for VF representor "
2450                                           "not supported on NIC table");
2451         return 0;
2452 }
2453
2454 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2455
2456 /**
2457  * Validate the raw encap and decap actions.
2458  *
2459  * @param[in] dev
2460  *   Pointer to the rte_eth_dev structure.
2461  * @param[in] decap
2462  *   Pointer to the decap action.
2463  * @param[in] encap
2464  *   Pointer to the encap action.
2465  * @param[in] attr
2466  *   Pointer to flow attributes
2467  * @param[in, out] action_flags
2468  *   Holds the actions detected until now.
2469  * @param[out] actions_n
2470  *   Pointer to the number of actions counter.
2471  * @param[out] error
2472  *   Pointer to error structure.
2473  *
2474  * @return
2475  *   0 on success, a negative errno value otherwise and rte_errno is set.
2476  */
2477 static int
2478 flow_dv_validate_action_raw_encap_decap
2479         (struct rte_eth_dev *dev,
2480          const struct rte_flow_action_raw_decap *decap,
2481          const struct rte_flow_action_raw_encap *encap,
2482          const struct rte_flow_attr *attr, uint64_t *action_flags,
2483          int *actions_n, struct rte_flow_error *error)
2484 {
2485         const struct mlx5_priv *priv = dev->data->dev_private;
2486         int ret;
2487
2488         if (encap && (!encap->size || !encap->data))
2489                 return rte_flow_error_set(error, EINVAL,
2490                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2491                                           "raw encap data cannot be empty");
2492         if (decap && encap) {
2493                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2494                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2495                         /* L3 encap. */
2496                         decap = NULL;
2497                 else if (encap->size <=
2498                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2499                            decap->size >
2500                            MLX5_ENCAPSULATION_DECISION_SIZE)
2501                         /* L3 decap. */
2502                         encap = NULL;
2503                 else if (encap->size >
2504                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2505                            decap->size >
2506                            MLX5_ENCAPSULATION_DECISION_SIZE)
2507                         /* 2 L2 actions: encap and decap. */
2508                         ;
2509                 else
2510                         return rte_flow_error_set(error,
2511                                 ENOTSUP,
2512                                 RTE_FLOW_ERROR_TYPE_ACTION,
2513                                 NULL, "unsupported combination: raw "
2514                                 "decap and raw encap sizes are "
2515                                 "both too small");
2516         }
2517         if (decap) {
2518                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2519                                                     error);
2520                 if (ret < 0)
2521                         return ret;
2522                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2523                 ++(*actions_n);
2524         }
2525         if (encap) {
2526                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2527                         return rte_flow_error_set(error, ENOTSUP,
2528                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2529                                                   NULL,
2530                                                   "small raw encap size");
2531                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2532                         return rte_flow_error_set(error, EINVAL,
2533                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2534                                                   NULL,
2535                                                   "more than one encap action");
2536                 if (!attr->transfer && priv->representor)
2537                         return rte_flow_error_set
2538                                         (error, ENOTSUP,
2539                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2540                                          "encap action for VF representor "
2541                                          "not supported on NIC table");
2542                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2543                 ++(*actions_n);
2544         }
2545         return 0;
2546 }
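
/*
 * Illustrative sketch (not part of the driver): the L3 decap case
 * recognized above, e.g. stripping an outer Ethernet/IPv4/UDP/VXLAN stack
 * and restoring a bare Ethernet header. It assumes the outer stack exceeds
 * MLX5_ENCAPSULATION_DECISION_SIZE while the plain L2 header does not;
 * outer_headers, eth_header and queue are assumed to be defined elsewhere:
 *
 *	struct rte_flow_action_raw_decap decap = {
 *		.data = outer_headers,
 *		.size = sizeof(outer_headers),
 *	};
 *	struct rte_flow_action_raw_encap encap = {
 *		.data = eth_header,
 *		.size = RTE_ETHER_HDR_LEN,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */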
2547
2548 /**
2549  * Find existing encap/decap resource or create and register a new one.
2550  *
2551  * @param[in, out] dev
2552  *   Pointer to rte_eth_dev structure.
2553  * @param[in, out] resource
2554  *   Pointer to encap/decap resource.
2555  * @param[in, out] dev_flow
2556  *   Pointer to the dev_flow.
2557  * @param[out] error
2558  *   Pointer to error structure.
2559  *
2560  * @return
2561  *   0 on success, a negative errno value otherwise and rte_errno is set.
2562  */
2563 static int
2564 flow_dv_encap_decap_resource_register
2565                         (struct rte_eth_dev *dev,
2566                          struct mlx5_flow_dv_encap_decap_resource *resource,
2567                          struct mlx5_flow *dev_flow,
2568                          struct rte_flow_error *error)
2569 {
2570         struct mlx5_priv *priv = dev->data->dev_private;
2571         struct mlx5_dev_ctx_shared *sh = priv->sh;
2572         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2573         struct mlx5dv_dr_domain *domain;
2574         uint32_t idx = 0;
2575         int ret;
2576
2577         resource->flags = dev_flow->dv.group ? 0 : 1;
2578         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2579                 domain = sh->fdb_domain;
2580         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2581                 domain = sh->rx_domain;
2582         else
2583                 domain = sh->tx_domain;
2584         /* Lookup a matching resource from cache. */
2585         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
2586                       cache_resource, next) {
2587                 if (resource->reformat_type == cache_resource->reformat_type &&
2588                     resource->ft_type == cache_resource->ft_type &&
2589                     resource->flags == cache_resource->flags &&
2590                     resource->size == cache_resource->size &&
2591                     !memcmp((const void *)resource->buf,
2592                             (const void *)cache_resource->buf,
2593                             resource->size)) {
2594                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2595                                 (void *)cache_resource,
2596                                 rte_atomic32_read(&cache_resource->refcnt));
2597                         rte_atomic32_inc(&cache_resource->refcnt);
2598                         dev_flow->handle->dvh.rix_encap_decap = idx;
2599                         dev_flow->dv.encap_decap = cache_resource;
2600                         return 0;
2601                 }
2602         }
2603         /* Register new encap/decap resource. */
2604         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2605                                        &dev_flow->handle->dvh.rix_encap_decap);
2606         if (!cache_resource)
2607                 return rte_flow_error_set(error, ENOMEM,
2608                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2609                                           "cannot allocate resource memory");
2610         *cache_resource = *resource;
2611         ret = mlx5_flow_os_create_flow_action_packet_reformat
2612                                         (sh->ctx, domain, cache_resource,
2613                                          &cache_resource->action);
2614         if (ret) {
2615                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], dev_flow->handle->dvh.rix_encap_decap);
2616                 return rte_flow_error_set(error, ENOMEM,
2617                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2618                                           NULL, "cannot create action");
2619         }
2620         rte_atomic32_init(&cache_resource->refcnt);
2621         rte_atomic32_inc(&cache_resource->refcnt);
2622         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
2623                      dev_flow->handle->dvh.rix_encap_decap, cache_resource,
2624                      next);
2625         dev_flow->dv.encap_decap = cache_resource;
2626         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2627                 (void *)cache_resource,
2628                 rte_atomic32_read(&cache_resource->refcnt));
2629         return 0;
2630 }
2631
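/*
 * Note: flow_dv_encap_decap_resource_register() above follows the
 * find-or-create pattern shared by all the *_resource_register() helpers
 * in this file. In illustrative pseudo-code (names below are not real
 * symbols):
 *
 *	entry = lookup_in_ilist(key);
 *	if (entry) {
 *		refcnt++;                    // Share the cached HW action.
 *		return entry;
 *	}
 *	entry = mlx5_ipool_zmalloc(...);     // Index-based allocation.
 *	entry->action = create_hw_action(); // rdma-core/DevX object.
 *	refcnt = 1;
 *	ilist_insert(entry);
 *	return entry;
 */
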
2632 /**
2633  * Find existing table jump resource or create and register a new one.
2634  *
2635  * @param[in, out] dev
2636  *   Pointer to rte_eth_dev structure.
2637  * @param[in, out] tbl
2638  *   Pointer to flow table resource.
2639  * @param[in, out] dev_flow
2640  *   Pointer to the dev_flow.
2641  * @param[out] error
2642  *   Pointer to error structure.
2643  *
2644  * @return
2645  *   0 on success, otherwise -errno and errno is set.
2646  */
2647 static int
2648 flow_dv_jump_tbl_resource_register
2649                         (struct rte_eth_dev *dev __rte_unused,
2650                          struct mlx5_flow_tbl_resource *tbl,
2651                          struct mlx5_flow *dev_flow,
2652                          struct rte_flow_error *error)
2653 {
2654         struct mlx5_flow_tbl_data_entry *tbl_data =
2655                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2656         int cnt, ret;
2657
2658         MLX5_ASSERT(tbl);
2659         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2660         if (!cnt) {
2661                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
2662                                 (tbl->obj, &tbl_data->jump.action);
2663                 if (ret)
2664                         return rte_flow_error_set(error, ENOMEM,
2665                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2666                                         NULL, "cannot create jump action");
2667                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2668                         (void *)&tbl_data->jump, cnt);
2669         } else {
2670                 /* Reusing a jump action must not add a table reference; drop it. */
2671                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2672                 MLX5_ASSERT(tbl_data->jump.action);
2673                 DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
2674                         (void *)&tbl_data->jump, cnt);
2675         }
2676         rte_atomic32_inc(&tbl_data->jump.refcnt);
2677         dev_flow->handle->rix_jump = tbl_data->idx;
2678         dev_flow->dv.jump = &tbl_data->jump;
2679         return 0;
2680 }
2681
2682 /**
2683  * Find existing default miss resource or create and register a new one.
2684  *
2685  * @param[in, out] dev
2686  *   Pointer to rte_eth_dev structure.
2687  * @param[out] error
2688  *   Pointer to error structure.
2689  *
2690  * @return
2691  *   0 on success, otherwise -errno and errno is set.
2692  */
2693 static int
2694 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
2695                 struct rte_flow_error *error)
2696 {
2697         struct mlx5_priv *priv = dev->data->dev_private;
2698         struct mlx5_dev_ctx_shared *sh = priv->sh;
2699         struct mlx5_flow_default_miss_resource *cache_resource =
2700                         &sh->default_miss;
2701         int cnt = rte_atomic32_read(&cache_resource->refcnt);
2702
2703         if (!cnt) {
2704                 /* Create the shared action on the first reference. */
2705                 cache_resource->action =
2706                         mlx5_glue->dr_create_flow_action_default_miss();
2707                 if (!cache_resource->action)
2708                         return rte_flow_error_set(error, ENOMEM,
2709                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2710                                         "cannot create default miss action");
2711                 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
2712                                 (void *)cache_resource->action, cnt);
2713         }
2714         rte_atomic32_inc(&cache_resource->refcnt);
2715         return 0;
2716 }
2717
2718 /**
2719  * Find existing table port ID resource or create and register a new one.
2720  *
2721  * @param[in, out] dev
2722  *   Pointer to rte_eth_dev structure.
2723  * @param[in, out] resource
2724  *   Pointer to port ID action resource.
2725  * @param[in, out] dev_flow
2726  *   Pointer to the dev_flow.
2727  * @param[out] error
2728  *   Pointer to error structure.
2729  *
2730  * @return
2731  *   0 on success, otherwise -errno and errno is set.
2732  */
2733 static int
2734 flow_dv_port_id_action_resource_register
2735                         (struct rte_eth_dev *dev,
2736                          struct mlx5_flow_dv_port_id_action_resource *resource,
2737                          struct mlx5_flow *dev_flow,
2738                          struct rte_flow_error *error)
2739 {
2740         struct mlx5_priv *priv = dev->data->dev_private;
2741         struct mlx5_dev_ctx_shared *sh = priv->sh;
2742         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2743         uint32_t idx = 0;
2744         int ret;
2745
2746         /* Lookup a matching resource from cache. */
2747         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
2748                       idx, cache_resource, next) {
2749                 if (resource->port_id == cache_resource->port_id) {
2750                         DRV_LOG(DEBUG, "port id action resource %p: "
2751                                 "refcnt %d++",
2752                                 (void *)cache_resource,
2753                                 rte_atomic32_read(&cache_resource->refcnt));
2754                         rte_atomic32_inc(&cache_resource->refcnt);
2755                         dev_flow->handle->rix_port_id_action = idx;
2756                         dev_flow->dv.port_id_action = cache_resource;
2757                         return 0;
2758                 }
2759         }
2760         /* Register new port id action resource. */
2761         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
2762                                        &dev_flow->handle->rix_port_id_action);
2763         if (!cache_resource)
2764                 return rte_flow_error_set(error, ENOMEM,
2765                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2766                                           "cannot allocate resource memory");
2767         *cache_resource = *resource;
2768         ret = mlx5_flow_os_create_flow_action_dest_port
2769                                 (priv->sh->fdb_domain, resource->port_id,
2770                                  &cache_resource->action);
2771         if (ret) {
2772                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], dev_flow->handle->rix_port_id_action);
2773                 return rte_flow_error_set(error, ENOMEM,
2774                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2775                                           NULL, "cannot create action");
2776         }
2777         rte_atomic32_init(&cache_resource->refcnt);
2778         rte_atomic32_inc(&cache_resource->refcnt);
2779         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
2780                      dev_flow->handle->rix_port_id_action, cache_resource,
2781                      next);
2782         dev_flow->dv.port_id_action = cache_resource;
2783         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2784                 (void *)cache_resource,
2785                 rte_atomic32_read(&cache_resource->refcnt));
2786         return 0;
2787 }
2788
2789 /**
2790  * Find existing push vlan resource or create and register a new one.
2791  *
2792  * @param[in, out] dev
2793  *   Pointer to rte_eth_dev structure.
2794  * @param[in, out] resource
2795  *   Pointer to push VLAN action resource.
2796  * @param[in, out] dev_flow
2797  *   Pointer to the dev_flow.
2798  * @param[out] error
2799  *   Pointer to error structure.
2800  *
2801  * @return
2802  *   0 on success, otherwise -errno and errno is set.
2803  */
2804 static int
2805 flow_dv_push_vlan_action_resource_register
2806                        (struct rte_eth_dev *dev,
2807                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2808                         struct mlx5_flow *dev_flow,
2809                         struct rte_flow_error *error)
2810 {
2811         struct mlx5_priv *priv = dev->data->dev_private;
2812         struct mlx5_dev_ctx_shared *sh = priv->sh;
2813         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2814         struct mlx5dv_dr_domain *domain;
2815         uint32_t idx = 0;
2816         int ret;
2817
2818         /* Lookup a matching resource from cache. */
2819         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2820                       sh->push_vlan_action_list, idx, cache_resource, next) {
2821                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2822                     resource->ft_type == cache_resource->ft_type) {
2823                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2824                                 "refcnt %d++",
2825                                 (void *)cache_resource,
2826                                 rte_atomic32_read(&cache_resource->refcnt));
2827                         rte_atomic32_inc(&cache_resource->refcnt);
2828                         dev_flow->handle->dvh.rix_push_vlan = idx;
2829                         dev_flow->dv.push_vlan_res = cache_resource;
2830                         return 0;
2831                 }
2832         }
2833         /* Register new push_vlan action resource. */
2834         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2835                                        &dev_flow->handle->dvh.rix_push_vlan);
2836         if (!cache_resource)
2837                 return rte_flow_error_set(error, ENOMEM,
2838                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2839                                           "cannot allocate resource memory");
2840         *cache_resource = *resource;
2841         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2842                 domain = sh->fdb_domain;
2843         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2844                 domain = sh->rx_domain;
2845         else
2846                 domain = sh->tx_domain;
2847         ret = mlx5_flow_os_create_flow_action_push_vlan
2848                                         (domain, resource->vlan_tag,
2849                                          &cache_resource->action);
2850         if (ret) {
2851                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], dev_flow->handle->dvh.rix_push_vlan);
2852                 return rte_flow_error_set(error, ENOMEM,
2853                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2854                                           NULL, "cannot create action");
2855         }
2856         rte_atomic32_init(&cache_resource->refcnt);
2857         rte_atomic32_inc(&cache_resource->refcnt);
2858         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2859                      &sh->push_vlan_action_list,
2860                      dev_flow->handle->dvh.rix_push_vlan,
2861                      cache_resource, next);
2862         dev_flow->dv.push_vlan_res = cache_resource;
2863         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2864                 (void *)cache_resource,
2865                 rte_atomic32_read(&cache_resource->refcnt));
2866         return 0;
2867 }
2868 /**
2869  * Get the header size of a specific rte_flow_item_type.
2870  *
2871  * @param[in] item_type
2872  *   Tested rte_flow_item_type.
2873  *
2874  * @return
2875  *   Size of the item type's header struct, 0 if void or irrelevant.
2876  */
2877 static size_t
2878 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2879 {
2880         size_t retval;
2881
2882         switch (item_type) {
2883         case RTE_FLOW_ITEM_TYPE_ETH:
2884                 retval = sizeof(struct rte_flow_item_eth);
2885                 break;
2886         case RTE_FLOW_ITEM_TYPE_VLAN:
2887                 retval = sizeof(struct rte_flow_item_vlan);
2888                 break;
2889         case RTE_FLOW_ITEM_TYPE_IPV4:
2890                 retval = sizeof(struct rte_flow_item_ipv4);
2891                 break;
2892         case RTE_FLOW_ITEM_TYPE_IPV6:
2893                 retval = sizeof(struct rte_flow_item_ipv6);
2894                 break;
2895         case RTE_FLOW_ITEM_TYPE_UDP:
2896                 retval = sizeof(struct rte_flow_item_udp);
2897                 break;
2898         case RTE_FLOW_ITEM_TYPE_TCP:
2899                 retval = sizeof(struct rte_flow_item_tcp);
2900                 break;
2901         case RTE_FLOW_ITEM_TYPE_VXLAN:
2902                 retval = sizeof(struct rte_flow_item_vxlan);
2903                 break;
2904         case RTE_FLOW_ITEM_TYPE_GRE:
2905                 retval = sizeof(struct rte_flow_item_gre);
2906                 break;
2907         case RTE_FLOW_ITEM_TYPE_NVGRE:
2908                 retval = sizeof(struct rte_flow_item_nvgre);
2909                 break;
2910         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2911                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2912                 break;
2913         case RTE_FLOW_ITEM_TYPE_MPLS:
2914                 retval = sizeof(struct rte_flow_item_mpls);
2915                 break;
2916         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2917         default:
2918                 retval = 0;
2919                 break;
2920         }
2921         return retval;
2922 }
2923
2924 #define MLX5_ENCAP_IPV4_VERSION         0x40
2925 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2926 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2927 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2928 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2929 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2930 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
2931
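/*
 * Note: these defaults fill header fields the application left as zero,
 * e.g. MLX5_ENCAP_IPV4_VERSION | MLX5_ENCAP_IPV4_IHL_MIN == 0x45, the
 * first byte of a minimal IPv4 header (version 4, IHL of 5 words), and
 * MLX5_ENCAP_IPV6_VTC_FLOW sets only the version nibble to 6.
 */
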
2932  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
2933  * Convert the encap action data from list of rte_flow_item to raw buffer
2934  *
2935  * @param[in] items
2936  *   Pointer to rte_flow_item objects list.
2937  * @param[out] buf
2938  *   Pointer to the output buffer.
2939  * @param[out] size
2940  *   Pointer to the output buffer size.
2941  * @param[out] error
2942  *   Pointer to the error structure.
2943  *
2944  * @return
2945  *   0 on success, a negative errno value otherwise and rte_errno is set.
2946  */
2947 static int
2948 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2949                            size_t *size, struct rte_flow_error *error)
2950 {
2951         struct rte_ether_hdr *eth = NULL;
2952         struct rte_vlan_hdr *vlan = NULL;
2953         struct rte_ipv4_hdr *ipv4 = NULL;
2954         struct rte_ipv6_hdr *ipv6 = NULL;
2955         struct rte_udp_hdr *udp = NULL;
2956         struct rte_vxlan_hdr *vxlan = NULL;
2957         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2958         struct rte_gre_hdr *gre = NULL;
2959         size_t len;
2960         size_t temp_size = 0;
2961
2962         if (!items)
2963                 return rte_flow_error_set(error, EINVAL,
2964                                           RTE_FLOW_ERROR_TYPE_ACTION,
2965                                           NULL, "invalid empty data");
2966         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2967                 len = flow_dv_get_item_len(items->type);
2968                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2969                         return rte_flow_error_set(error, EINVAL,
2970                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2971                                                   (void *)items->type,
2972                                                   "items total size is too big"
2973                                                   " for encap action");
2974                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2975                 switch (items->type) {
2976                 case RTE_FLOW_ITEM_TYPE_ETH:
2977                         eth = (struct rte_ether_hdr *)&buf[temp_size];
2978                         break;
2979                 case RTE_FLOW_ITEM_TYPE_VLAN:
2980                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2981                         if (!eth)
2982                                 return rte_flow_error_set(error, EINVAL,
2983                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2984                                                 (void *)items->type,
2985                                                 "eth header not found");
2986                         if (!eth->ether_type)
2987                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2988                         break;
2989                 case RTE_FLOW_ITEM_TYPE_IPV4:
2990                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2991                         if (!vlan && !eth)
2992                                 return rte_flow_error_set(error, EINVAL,
2993                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2994                                                 (void *)items->type,
2995                                                 "neither eth nor vlan"
2996                                                 " header found");
2997                         if (vlan && !vlan->eth_proto)
2998                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2999                         else if (eth && !eth->ether_type)
3000                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3001                         if (!ipv4->version_ihl)
3002                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3003                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3004                         if (!ipv4->time_to_live)
3005                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3006                         break;
3007                 case RTE_FLOW_ITEM_TYPE_IPV6:
3008                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3009                         if (!vlan && !eth)
3010                                 return rte_flow_error_set(error, EINVAL,
3011                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3012                                                 (void *)items->type,
3013                                                 "neither eth nor vlan"
3014                                                 " header found");
3015                         if (vlan && !vlan->eth_proto)
3016                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3017                         else if (eth && !eth->ether_type)
3018                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3019                         if (!ipv6->vtc_flow)
3020                                 ipv6->vtc_flow =
3021                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3022                         if (!ipv6->hop_limits)
3023                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3024                         break;
3025                 case RTE_FLOW_ITEM_TYPE_UDP:
3026                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3027                         if (!ipv4 && !ipv6)
3028                                 return rte_flow_error_set(error, EINVAL,
3029                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3030                                                 (void *)items->type,
3031                                                 "ip header not found");
3032                         if (ipv4 && !ipv4->next_proto_id)
3033                                 ipv4->next_proto_id = IPPROTO_UDP;
3034                         else if (ipv6 && !ipv6->proto)
3035                                 ipv6->proto = IPPROTO_UDP;
3036                         break;
3037                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3038                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3039                         if (!udp)
3040                                 return rte_flow_error_set(error, EINVAL,
3041                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3042                                                 (void *)items->type,
3043                                                 "udp header not found");
3044                         if (!udp->dst_port)
3045                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3046                         if (!vxlan->vx_flags)
3047                                 vxlan->vx_flags =
3048                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3049                         break;
3050                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3051                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3052                         if (!udp)
3053                                 return rte_flow_error_set(error, EINVAL,
3054                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3055                                                 (void *)items->type,
3056                                                 "udp header not found");
3057                         if (!vxlan_gpe->proto)
3058                                 return rte_flow_error_set(error, EINVAL,
3059                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3060                                                 (void *)items->type,
3061                                                 "next protocol not found");
3062                         if (!udp->dst_port)
3063                                 udp->dst_port =
3064                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3065                         if (!vxlan_gpe->vx_flags)
3066                                 vxlan_gpe->vx_flags =
3067                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3068                         break;
3069                 case RTE_FLOW_ITEM_TYPE_GRE:
3070                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3071                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3072                         if (!gre->proto)
3073                                 return rte_flow_error_set(error, EINVAL,
3074                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3075                                                 (void *)items->type,
3076                                                 "next protocol not found");
3077                         if (!ipv4 && !ipv6)
3078                                 return rte_flow_error_set(error, EINVAL,
3079                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3080                                                 (void *)items->type,
3081                                                 "ip header not found");
3082                         if (ipv4 && !ipv4->next_proto_id)
3083                                 ipv4->next_proto_id = IPPROTO_GRE;
3084                         else if (ipv6 && !ipv6->proto)
3085                                 ipv6->proto = IPPROTO_GRE;
3086                         break;
3087                 case RTE_FLOW_ITEM_TYPE_VOID:
3088                         break;
3089                 default:
3090                         return rte_flow_error_set(error, EINVAL,
3091                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3092                                                   (void *)items->type,
3093                                                   "unsupported item type");
3094                         break;
3095                 }
3096                 temp_size += len;
3097         }
3098         *size = temp_size;
3099         return 0;
3100 }
3101
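/*
 * Illustrative sketch of one way to exercise flow_dv_convert_encap_data()
 * with a minimal VXLAN definition: zeroed fields (ether_type, IPv4
 * version/TTL, UDP destination port, VXLAN flags) get the defaults above.
 * The guard macro, helper name and addresses are hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
example_build_vxlan_encap(uint8_t *buf, size_t *size,
			  struct rte_flow_error *error)
{
	struct rte_flow_item_eth eth = { .type = 0 };
	struct rte_flow_item_ipv4 ipv4 = {
		.hdr.src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
		.hdr.dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
	};
	struct rte_flow_item_udp udp = { .hdr.src_port = RTE_BE16(12345) };
	struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Caller provides buf of at least MLX5_ENCAP_MAX_LEN bytes. */
	return flow_dv_convert_encap_data(items, buf, size, error);
}
#endif
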
3102 static int
3103 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3104 {
3105         struct rte_ether_hdr *eth = NULL;
3106         struct rte_vlan_hdr *vlan = NULL;
3107         struct rte_ipv6_hdr *ipv6 = NULL;
3108         struct rte_udp_hdr *udp = NULL;
3109         char *next_hdr;
3110         uint16_t proto;
3111
3112         eth = (struct rte_ether_hdr *)data;
3113         next_hdr = (char *)(eth + 1);
3114         proto = rte_be_to_cpu_16(eth->ether_type);
3115
3116         /* VLAN skipping */
3117         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3118                 vlan = (struct rte_vlan_hdr *)next_hdr;
3119                 proto = rte_be_to_cpu_16(vlan->eth_proto);
3120                 next_hdr += sizeof(struct rte_vlan_hdr);
3121         }
3122
3123         /* HW calculates the IPv4 checksum; no need to proceed. */
3124         if (proto == RTE_ETHER_TYPE_IPV4)
3125                 return 0;
3126
3127         /* A non-IPv4/IPv6 header is not supported. */
3128         if (proto != RTE_ETHER_TYPE_IPV6) {
3129                 return rte_flow_error_set(error, ENOTSUP,
3130                                           RTE_FLOW_ERROR_TYPE_ACTION,
3131                                           NULL, "Cannot offload non IPv4/IPv6");
3132         }
3133
3134         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3135
3136         /* Ignore non-UDP payload. */
3137         if (ipv6->proto != IPPROTO_UDP)
3138                 return 0;
3139
3140         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3141         udp->dgram_cksum = 0;
3142
3143         return 0;
3144 }
3145
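/*
 * Note: the helper above clears the outer UDP checksum only for IPv6
 * encapsulation headers. The HW recomputes IPv4 header checksums but does
 * not fill in the UDP one, and a zero UDP checksum over IPv6 is acceptable
 * for tunnel traffic (RFC 6935/6936), so zeroing it here keeps the emitted
 * outer header valid.
 */
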
3146 /**
3147  * Convert L2 encap action to DV specification.
3148  *
3149  * @param[in] dev
3150  *   Pointer to rte_eth_dev structure.
3151  * @param[in] action
3152  *   Pointer to action structure.
3153  * @param[in, out] dev_flow
3154  *   Pointer to the mlx5_flow.
3155  * @param[in] transfer
3156  *   Mark if the flow is E-Switch flow.
3157  * @param[out] error
3158  *   Pointer to the error structure.
3159  *
3160  * @return
3161  *   0 on success, a negative errno value otherwise and rte_errno is set.
3162  */
3163 static int
3164 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3165                                const struct rte_flow_action *action,
3166                                struct mlx5_flow *dev_flow,
3167                                uint8_t transfer,
3168                                struct rte_flow_error *error)
3169 {
3170         const struct rte_flow_item *encap_data;
3171         const struct rte_flow_action_raw_encap *raw_encap_data;
3172         struct mlx5_flow_dv_encap_decap_resource res = {
3173                 .reformat_type =
3174                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3175                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3176                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3177         };
3178
3179         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3180                 raw_encap_data =
3181                         (const struct rte_flow_action_raw_encap *)action->conf;
3182                 res.size = raw_encap_data->size;
3183                 memcpy(res.buf, raw_encap_data->data, res.size);
3184         } else {
3185                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3186                         encap_data =
3187                                 ((const struct rte_flow_action_vxlan_encap *)
3188                                                 action->conf)->definition;
3189                 else
3190                         encap_data =
3191                                 ((const struct rte_flow_action_nvgre_encap *)
3192                                                 action->conf)->definition;
3193                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3194                                                &res.size, error))
3195                         return -rte_errno;
3196         }
3197         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3198                 return -rte_errno;
3199         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3200                 return rte_flow_error_set(error, EINVAL,
3201                                           RTE_FLOW_ERROR_TYPE_ACTION,
3202                                           NULL, "can't create L2 encap action");
3203         return 0;
3204 }
3205
3206 /**
3207  * Convert L2 decap action to DV specification.
3208  *
3209  * @param[in] dev
3210  *   Pointer to rte_eth_dev structure.
3211  * @param[in, out] dev_flow
3212  *   Pointer to the mlx5_flow.
3213  * @param[in] transfer
3214  *   Mark if the flow is E-Switch flow.
3215  * @param[out] error
3216  *   Pointer to the error structure.
3217  *
3218  * @return
3219  *   0 on success, a negative errno value otherwise and rte_errno is set.
3220  */
3221 static int
3222 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3223                                struct mlx5_flow *dev_flow,
3224                                uint8_t transfer,
3225                                struct rte_flow_error *error)
3226 {
3227         struct mlx5_flow_dv_encap_decap_resource res = {
3228                 .size = 0,
3229                 .reformat_type =
3230                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3231                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3232                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3233         };
3234
3235         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3236                 return rte_flow_error_set(error, EINVAL,
3237                                           RTE_FLOW_ERROR_TYPE_ACTION,
3238                                           NULL, "can't create L2 decap action");
3239         return 0;
3240 }
3241
3242 /**
3243  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3244  *
3245  * @param[in] dev
3246  *   Pointer to rte_eth_dev structure.
3247  * @param[in] action
3248  *   Pointer to action structure.
3249  * @param[in, out] dev_flow
3250  *   Pointer to the mlx5_flow.
3251  * @param[in] attr
3252  *   Pointer to the flow attributes.
3253  * @param[out] error
3254  *   Pointer to the error structure.
3255  *
3256  * @return
3257  *   0 on success, a negative errno value otherwise and rte_errno is set.
3258  */
3259 static int
3260 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3261                                 const struct rte_flow_action *action,
3262                                 struct mlx5_flow *dev_flow,
3263                                 const struct rte_flow_attr *attr,
3264                                 struct rte_flow_error *error)
3265 {
3266         const struct rte_flow_action_raw_encap *encap_data;
3267         struct mlx5_flow_dv_encap_decap_resource res;
3268
3269         memset(&res, 0, sizeof(res));
3270         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3271         res.size = encap_data->size;
3272         memcpy(res.buf, encap_data->data, res.size);
3273         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3274                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3275                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3276         if (attr->transfer)
3277                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3278         else
3279                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3280                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3281         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3282                 return rte_flow_error_set(error, EINVAL,
3283                                           RTE_FLOW_ERROR_TYPE_ACTION,
3284                                           NULL, "can't create encap action");
3285         return 0;
3286 }
3287
3288 /**
3289  * Create action push VLAN.
3290  *
3291  * @param[in] dev
3292  *   Pointer to rte_eth_dev structure.
3293  * @param[in] attr
3294  *   Pointer to the flow attributes.
3295  * @param[in] vlan
3296  *   Pointer to the vlan to push to the Ethernet header.
3297  * @param[in, out] dev_flow
3298  *   Pointer to the mlx5_flow.
3299  * @param[out] error
3300  *   Pointer to the error structure.
3301  *
3302  * @return
3303  *   0 on success, a negative errno value otherwise and rte_errno is set.
3304  */
3305 static int
3306 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3307                                 const struct rte_flow_attr *attr,
3308                                 const struct rte_vlan_hdr *vlan,
3309                                 struct mlx5_flow *dev_flow,
3310                                 struct rte_flow_error *error)
3311 {
3312         struct mlx5_flow_dv_push_vlan_action_resource res;
3313
3314         memset(&res, 0, sizeof(res));
3315         res.vlan_tag =
3316                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3317                                  vlan->vlan_tci);
3318         if (attr->transfer)
3319                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3320         else
3321                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3322                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3323         return flow_dv_push_vlan_action_resource_register
3324                                             (dev, &res, dev_flow, error);
3325 }
3326
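/*
 * Note: the 32-bit vlan_tag above packs the TPID in the upper 16 bits and
 * the TCI (PCP/DEI/VID) in the lower 16, converted to big-endian, e.g.
 * eth_proto 0x8100 with vlan_tci 0x0064 (VID 100) yields the value below.
 * The guard macro and variable name are illustrative only.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const rte_be32_t example_push_vlan_tag =
	RTE_BE32((0x8100u << 16) | 0x0064u); /* 0x81000064 */
#endif
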
3327 /**
3328  * Validate the modify-header actions.
3329  *
3330  * @param[in] action_flags
3331  *   Holds the actions detected until now.
3332  * @param[in] action
3333  *   Pointer to the modify action.
3334  * @param[out] error
3335  *   Pointer to error structure.
3336  *
3337  * @return
3338  *   0 on success, a negative errno value otherwise and rte_errno is set.
3339  */
3340 static int
3341 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3342                                    const struct rte_flow_action *action,
3343                                    struct rte_flow_error *error)
3344 {
3345         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3346                 return rte_flow_error_set(error, EINVAL,
3347                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3348                                           NULL, "action configuration not set");
3349         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3350                 return rte_flow_error_set(error, EINVAL,
3351                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3352                                           "can't have encap action before"
3353                                           " modify action");
3354         return 0;
3355 }
3356
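/*
 * Illustrative sketch: the ordering rule above means header rewrites must
 * precede any encap in the same flow. The action array below would pass
 * this check, while swapping the two non-END entries would fail with
 * "can't have encap action before modify action". The guard macro and
 * array name are hypothetical; conf pointers are omitted for brevity.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action example_modify_then_encap[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC }, /* Modify first. */
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP },  /* Then encap. */
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif
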
3357 /**
3358  * Validate the modify-header MAC address actions.
3359  *
3360  * @param[in] action_flags
3361  *   Holds the actions detected until now.
3362  * @param[in] action
3363  *   Pointer to the modify action.
3364  * @param[in] item_flags
3365  *   Holds the items detected.
3366  * @param[out] error
3367  *   Pointer to error structure.
3368  *
3369  * @return
3370  *   0 on success, a negative errno value otherwise and rte_errno is set.
3371  */
3372 static int
3373 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3374                                    const struct rte_flow_action *action,
3375                                    const uint64_t item_flags,
3376                                    struct rte_flow_error *error)
3377 {
3378         int ret = 0;
3379
3380         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3381         if (!ret) {
3382                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3383                         return rte_flow_error_set(error, EINVAL,
3384                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3385                                                   NULL,
3386                                                   "no L2 item in pattern");
3387         }
3388         return ret;
3389 }
3390
3391 /**
3392  * Validate the modify-header IPv4 address actions.
3393  *
3394  * @param[in] action_flags
3395  *   Holds the actions detected until now.
3396  * @param[in] action
3397  *   Pointer to the modify action.
3398  * @param[in] item_flags
3399  *   Holds the items detected.
3400  * @param[out] error
3401  *   Pointer to error structure.
3402  *
3403  * @return
3404  *   0 on success, a negative errno value otherwise and rte_errno is set.
3405  */
3406 static int
3407 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3408                                     const struct rte_flow_action *action,
3409                                     const uint64_t item_flags,
3410                                     struct rte_flow_error *error)
3411 {
3412         int ret = 0;
3413         uint64_t layer;
3414
3415         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3416         if (!ret) {
3417                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3418                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3419                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3420                 if (!(item_flags & layer))
3421                         return rte_flow_error_set(error, EINVAL,
3422                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3423                                                   NULL,
3424                                                   "no ipv4 item in pattern");
3425         }
3426         return ret;
3427 }
3428
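/*
 * Note: this validator and the IPv6/TP/TCP/TTL ones below share one
 * layer-selection rule: once a decap action was seen, the headers being
 * rewritten are the inner ones, so the pattern must match the
 * corresponding MLX5_FLOW_LAYER_INNER_* layer instead of the outer one.
 */
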
3429 /**
3430  * Validate the modify-header IPv6 address actions.
3431  *
3432  * @param[in] action_flags
3433  *   Holds the actions detected until now.
3434  * @param[in] action
3435  *   Pointer to the modify action.
3436  * @param[in] item_flags
3437  *   Holds the items detected.
3438  * @param[out] error
3439  *   Pointer to error structure.
3440  *
3441  * @return
3442  *   0 on success, a negative errno value otherwise and rte_errno is set.
3443  */
3444 static int
3445 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3446                                     const struct rte_flow_action *action,
3447                                     const uint64_t item_flags,
3448                                     struct rte_flow_error *error)
3449 {
3450         int ret = 0;
3451         uint64_t layer;
3452
3453         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3454         if (!ret) {
3455                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3456                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3457                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3458                 if (!(item_flags & layer))
3459                         return rte_flow_error_set(error, EINVAL,
3460                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3461                                                   NULL,
3462                                                   "no ipv6 item in pattern");
3463         }
3464         return ret;
3465 }
3466
3467 /**
3468  * Validate the modify-header TP actions.
3469  *
3470  * @param[in] action_flags
3471  *   Holds the actions detected until now.
3472  * @param[in] action
3473  *   Pointer to the modify action.
3474  * @param[in] item_flags
3475  *   Holds the items detected.
3476  * @param[out] error
3477  *   Pointer to error structure.
3478  *
3479  * @return
3480  *   0 on success, a negative errno value otherwise and rte_errno is set.
3481  */
3482 static int
3483 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3484                                   const struct rte_flow_action *action,
3485                                   const uint64_t item_flags,
3486                                   struct rte_flow_error *error)
3487 {
3488         int ret = 0;
3489         uint64_t layer;
3490
3491         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3492         if (!ret) {
3493                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3494                                  MLX5_FLOW_LAYER_INNER_L4 :
3495                                  MLX5_FLOW_LAYER_OUTER_L4;
3496                 if (!(item_flags & layer))
3497                         return rte_flow_error_set(error, EINVAL,
3498                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3499                                                   NULL, "no transport layer "
3500                                                   "in pattern");
3501         }
3502         return ret;
3503 }
3504
3505 /**
3506  * Validate the modify-header actions of increment/decrement
3507  * TCP Sequence-number.
3508  *
3509  * @param[in] action_flags
3510  *   Holds the actions detected until now.
3511  * @param[in] action
3512  *   Pointer to the modify action.
3513  * @param[in] item_flags
3514  *   Holds the items detected.
3515  * @param[out] error
3516  *   Pointer to error structure.
3517  *
3518  * @return
3519  *   0 on success, a negative errno value otherwise and rte_errno is set.
3520  */
3521 static int
3522 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3523                                        const struct rte_flow_action *action,
3524                                        const uint64_t item_flags,
3525                                        struct rte_flow_error *error)
3526 {
3527         int ret = 0;
3528         uint64_t layer;
3529
3530         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3531         if (!ret) {
3532                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3533                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3534                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3535                 if (!(item_flags & layer))
3536                         return rte_flow_error_set(error, EINVAL,
3537                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3538                                                   NULL, "no TCP item in"
3539                                                   " pattern");
3540                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3541                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3542                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3543                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3544                         return rte_flow_error_set(error, EINVAL,
3545                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3546                                                   NULL,
3547                                                   "cannot decrease and increase"
3548                                                   " TCP sequence number"
3549                                                   " at the same time");
3550         }
3551         return ret;
3552 }
3553
3554 /**
3555  * Validate the modify-header actions of increment/decrement
3556  * TCP Acknowledgment number.
3557  *
3558  * @param[in] action_flags
3559  *   Holds the actions detected until now.
3560  * @param[in] action
3561  *   Pointer to the modify action.
3562  * @param[in] item_flags
3563  *   Holds the items detected.
3564  * @param[out] error
3565  *   Pointer to error structure.
3566  *
3567  * @return
3568  *   0 on success, a negative errno value otherwise and rte_errno is set.
3569  */
3570 static int
3571 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3572                                        const struct rte_flow_action *action,
3573                                        const uint64_t item_flags,
3574                                        struct rte_flow_error *error)
3575 {
3576         int ret = 0;
3577         uint64_t layer;
3578
3579         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3580         if (!ret) {
3581                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3582                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3583                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3584                 if (!(item_flags & layer))
3585                         return rte_flow_error_set(error, EINVAL,
3586                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3587                                                   NULL, "no TCP item in"
3588                                                   " pattern");
3589                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3590                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3591                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3592                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3593                         return rte_flow_error_set(error, EINVAL,
3594                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3595                                                   NULL,
3596                                                   "cannot decrease and increase"
3597                                                   " TCP acknowledgment number"
3598                                                   " at the same time");
3599         }
3600         return ret;
3601 }
3602
3603 /**
3604  * Validate the modify-header TTL actions.
3605  *
3606  * @param[in] action_flags
3607  *   Holds the actions detected until now.
3608  * @param[in] action
3609  *   Pointer to the modify action.
3610  * @param[in] item_flags
3611  *   Holds the items detected.
3612  * @param[out] error
3613  *   Pointer to error structure.
3614  *
3615  * @return
3616  *   0 on success, a negative errno value otherwise and rte_errno is set.
3617  */
3618 static int
3619 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3620                                    const struct rte_flow_action *action,
3621                                    const uint64_t item_flags,
3622                                    struct rte_flow_error *error)
3623 {
3624         int ret = 0;
3625         uint64_t layer;
3626
3627         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3628         if (!ret) {
3629                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3630                                  MLX5_FLOW_LAYER_INNER_L3 :
3631                                  MLX5_FLOW_LAYER_OUTER_L3;
3632                 if (!(item_flags & layer))
3633                         return rte_flow_error_set(error, EINVAL,
3634                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3635                                                   NULL,
3636                                                   "no IP protocol in pattern");
3637         }
3638         return ret;
3639 }
3640
3641 /**
3642  * Validate jump action.
3643  *
3644  * @param[in] action
3645  *   Pointer to the jump action.
3646  * @param[in] action_flags
3647  *   Holds the actions detected until now.
3648  * @param[in] attributes
3649  *   Pointer to flow attributes.
3650  * @param[in] external
3651  *   Action belongs to a flow rule created by a request external to the PMD.
3652  * @param[out] error
3653  *   Pointer to error structure.
3654  *
3655  * @return
3656  *   0 on success, a negative errno value otherwise and rte_errno is set.
3657  */
3658 static int
3659 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3660                              uint64_t action_flags,
3661                              const struct rte_flow_attr *attributes,
3662                              bool external, struct rte_flow_error *error)
3663 {
3664         uint32_t target_group, table;
3665         int ret = 0;
3666
3667         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3668                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3669                 return rte_flow_error_set(error, EINVAL,
3670                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3671                                           "can't have 2 fate actions in"
3672                                           " the same flow");
3673         if (action_flags & MLX5_FLOW_ACTION_METER)
3674                 return rte_flow_error_set(error, ENOTSUP,
3675                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3676                                           "jump with meter not supported");
3677         if (!action->conf)
3678                 return rte_flow_error_set(error, EINVAL,
3679                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3680                                           NULL, "action configuration not set");
3681         target_group =
3682                 ((const struct rte_flow_action_jump *)action->conf)->group;
3683         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3684                                        true, &table, error);
3685         if (ret)
3686                 return ret;
3687         if (attributes->group == target_group)
3688                 return rte_flow_error_set(error, EINVAL,
3689                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3690                                           "target group must be other than"
3691                                           " the current flow group");
3692         return 0;
3693 }
3694
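/*
 * Illustrative sketch: a jump must target a group other than the one the
 * rule lives in. With attr->group == 1, the action below validates, while
 * .group = 1 would fail with EINVAL. The guard macro and names are
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_jump example_jump_conf = { .group = 2 };
static const struct rte_flow_action example_jump_action = {
	.type = RTE_FLOW_ACTION_TYPE_JUMP,
	.conf = &example_jump_conf,
};
#endif
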
3695 /**
3696  * Validate the port_id action.
3697  *
3698  * @param[in] dev
3699  *   Pointer to rte_eth_dev structure.
3700  * @param[in] action_flags
3701  *   Bit-fields that holds the actions detected until now.
3702  * @param[in] action
3703  *   Port_id RTE action structure.
3704  * @param[in] attr
3705  *   Attributes of flow that includes this action.
3706  * @param[out] error
3707  *   Pointer to error structure.
3708  *
3709  * @return
3710  *   0 on success, a negative errno value otherwise and rte_errno is set.
3711  */
3712 static int
3713 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3714                                 uint64_t action_flags,
3715                                 const struct rte_flow_action *action,
3716                                 const struct rte_flow_attr *attr,
3717                                 struct rte_flow_error *error)
3718 {
3719         const struct rte_flow_action_port_id *port_id;
3720         struct mlx5_priv *act_priv;
3721         struct mlx5_priv *dev_priv;
3722         uint16_t port;
3723
3724         if (!attr->transfer)
3725                 return rte_flow_error_set(error, ENOTSUP,
3726                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3727                                           NULL,
3728                                           "port id action is valid in transfer"
3729                                           " mode only");
3730         if (!action || !action->conf)
3731                 return rte_flow_error_set(error, ENOTSUP,
3732                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3733                                           NULL,
3734                                           "port id action parameters must be"
3735                                           " specified");
3736         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3737                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3738                 return rte_flow_error_set(error, EINVAL,
3739                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3740                                           "can have only one fate action in"
3741                                           " a flow");
3742         dev_priv = mlx5_dev_to_eswitch_info(dev);
3743         if (!dev_priv)
3744                 return rte_flow_error_set(error, rte_errno,
3745                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3746                                           NULL,
3747                                           "failed to obtain E-Switch info");
3748         port_id = action->conf;
3749         port = port_id->original ? dev->data->port_id : port_id->id;
3750         act_priv = mlx5_port_to_eswitch_info(port, false);
3751         if (!act_priv)
3752                 return rte_flow_error_set
3753                                 (error, rte_errno,
3754                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3755                                  "failed to obtain E-Switch port id for port");
3756         if (act_priv->domain_id != dev_priv->domain_id)
3757                 return rte_flow_error_set
3758                                 (error, EINVAL,
3759                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3760                                  "port does not belong to"
3761                                  " E-Switch being configured");
3762         return 0;
3763 }
3764
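/*
 * Illustrative sketch: PORT_ID is a fate action and is accepted only with
 * attr->transfer set. With .original = 1 the PMD targets the port the rule
 * was created on; otherwise the DPDK port in .id, which must belong to the
 * same E-Switch domain. The guard macro, name and port number are
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_port_id example_port_id_conf = {
	.original = 0,
	.id = 1, /* DPDK port id of the destination representor. */
};
#endif
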
3765 /**
3766  * Get the maximum number of modify header actions.
3767  *
3768  * @param dev
3769  *   Pointer to rte_eth_dev structure.
3770  * @param flags
3771  *   Flags bits to check if root level.
3772  *
3773  * @return
3774  *   Max number of modify header actions device can support.
3775  */
3776 static inline unsigned int
3777 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
3778                               uint64_t flags)
3779 {
3780         /*
3781          * There's no way to directly query the max capacity from FW.
3782          * The maximal value on the root table should be assumed to be supported.
3783          */
3784         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3785                 return MLX5_MAX_MODIFY_NUM;
3786         else
3787                 return MLX5_ROOT_TBL_MODIFY_NUM;
3788 }
3789
3790 /**
3791  * Validate the meter action.
3792  *
3793  * @param[in] dev
3794  *   Pointer to rte_eth_dev structure.
3795  * @param[in] action_flags
3796  *   Bit-fields that hold the actions detected until now.
3797  * @param[in] action
3798  *   Pointer to the meter action.
3799  * @param[in] attr
3800  *   Attributes of flow that includes this action.
3801  * @param[out] error
3802  *   Pointer to error structure.
3803  *
3804  * @return
3805  *   0 on success, a negative errno value otherwise and rte_errno is set.
3806  */
3807 static int
3808 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3809                                 uint64_t action_flags,
3810                                 const struct rte_flow_action *action,
3811                                 const struct rte_flow_attr *attr,
3812                                 struct rte_flow_error *error)
3813 {
3814         struct mlx5_priv *priv = dev->data->dev_private;
3815         const struct rte_flow_action_meter *am = action->conf;
3816         struct mlx5_flow_meter *fm;
3817
3818         if (!am)
3819                 return rte_flow_error_set(error, EINVAL,
3820                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3821                                           "meter action conf is NULL");
3822
3823         if (action_flags & MLX5_FLOW_ACTION_METER)
3824                 return rte_flow_error_set(error, ENOTSUP,
3825                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3826                                           "meter chaining not supported");
3827         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3828                 return rte_flow_error_set(error, ENOTSUP,
3829                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3830                                           "meter with jump not supported");
3831         if (!priv->mtr_en)
3832                 return rte_flow_error_set(error, ENOTSUP,
3833                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3834                                           NULL,
3835                                           "meter action not supported");
3836         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3837         if (!fm)
3838                 return rte_flow_error_set(error, EINVAL,
3839                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3840                                           "Meter not found");
3841         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
3842               (!fm->ingress && !attr->ingress && attr->egress) ||
3843               (!fm->egress && !attr->egress && attr->ingress))))
3844                 return rte_flow_error_set(error, EINVAL,
3845                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3846                                           "Flow attributes are either invalid "
3847                                           "or have a conflict with current "
3848                                           "meter attributes");
3849         return 0;
3850 }
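
/*
 * Editor's illustration (not part of the original file): the METER action
 * conf this validator expects. The meter ID (1) is an assumption; the meter
 * must have been created beforehand through the rte_mtr API, otherwise the
 * "Meter not found" error above is returned.
 */
static const struct rte_flow_action __rte_unused example_meter_actions[] = {
        {
                .type = RTE_FLOW_ACTION_TYPE_METER,
                .conf = &(const struct rte_flow_action_meter){
                        .mtr_id = 1, /* assumed pre-created meter ID */
                },
        },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};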
3851
3852 /**
3853  * Validate the age action.
3854  *
3855  * @param[in] action_flags
3856  *   Holds the actions detected until now.
3857  * @param[in] action
3858  *   Pointer to the age action.
3859  * @param[in] dev
3860  *   Pointer to the Ethernet device structure.
3861  * @param[out] error
3862  *   Pointer to error structure.
3863  *
3864  * @return
3865  *   0 on success, a negative errno value otherwise and rte_errno is set.
3866  */
3867 static int
3868 flow_dv_validate_action_age(uint64_t action_flags,
3869                             const struct rte_flow_action *action,
3870                             struct rte_eth_dev *dev,
3871                             struct rte_flow_error *error)
3872 {
3873         struct mlx5_priv *priv = dev->data->dev_private;
3874         const struct rte_flow_action_age *age = action->conf;
3875
3876         if (!priv->config.devx || priv->counter_fallback)
3877                 return rte_flow_error_set(error, ENOTSUP,
3878                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3879                                           NULL,
3880                                           "age action not supported");
3881         if (!(action->conf))
3882                 return rte_flow_error_set(error, EINVAL,
3883                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3884                                           "configuration cannot be null");
3885         if (age->timeout >= UINT16_MAX / 2 / 10)
3886                 return rte_flow_error_set(error, ENOTSUP,
3887                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3888                                           "Max age time: 3275 seconds");
3889         if (action_flags & MLX5_FLOW_ACTION_AGE)
3890                 return rte_flow_error_set(error, EINVAL,
3891                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3892                                           "Duplicate age actions set");
3893         return 0;
3894 }
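
/*
 * Editor's illustration (not part of the original file): an AGE action conf
 * that passes the checks above. The bound is UINT16_MAX / 2 / 10 = 3276, so
 * the largest accepted timeout is 3275 seconds; the 30-second value here is
 * an arbitrary choice.
 */
static const struct rte_flow_action_age __rte_unused example_age_conf = {
        .timeout = 30,   /* seconds, must stay below 3276 */
        .context = NULL, /* returned to the app by rte_flow_get_aged_flows() */
};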
3895
3896 /**
3897  * Validate the modify-header IPv4 DSCP actions.
3898  *
3899  * @param[in] action_flags
3900  *   Holds the actions detected until now.
3901  * @param[in] action
3902  *   Pointer to the modify action.
3903  * @param[in] item_flags
3904  *   Holds the items detected.
3905  * @param[out] error
3906  *   Pointer to error structure.
3907  *
3908  * @return
3909  *   0 on success, a negative errno value otherwise and rte_errno is set.
3910  */
3911 static int
3912 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3913                                          const struct rte_flow_action *action,
3914                                          const uint64_t item_flags,
3915                                          struct rte_flow_error *error)
3916 {
3917         int ret = 0;
3918
3919         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3920         if (!ret) {
3921                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3922                         return rte_flow_error_set(error, EINVAL,
3923                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3924                                                   NULL,
3925                                                   "no ipv4 item in pattern");
3926         }
3927         return ret;
3928 }
3929
3930 /**
3931  * Validate the modify-header IPv6 DSCP actions.
3932  *
3933  * @param[in] action_flags
3934  *   Holds the actions detected until now.
3935  * @param[in] action
3936  *   Pointer to the modify action.
3937  * @param[in] item_flags
3938  *   Holds the items detected.
3939  * @param[out] error
3940  *   Pointer to error structure.
3941  *
3942  * @return
3943  *   0 on success, a negative errno value otherwise and rte_errno is set.
3944  */
3945 static int
3946 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3947                                          const struct rte_flow_action *action,
3948                                          const uint64_t item_flags,
3949                                          struct rte_flow_error *error)
3950 {
3951         int ret = 0;
3952
3953         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3954         if (!ret) {
3955                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3956                         return rte_flow_error_set(error, EINVAL,
3957                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3958                                                   NULL,
3959                                                   "no ipv6 item in pattern");
3960         }
3961         return ret;
3962 }
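
/*
 * Editor's illustration (not part of the original file): a SET_IPV4_DSCP
 * action paired with the IPv4 pattern item the validator above requires.
 * DSCP 46 (expedited forwarding) and queue 0 are placeholder assumptions.
 */
static int __rte_unused
example_validate_set_ipv4_dscp(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* mandatory for the action */
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_set_dscp dscp = { .dscp = 46 };
        const struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP, .conf = &dscp },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}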
3963
3964 /**
3965  * Find existing modify-header resource or create and register a new one.
3966  *
3967  * @param dev[in, out]
3968  *   Pointer to rte_eth_dev structure.
3969  * @param[in, out] resource
3970  *   Pointer to modify-header resource.
3971  * @param[in, out] dev_flow
3972  *   Pointer to the dev_flow.
3973  * @param[out] error
3974  *   Pointer to error structure.
3975  *
3976  * @return
3977  *   0 on success, otherwise a negative errno value and rte_errno is set.
3978  */
3979 static int
3980 flow_dv_modify_hdr_resource_register
3981                         (struct rte_eth_dev *dev,
3982                          struct mlx5_flow_dv_modify_hdr_resource *resource,
3983                          struct mlx5_flow *dev_flow,
3984                          struct rte_flow_error *error)
3985 {
3986         struct mlx5_priv *priv = dev->data->dev_private;
3987         struct mlx5_dev_ctx_shared *sh = priv->sh;
3988         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3989         struct mlx5dv_dr_domain *ns;
3990         uint32_t actions_len;
3991         int ret;
3992
3993         resource->flags = dev_flow->dv.group ? 0 :
3994                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3995         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
3996                                     resource->flags))
3997                 return rte_flow_error_set(error, EOVERFLOW,
3998                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3999                                           "too many modify header items");
4000         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4001                 ns = sh->fdb_domain;
4002         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4003                 ns = sh->tx_domain;
4004         else
4005                 ns = sh->rx_domain;
4006         /* Lookup a matching resource from cache. */
4007         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4008         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
4009                 if (resource->ft_type == cache_resource->ft_type &&
4010                     resource->actions_num == cache_resource->actions_num &&
4011                     resource->flags == cache_resource->flags &&
4012                     !memcmp((const void *)resource->actions,
4013                             (const void *)cache_resource->actions,
4014                             actions_len)) {
4015                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4016                                 (void *)cache_resource,
4017                                 rte_atomic32_read(&cache_resource->refcnt));
4018                         rte_atomic32_inc(&cache_resource->refcnt);
4019                         dev_flow->handle->dvh.modify_hdr = cache_resource;
4020                         return 0;
4021                 }
4022         }
4023         /* Register new modify-header resource. */
4024         cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
4025                                     sizeof(*cache_resource) + actions_len, 0,
4026                                     SOCKET_ID_ANY);
4027         if (!cache_resource)
4028                 return rte_flow_error_set(error, ENOMEM,
4029                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4030                                           "cannot allocate resource memory");
4031         *cache_resource = *resource;
4032         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
4033         ret = mlx5_flow_os_create_flow_action_modify_header
4034                                         (sh->ctx, ns, cache_resource,
4035                                          actions_len, &cache_resource->action);
4036         if (ret) {
4037                 mlx5_free(cache_resource);
4038                 return rte_flow_error_set(error, ENOMEM,
4039                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4040                                           NULL, "cannot create action");
4041         }
4042         rte_atomic32_init(&cache_resource->refcnt);
4043         rte_atomic32_inc(&cache_resource->refcnt);
4044         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
4045         dev_flow->handle->dvh.modify_hdr = cache_resource;
4046         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
4047                 (void *)cache_resource,
4048                 rte_atomic32_read(&cache_resource->refcnt));
4049         return 0;
4050 }
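
/*
 * Editor's illustration (not part of the original file): the cache key used
 * by the lookup above, restated as a standalone predicate. Two modify-header
 * resources are interchangeable only if the table type, action count,
 * root/non-root flags and the raw action words all match.
 */
static bool __rte_unused
example_modify_hdr_match(const struct mlx5_flow_dv_modify_hdr_resource *a,
                         const struct mlx5_flow_dv_modify_hdr_resource *b)
{
        return a->ft_type == b->ft_type &&
               a->actions_num == b->actions_num &&
               a->flags == b->flags &&
               !memcmp(a->actions, b->actions,
                       a->actions_num * sizeof(a->actions[0]));
}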
4051
4052 /**
4053  * Get DV flow counter by index.
4054  *
4055  * @param[in] dev
4056  *   Pointer to the Ethernet device structure.
4057  * @param[in] idx
4058  *   mlx5 flow counter index in the container.
4059  * @param[out] ppool
4060  *   mlx5 flow counter pool in the container,
4061  *   The mlx5 flow counter pool in the container.
4062  * @return
4063  *   Pointer to the counter, NULL otherwise.
4064  */
4065 static struct mlx5_flow_counter *
4066 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4067                            uint32_t idx,
4068                            struct mlx5_flow_counter_pool **ppool)
4069 {
4070         struct mlx5_priv *priv = dev->data->dev_private;
4071         struct mlx5_pools_container *cont;
4072         struct mlx5_flow_counter_pool *pool;
4073         uint32_t batch = 0, age = 0;
4074
4075         idx--;
4076         age = MLX_CNT_IS_AGE(idx);
4077         idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
4078         if (idx >= MLX5_CNT_BATCH_OFFSET) {
4079                 idx -= MLX5_CNT_BATCH_OFFSET;
4080                 batch = 1;
4081         }
4082         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4083         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
4084         pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
4085         MLX5_ASSERT(pool);
4086         if (ppool)
4087                 *ppool = pool;
4088         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4089 }
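
/*
 * Editor's illustration (not part of the original file): the counter index
 * layout decoded above. An index is 1-based; an age bit and a batch offset
 * are folded into it, and the remainder selects the pool and the slot.
 */
static void __rte_unused
example_decode_counter_idx(uint32_t idx, uint32_t *pool_n, uint32_t *slot)
{
        idx--;                                /* external index is 1-based */
        if (MLX_CNT_IS_AGE(idx))
                idx -= MLX5_CNT_AGE_OFFSET;   /* strip the aging flag */
        if (idx >= MLX5_CNT_BATCH_OFFSET)
                idx -= MLX5_CNT_BATCH_OFFSET; /* strip the batch flag */
        *pool_n = idx / MLX5_COUNTERS_PER_POOL;
        *slot = idx % MLX5_COUNTERS_PER_POOL;
}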
4090
4091 /**
4092  * Check whether the devx counter belongs to the pool.
4093  *
4094  * @param[in] pool
4095  *   Pointer to the counter pool.
4096  * @param[in] id
4097  *   The counter devx ID.
4098  *
4099  * @return
4100  *   True if counter belongs to the pool, false otherwise.
4101  */
4102 static bool
4103 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4104 {
4105         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4106                    MLX5_COUNTERS_PER_POOL;
4107
4108         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4109                 return true;
4110         return false;
4111 }
4112
4113 /**
4114  * Get a pool by devx counter ID.
4115  *
4116  * @param[in] cont
4117  *   Pointer to the counter container.
4118  * @param[in] id
4119  *   The counter devx ID.
4120  *
4121  * @return
4122  *   The counter pool pointer if it exists, NULL otherwise.
4123  */
4124 static struct mlx5_flow_counter_pool *
4125 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
4126 {
4127         uint32_t i;
4128
4129         /* Check last used pool. */
4130         if (cont->last_pool_idx != POOL_IDX_INVALID &&
4131             flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
4132                 return cont->pools[cont->last_pool_idx];
4133         /* ID out of range means no suitable pool in the container. */
4134         if (id > cont->max_id || id < cont->min_id)
4135                 return NULL;
4136         /*
4137          * Search the container from the end, since counter IDs are mostly
4138          * sequentially increasing, so the last pool is usually the needed
4139          * one.
4140          */
4141         i = rte_atomic16_read(&cont->n_valid);
4142         while (i--) {
4143                 struct mlx5_flow_counter_pool *pool = cont->pools[i];
4144
4145                 if (flow_dv_is_counter_in_pool(pool, id))
4146                         return pool;
4147         }
4148         return NULL;
4149 }
4150
4151 /**
4152  * Allocate new memory for the counter values, wrapped with all the needed
4153  * management structures.
4154  *
4155  * @param[in] dev
4156  *   Pointer to the Ethernet device structure.
4157  * @param[in] raws_n
4158  *   The number of raw memory areas, each holding MLX5_COUNTERS_PER_POOL counters.
4159  *
4160  * @return
4161  *   The new memory management pointer on success, otherwise NULL and rte_errno
4162  *   is set.
4163  */
4164 static struct mlx5_counter_stats_mem_mng *
4165 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
4166 {
4167         struct mlx5_priv *priv = dev->data->dev_private;
4168         struct mlx5_dev_ctx_shared *sh = priv->sh;
4169         struct mlx5_devx_mkey_attr mkey_attr;
4170         struct mlx5_counter_stats_mem_mng *mem_mng;
4171         volatile struct flow_counter_stats *raw_data;
4172         int size = (sizeof(struct flow_counter_stats) *
4173                         MLX5_COUNTERS_PER_POOL +
4174                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
4175                         sizeof(struct mlx5_counter_stats_mem_mng);
4176         size_t pgsize = rte_mem_page_size();
4177         if (pgsize == (size_t)-1) {
4178                 DRV_LOG(ERR, "Failed to get mem page size");
4179                 rte_errno = ENOMEM;
4180                 return NULL;
4181         }
4182         uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
4183                                   SOCKET_ID_ANY);
4184         int i;
4185
4186         if (!mem) {
4187                 rte_errno = ENOMEM;
4188                 return NULL;
4189         }
4190         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
4191         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
4192         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
4193                                                  IBV_ACCESS_LOCAL_WRITE);
4194         if (!mem_mng->umem) {
4195                 rte_errno = errno;
4196                 mlx5_free(mem);
4197                 return NULL;
4198         }
4199         mkey_attr.addr = (uintptr_t)mem;
4200         mkey_attr.size = size;
4201         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
4202         mkey_attr.pd = sh->pdn;
4203         mkey_attr.log_entity_size = 0;
4204         mkey_attr.pg_access = 0;
4205         mkey_attr.klm_array = NULL;
4206         mkey_attr.klm_num = 0;
4207         if (priv->config.hca_attr.relaxed_ordering_write &&
4208                 priv->config.hca_attr.relaxed_ordering_read  &&
4209                 !haswell_broadwell_cpu)
4210                 mkey_attr.relaxed_ordering = 1;
4211         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
4212         if (!mem_mng->dm) {
4213                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
4214                 rte_errno = errno;
4215                 mlx5_free(mem);
4216                 return NULL;
4217         }
4218         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
4219         raw_data = (volatile struct flow_counter_stats *)mem;
4220         for (i = 0; i < raws_n; ++i) {
4221                 mem_mng->raws[i].mem_mng = mem_mng;
4222                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
4223         }
4224         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
4225         return mem_mng;
4226 }
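
/*
 * Editor's note (not part of the original file): the single allocation made
 * above is carved up as follows, per the size arithmetic in the function:
 *
 *   mem
 *   |-- raws_n * MLX5_COUNTERS_PER_POOL * sizeof(struct flow_counter_stats)
 *   |     raw counter values, registered as DevX UMEM and mkey for the HW
 *   |-- raws_n * sizeof(struct mlx5_counter_stats_raw)
 *   |     per-area bookkeeping (back pointer to mem_mng + data pointer)
 *   `-- sizeof(struct mlx5_counter_stats_mem_mng)
 *         the management header itself, placed at the very end
 */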
4227
4228 /**
4229  * Resize a counter container.
4230  *
4231  * @param[in] dev
4232  *   Pointer to the Ethernet device structure.
4233  * @param[in] batch
4234  *   Whether the pool is for counters allocated by the batch command.
4235  * @param[in] age
4236  *   Whether the pool is for aging counters.
4237  *
4238  * @return
4239  *   0 on success, otherwise negative errno value and rte_errno is set.
4240  */
4241 static int
4242 flow_dv_container_resize(struct rte_eth_dev *dev,
4243                                 uint32_t batch, uint32_t age)
4244 {
4245         struct mlx5_priv *priv = dev->data->dev_private;
4246         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4247                                                                age);
4248         struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
4249         void *old_pools = cont->pools;
4250         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
4251         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4252         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4253
4254         if (!pools) {
4255                 rte_errno = ENOMEM;
4256                 return -ENOMEM;
4257         }
4258         if (old_pools)
4259                 memcpy(pools, old_pools, cont->n *
4260                                        sizeof(struct mlx5_flow_counter_pool *));
4261         /*
4262          * Fallback mode queries the counter directly; no background query
4263          * resources are needed.
4264          */
4265         if (!priv->counter_fallback) {
4266                 int i;
4267
4268                 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4269                           MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4270                 if (!mem_mng) {
4271                         mlx5_free(pools);
4272                         return -ENOMEM;
4273                 }
4274                 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4275                         LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4276                                          mem_mng->raws +
4277                                          MLX5_CNT_CONTAINER_RESIZE +
4278                                          i, next);
4279         }
4280         rte_spinlock_lock(&cont->resize_sl);
4281         cont->n = resize;
4282         cont->mem_mng = mem_mng;
4283         cont->pools = pools;
4284         rte_spinlock_unlock(&cont->resize_sl);
4285         if (old_pools)
4286                 mlx5_free(old_pools);
4287         return 0;
4288 }
4289
4290 /**
4291  * Query a devx flow counter.
4292  *
4293  * @param[in] dev
4294  *   Pointer to the Ethernet device structure.
4295  * @param[in] counter
4296  *   Index to the flow counter.
4297  * @param[out] pkts
4298  *   The statistics value of packets.
4299  * @param[out] bytes
4300  *   The statistics value of bytes.
4301  *
4302  * @return
4303  *   0 on success, otherwise a negative errno value and rte_errno is set.
4304  */
4305 static inline int
4306 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4307                      uint64_t *bytes)
4308 {
4309         struct mlx5_priv *priv = dev->data->dev_private;
4310         struct mlx5_flow_counter_pool *pool = NULL;
4311         struct mlx5_flow_counter *cnt;
4312         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4313         int offset;
4314
4315         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4316         MLX5_ASSERT(pool);
4317         if (counter < MLX5_CNT_BATCH_OFFSET) {
4318                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4319                 if (priv->counter_fallback)
4320                         return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
4321                                         0, pkts, bytes, 0, NULL, NULL, 0);
4322         }
4323
4324         rte_spinlock_lock(&pool->sl);
4325         /*
4326          * A single-counter allocation may, in parallel with the host
4327          * reading, allocate a smaller ID than the one currently allocated.
4328          * In this case the new counter values must be reported as 0.
4329          */
4330         if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4331                 *pkts = 0;
4332                 *bytes = 0;
4333         } else {
4334                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4335                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4336                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4337         }
4338         rte_spinlock_unlock(&pool->sl);
4339         return 0;
4340 }
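
/*
 * Editor's illustration (not part of the original file): the public query
 * path that ends up in _flow_dv_query_count() for a flow carrying a COUNT
 * action. The flow handle is assumed to come from rte_flow_create().
 */
static int __rte_unused
example_query_flow_counter(uint16_t port_id, struct rte_flow *flow,
                           uint64_t *hits, uint64_t *bytes)
{
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count query = { .reset = 0 };
        struct rte_flow_error err;
        int ret;

        ret = rte_flow_query(port_id, flow, &action, &query, &err);
        if (ret)
                return ret;
        *hits = query.hits_set ? query.hits : 0;
        *bytes = query.bytes_set ? query.bytes : 0;
        return 0;
}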
4341
4342 /**
4343  * Create and initialize a new counter pool.
4344  *
4345  * @param[in] dev
4346  *   Pointer to the Ethernet device structure.
4347  * @param[in] dcs
4348  *   The devX counter handle.
4349  * @param[in] batch
4350  *   Whether the pool is for counters allocated by the batch command.
4351  * @param[in] age
4352  *   Whether the pool is for counters allocated for aging.
4355  *
4356  * @return
4357  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4358  */
4359 static struct mlx5_flow_counter_pool *
4360 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4361                     uint32_t batch, uint32_t age)
4362 {
4363         struct mlx5_priv *priv = dev->data->dev_private;
4364         struct mlx5_flow_counter_pool *pool;
4365         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4366                                                                age);
4367         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4368         uint32_t size = sizeof(*pool);
4369
4370         if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
4371                 return NULL;
4372         size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
4373         size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
4374         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
4375         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4376         if (!pool) {
4377                 rte_errno = ENOMEM;
4378                 return NULL;
4379         }
4380         pool->min_dcs = dcs;
4381         if (!priv->counter_fallback)
4382                 pool->raw = cont->mem_mng->raws + n_valid %
4383                                                       MLX5_CNT_CONTAINER_RESIZE;
4384         pool->raw_hw = NULL;
4385         pool->type = 0;
4386         pool->type |= (batch ? 0 :  CNT_POOL_TYPE_EXT);
4387         pool->type |= (!age ? 0 :  CNT_POOL_TYPE_AGE);
4388         pool->query_gen = 0;
4389         rte_spinlock_init(&pool->sl);
4390         TAILQ_INIT(&pool->counters[0]);
4391         TAILQ_INIT(&pool->counters[1]);
4392         TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4393         pool->index = n_valid;
4394         cont->pools[n_valid] = pool;
4395         if (!batch) {
4396                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4397
4398                 if (base < cont->min_id)
4399                         cont->min_id = base;
4400                 if (base > cont->max_id)
4401                         cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4402                 cont->last_pool_idx = pool->index;
4403         }
4404         /* Pool initialization must be complete before host thread access. */
4405         rte_cio_wmb();
4406         rte_atomic16_add(&cont->n_valid, 1);
4407         return pool;
4408 }
4409
4410 /**
4411  * Update the minimum dcs-id for the aged or non-aged counter pool.
4412  *
4413  * @param[in] dev
4414  *   Pointer to the Ethernet device structure.
4415  * @param[in] pool
4416  *   Current counter pool.
4417  * @param[in] batch
4418  *   Whether the pool is for counters allocated by the batch command.
4419  * @param[in] age
4420  *   Whether the counter is for aging.
4421  */
4422 static void
4423 flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
4424                         struct mlx5_flow_counter_pool *pool,
4425                         uint32_t batch, uint32_t age)
4426 {
4427         struct mlx5_priv *priv = dev->data->dev_private;
4428         struct mlx5_flow_counter_pool *other;
4429         struct mlx5_pools_container *cont;
4430
4431         cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
4432         other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
4433         if (!other)
4434                 return;
4435         if (pool->min_dcs->id < other->min_dcs->id) {
4436                 rte_atomic64_set(&other->a64_dcs,
4437                         rte_atomic64_read(&pool->a64_dcs));
4438         } else {
4439                 rte_atomic64_set(&pool->a64_dcs,
4440                         rte_atomic64_read(&other->a64_dcs));
4441         }
4442 }
4443 /**
4444  * Prepare a new counter and/or a new counter pool.
4445  *
4446  * @param[in] dev
4447  *   Pointer to the Ethernet device structure.
4448  * @param[out] cnt_free
4449  *   Where to put the pointer of a new counter.
4450  * @param[in] batch
4451  *   Whether the pool is for counters allocated by the batch command.
4452  * @param[in] age
4453  *   Whether the pool is for counters allocated for aging.
4454  *
4455  * @return
4456  *   The counter pool pointer and @p cnt_free is set on success,
4457  *   NULL otherwise and rte_errno is set.
4458  */
4459 static struct mlx5_flow_counter_pool *
4460 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4461                              struct mlx5_flow_counter **cnt_free,
4462                              uint32_t batch, uint32_t age)
4463 {
4464         struct mlx5_priv *priv = dev->data->dev_private;
4465         struct mlx5_pools_container *cont;
4466         struct mlx5_flow_counter_pool *pool;
4467         struct mlx5_counters tmp_tq;
4468         struct mlx5_devx_obj *dcs = NULL;
4469         struct mlx5_flow_counter *cnt;
4470         uint32_t i;
4471
4472         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4473         if (!batch) {
4474                 /* bulk_bitmap must be 0 for single counter allocation. */
4475                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4476                 if (!dcs)
4477                         return NULL;
4478                 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4479                 if (!pool) {
4480                         pool = flow_dv_pool_create(dev, dcs, batch, age);
4481                         if (!pool) {
4482                                 mlx5_devx_cmd_destroy(dcs);
4483                                 return NULL;
4484                         }
4485                 } else if (dcs->id < pool->min_dcs->id) {
4486                         rte_atomic64_set(&pool->a64_dcs,
4487                                          (int64_t)(uintptr_t)dcs);
4488                 }
4489                 flow_dv_counter_update_min_dcs(dev,
4490                                                 pool, batch, age);
4491                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4492                 cnt = MLX5_POOL_GET_CNT(pool, i);
4493                 cnt->pool = pool;
4494                 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
4495                 *cnt_free = cnt;
4496                 return pool;
4497         }
4498         /* bulk_bitmap is in 128 counters units. */
4499         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4500                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4501         if (!dcs) {
4502                 rte_errno = ENODATA;
4503                 return NULL;
4504         }
4505         pool = flow_dv_pool_create(dev, dcs, batch, age);
4506         if (!pool) {
4507                 mlx5_devx_cmd_destroy(dcs);
4508                 return NULL;
4509         }
4510         TAILQ_INIT(&tmp_tq);
4511         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4512                 cnt = MLX5_POOL_GET_CNT(pool, i);
4513                 cnt->pool = pool;
4514                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4515         }
4516         rte_spinlock_lock(&cont->csl);
4517         TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
4518         rte_spinlock_unlock(&cont->csl);
4519         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4520         (*cnt_free)->pool = pool;
4521         return pool;
4522 }
4523
4524 /**
4525  * Search for an existing shared counter.
4526  *
4527  * @param[in] dev
4528  *   Pointer to the Ethernet device structure.
4529  * @param[in] id
4530  *   The shared counter ID to search.
4531  * @param[out] ppool
4532  *   The mlx5 flow counter pool in the container.
4533  *
4534  * @return
4535  *   NULL if it does not exist, otherwise a pointer to the shared extended counter.
4536  */
4537 static struct mlx5_flow_counter_ext *
4538 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
4539                               struct mlx5_flow_counter_pool **ppool)
4540 {
4541         struct mlx5_priv *priv = dev->data->dev_private;
4542         union mlx5_l3t_data data;
4543         uint32_t cnt_idx;
4544
4545         if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
4546                 return NULL;
4547         cnt_idx = data.dword;
4548         /*
4549          * Shared counters don't have age info. The counter extension comes
4550          * right after the counter data structure.
4551          */
4552         return (struct mlx5_flow_counter_ext *)
4553                ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
4554 }
4555
4556 /**
4557  * Allocate a flow counter.
4558  *
4559  * @param[in] dev
4560  *   Pointer to the Ethernet device structure.
4561  * @param[in] shared
4562  *   Indicate if this counter is shared with other flows.
4563  * @param[in] id
4564  *   Counter identifier.
4565  * @param[in] group
4566  *   Counter flow group.
4567  * @param[in] age
4568  *   Whether the counter was allocated for aging.
4569  *
4570  * @return
4571  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4572  */
4573 static uint32_t
4574 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4575                       uint16_t group, uint32_t age)
4576 {
4577         struct mlx5_priv *priv = dev->data->dev_private;
4578         struct mlx5_flow_counter_pool *pool = NULL;
4579         struct mlx5_flow_counter *cnt_free = NULL;
4580         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4581         /*
4582          * Currently group 0 flow counter cannot be assigned to a flow if it is
4583          * not the first one in the batch counter allocation, so it is better
4584          * to allocate counters one by one for these flows in a separate
4585          * container.
4586          * A counter can be shared between different groups, so we need to
4587          * take shared counters from the single-counter container.
4588          */
4589         uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
4590         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4591                                                                age);
4592         uint32_t cnt_idx;
4593
4594         if (!priv->config.devx) {
4595                 rte_errno = ENOTSUP;
4596                 return 0;
4597         }
4598         if (shared) {
4599                 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
4600                 if (cnt_ext) {
4601                         if (cnt_ext->ref_cnt + 1 == 0) {
4602                                 rte_errno = E2BIG;
4603                                 return 0;
4604                         }
4605                         cnt_ext->ref_cnt++;
4606                         cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
4607                                   (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
4608                                   + 1;
4609                         return cnt_idx;
4610                 }
4611         }
4612         /* Get free counters from container. */
4613         rte_spinlock_lock(&cont->csl);
4614         cnt_free = TAILQ_FIRST(&cont->counters);
4615         if (cnt_free)
4616                 TAILQ_REMOVE(&cont->counters, cnt_free, next);
4617         rte_spinlock_unlock(&cont->csl);
4618         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
4619                                                        batch, age))
4620                 goto err;
4621         pool = cnt_free->pool;
4622         if (!batch)
4623                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
4624         /* Create a DV counter action only on first-time usage. */
4625         if (!cnt_free->action) {
4626                 uint16_t offset;
4627                 struct mlx5_devx_obj *dcs;
4628                 int ret;
4629
4630                 if (batch) {
4631                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4632                         dcs = pool->min_dcs;
4633                 } else {
4634                         offset = 0;
4635                         dcs = cnt_ext->dcs;
4636                 }
4637                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4638                                                             &cnt_free->action);
4639                 if (ret) {
4640                         rte_errno = errno;
4641                         goto err;
4642                 }
4643         }
4644         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4645                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4646         cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
4647         cnt_idx += age * MLX5_CNT_AGE_OFFSET;
4648         /* Update the counter reset values. */
4649         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4650                                  &cnt_free->bytes))
4651                 goto err;
4652         if (cnt_ext) {
4653                 cnt_ext->shared = shared;
4654                 cnt_ext->ref_cnt = 1;
4655                 cnt_ext->id = id;
4656                 if (shared) {
4657                         union mlx5_l3t_data data;
4658
4659                         data.dword = cnt_idx;
4660                         if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
4661                                 return 0;
4662                 }
4663         }
4664         if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
4665                 /* Start the asynchronous batch query by the host thread. */
4666                 mlx5_set_query_alarm(priv->sh);
4667         return cnt_idx;
4668 err:
4669         if (cnt_free) {
4670                 cnt_free->pool = pool;
4671                 rte_spinlock_lock(&cont->csl);
4672                 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
4673                 rte_spinlock_unlock(&cont->csl);
4674         }
4675         return 0;
4676 }
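
/*
 * Editor's illustration (not part of the original file): the COUNT action
 * conf that drives the shared-counter path above. Flows using the same
 * (shared, id) pair resolve to the same counter index; the ID value 7 is
 * an arbitrary example.
 */
static const struct rte_flow_action_count __rte_unused example_shared_cnt = {
        .shared = 1, /* looked up/registered in priv->sh->cnt_id_tbl by .id */
        .id = 7,     /* assumed application-chosen shared counter ID */
};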
4677
4678 /**
4679  * Get age param from counter index.
4680  *
4681  * @param[in] dev
4682  *   Pointer to the Ethernet device structure.
4683  * @param[in] counter
4684  *   Index to the counter handle.
4685  *
4686  * @return
4687  *   The aging parameter specified for the counter index.
4688  */
4689 static struct mlx5_age_param *
4690 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4691                                 uint32_t counter)
4692 {
4693         struct mlx5_flow_counter *cnt;
4694         struct mlx5_flow_counter_pool *pool = NULL;
4695
4696         flow_dv_counter_get_by_idx(dev, counter, &pool);
4697         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4698         cnt = MLX5_POOL_GET_CNT(pool, counter);
4699         return MLX5_CNT_TO_AGE(cnt);
4700 }
4701
4702 /**
4703  * Remove a flow counter from aged counter list.
4704  *
4705  * @param[in] dev
4706  *   Pointer to the Ethernet device structure.
4707  * @param[in] counter
4708  *   Index to the counter handle.
4709  * @param[in] cnt
4710  *   Pointer to the counter.
4711  */
4712 static void
4713 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
4714                                 uint32_t counter, struct mlx5_flow_counter *cnt)
4715 {
4716         struct mlx5_age_info *age_info;
4717         struct mlx5_age_param *age_param;
4718         struct mlx5_priv *priv = dev->data->dev_private;
4719
4720         age_info = GET_PORT_AGE_INFO(priv);
4721         age_param = flow_dv_counter_idx_get_age(dev, counter);
4722         if (rte_atomic16_cmpset((volatile uint16_t *)
4723                         &age_param->state,
4724                         AGE_CANDIDATE, AGE_FREE)
4725                         != AGE_CANDIDATE) {
4726                 /*
4727                  * We need the lock even on age timeout, since the
4728                  * counter may still be in process.
4729                  */
4730                 rte_spinlock_lock(&age_info->aged_sl);
4731                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
4732                 rte_spinlock_unlock(&age_info->aged_sl);
4733         }
4734         rte_atomic16_set(&age_param->state, AGE_FREE);
4735 }
4736 /**
4737  * Release a flow counter.
4738  *
4739  * @param[in] dev
4740  *   Pointer to the Ethernet device structure.
4741  * @param[in] counter
4742  *   Index to the counter handle.
4743  */
4744 static void
4745 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
4746 {
4747         struct mlx5_priv *priv = dev->data->dev_private;
4748         struct mlx5_flow_counter_pool *pool = NULL;
4749         struct mlx5_flow_counter *cnt;
4750         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4751
4752         if (!counter)
4753                 return;
4754         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4755         MLX5_ASSERT(pool);
4756         if (counter < MLX5_CNT_BATCH_OFFSET) {
4757                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4758                 if (cnt_ext) {
4759                         if (--cnt_ext->ref_cnt)
4760                                 return;
4761                         if (cnt_ext->shared)
4762                                 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
4763                                                      cnt_ext->id);
4764                 }
4765         }
4766         if (IS_AGE_POOL(pool))
4767                 flow_dv_counter_remove_from_age(dev, counter, cnt);
4768         cnt->pool = pool;
4769         /*
4770          * Put the counter back into a list to be updated in non-fallback
4771          * mode. Two lists are used alternately: while one is being
4772          * queried, freed counters are added to the other, selected by the
4773          * pool query_gen value. After a query finishes, the counters on
4774          * the queried list are moved to the global container counter
4775          * list. The lists are swapped when a query starts, so no lock is
4776          * needed: the query callback and the release function always
4777          * operate on different lists.
4778          */
4779         if (!priv->counter_fallback)
4780                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
4781         else
4782                 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
4783                                   (priv->sh, 0, 0))->counters),
4784                                   cnt, next);
4785 }
4786
4787 /**
4788  * Verify the @p attributes will be correctly understood by the NIC and store
4789  * them in the @p flow if everything is correct.
4790  *
4791  * @param[in] dev
4792  *   Pointer to dev struct.
4793  * @param[in] attributes
4794  *   Pointer to flow attributes
4795  * @param[in] external
4796  *   This flow rule is created by a request external to the PMD.
4797  * @param[out] error
4798  *   Pointer to error structure.
4799  *
4800  * @return
4801  *   - 0 on success and non-root table.
4802  *   - 1 on success and root table.
4803  *   - a negative errno value otherwise and rte_errno is set.
4804  */
4805 static int
4806 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4807                             const struct rte_flow_attr *attributes,
4808                             bool external __rte_unused,
4809                             struct rte_flow_error *error)
4810 {
4811         struct mlx5_priv *priv = dev->data->dev_private;
4812         uint32_t priority_max = priv->config.flow_prio - 1;
4813         int ret = 0;
4814
4815 #ifndef HAVE_MLX5DV_DR
4816         if (attributes->group)
4817                 return rte_flow_error_set(error, ENOTSUP,
4818                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4819                                           NULL,
4820                                           "groups are not supported");
4821 #else
4822         uint32_t table = 0;
4823
4824         ret = mlx5_flow_group_to_table(attributes, external,
4825                                        attributes->group, !!priv->fdb_def_rule,
4826                                        &table, error);
4827         if (ret)
4828                 return ret;
4829         if (!table)
4830                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4831 #endif
4832         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4833             attributes->priority >= priority_max)
4834                 return rte_flow_error_set(error, ENOTSUP,
4835                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4836                                           NULL,
4837                                           "priority out of range");
4838         if (attributes->transfer) {
4839                 if (!priv->config.dv_esw_en)
4840                         return rte_flow_error_set
4841                                 (error, ENOTSUP,
4842                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4843                                  "E-Switch dr is not supported");
4844                 if (!(priv->representor || priv->master))
4845                         return rte_flow_error_set
4846                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4847                                  NULL, "E-Switch configuration can only be"
4848                                  " done by a master or a representor device");
4849                 if (attributes->egress)
4850                         return rte_flow_error_set
4851                                 (error, ENOTSUP,
4852                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4853                                  "egress is not supported");
4854         }
4855         if (!(attributes->egress ^ attributes->ingress))
4856                 return rte_flow_error_set(error, ENOTSUP,
4857                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4858                                           "must specify exactly one of "
4859                                           "ingress or egress");
4860         return ret;
4861 }
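
/*
 * Editor's illustration (not part of the original file): attributes that
 * pass the checks above on a NIC (non-transfer) port - exactly one direction
 * bit set and a priority below the configured maximum.
 */
static const struct rte_flow_attr __rte_unused example_nic_rx_attr = {
        .group = 0,    /* root table */
        .priority = 0, /* must be < flow_prio unless MLX5_FLOW_PRIO_RSVD */
        .ingress = 1,  /* exactly one of ingress/egress must be set */
        .egress = 0,
};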
4862
4863 /**
4864  * Internal validation function. For validating both actions and items.
4865  *
4866  * @param[in] dev
4867  *   Pointer to the rte_eth_dev structure.
4868  * @param[in] attr
4869  *   Pointer to the flow attributes.
4870  * @param[in] items
4871  *   Pointer to the list of items.
4872  * @param[in] actions
4873  *   Pointer to the list of actions.
4874  * @param[in] external
4875  *   This flow rule is created by a request external to the PMD.
4876  * @param[in] hairpin
4877  *   Number of hairpin TX actions, 0 means classic flow.
4878  * @param[out] error
4879  *   Pointer to the error structure.
4880  *
4881  * @return
4882  *   0 on success, a negative errno value otherwise and rte_errno is set.
4883  */
4884 static int
4885 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4886                  const struct rte_flow_item items[],
4887                  const struct rte_flow_action actions[],
4888                  bool external, int hairpin, struct rte_flow_error *error)
4889 {
4890         int ret;
4891         uint64_t action_flags = 0;
4892         uint64_t item_flags = 0;
4893         uint64_t last_item = 0;
4894         uint8_t next_protocol = 0xff;
4895         uint16_t ether_type = 0;
4896         int actions_n = 0;
4897         uint8_t item_ipv6_proto = 0;
4898         const struct rte_flow_item *gre_item = NULL;
4899         const struct rte_flow_action_raw_decap *decap;
4900         const struct rte_flow_action_raw_encap *encap;
4901         const struct rte_flow_action_rss *rss;
4902         const struct rte_flow_item_tcp nic_tcp_mask = {
4903                 .hdr = {
4904                         .tcp_flags = 0xFF,
4905                         .src_port = RTE_BE16(UINT16_MAX),
4906                         .dst_port = RTE_BE16(UINT16_MAX),
4907                 }
4908         };
4909         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
4910                 .hdr = {
4911                         .src_addr = RTE_BE32(0xffffffff),
4912                         .dst_addr = RTE_BE32(0xffffffff),
4913                         .type_of_service = 0xff,
4914                         .next_proto_id = 0xff,
4915                         .time_to_live = 0xff,
4916                 },
4917         };
4918         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
4919                 .hdr = {
4920                         .src_addr =
4921                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4922                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4923                         .dst_addr =
4924                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4925                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4926                         .vtc_flow = RTE_BE32(0xffffffff),
4927                         .proto = 0xff,
4928                         .hop_limits = 0xff,
4929                 },
4930         };
4931         const struct rte_flow_item_ecpri nic_ecpri_mask = {
4932                 .hdr = {
4933                         .common = {
4934                                 .u32 =
4935                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
4936                                         .type = 0xFF,
4937                                         }).u32),
4938                         },
4939                         .dummy[0] = 0xffffffff,
4940                 },
4941         };
4942         struct mlx5_priv *priv = dev->data->dev_private;
4943         struct mlx5_dev_config *dev_conf = &priv->config;
4944         uint16_t queue_index = 0xFFFF;
4945         const struct rte_flow_item_vlan *vlan_m = NULL;
4946         int16_t rw_act_num = 0;
4947         uint64_t is_root;
4948
4949         if (items == NULL)
4950                 return -1;
4951         ret = flow_dv_validate_attributes(dev, attr, external, error);
4952         if (ret < 0)
4953                 return ret;
4954         is_root = (uint64_t)ret;
4955         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4956                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4957                 int type = items->type;
4958
4959                 if (!mlx5_flow_os_item_supported(type))
4960                         return rte_flow_error_set(error, ENOTSUP,
4961                                                   RTE_FLOW_ERROR_TYPE_ITEM,
4962                                                   NULL, "item not supported");
4963                 switch (type) {
4964                 case RTE_FLOW_ITEM_TYPE_VOID:
4965                         break;
4966                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4967                         ret = flow_dv_validate_item_port_id
4968                                         (dev, items, attr, item_flags, error);
4969                         if (ret < 0)
4970                                 return ret;
4971                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4972                         break;
4973                 case RTE_FLOW_ITEM_TYPE_ETH:
4974                         ret = mlx5_flow_validate_item_eth(items, item_flags,
4975                                                           error);
4976                         if (ret < 0)
4977                                 return ret;
4978                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4979                                              MLX5_FLOW_LAYER_OUTER_L2;
4980                         if (items->mask != NULL && items->spec != NULL) {
4981                                 ether_type =
4982                                         ((const struct rte_flow_item_eth *)
4983                                          items->spec)->type;
4984                                 ether_type &=
4985                                         ((const struct rte_flow_item_eth *)
4986                                          items->mask)->type;
4987                                 ether_type = rte_be_to_cpu_16(ether_type);
4988                         } else {
4989                                 ether_type = 0;
4990                         }
4991                         break;
4992                 case RTE_FLOW_ITEM_TYPE_VLAN:
4993                         ret = flow_dv_validate_item_vlan(items, item_flags,
4994                                                          dev, error);
4995                         if (ret < 0)
4996                                 return ret;
4997                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4998                                              MLX5_FLOW_LAYER_OUTER_VLAN;
4999                         if (items->mask != NULL && items->spec != NULL) {
5000                                 ether_type =
5001                                         ((const struct rte_flow_item_vlan *)
5002                                          items->spec)->inner_type;
5003                                 ether_type &=
5004                                         ((const struct rte_flow_item_vlan *)
5005                                          items->mask)->inner_type;
5006                                 ether_type = rte_be_to_cpu_16(ether_type);
5007                         } else {
5008                                 ether_type = 0;
5009                         }
5010                         /* Store outer VLAN mask for of_push_vlan action. */
5011                         if (!tunnel)
5012                                 vlan_m = items->mask;
5013                         break;
5014                 case RTE_FLOW_ITEM_TYPE_IPV4:
5015                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5016                                                   &item_flags, &tunnel);
5017                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
5018                                                            last_item,
5019                                                            ether_type,
5020                                                            &nic_ipv4_mask,
5021                                                            error);
5022                         if (ret < 0)
5023                                 return ret;
5024                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5025                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5026                         if (items->mask != NULL &&
5027                             ((const struct rte_flow_item_ipv4 *)
5028                              items->mask)->hdr.next_proto_id) {
5029                                 next_protocol =
5030                                         ((const struct rte_flow_item_ipv4 *)
5031                                          (items->spec))->hdr.next_proto_id;
5032                                 next_protocol &=
5033                                         ((const struct rte_flow_item_ipv4 *)
5034                                          (items->mask))->hdr.next_proto_id;
5035                         } else {
5036                                 /* Reset for inner layer. */
5037                                 next_protocol = 0xff;
5038                         }
5039                         break;
5040                 case RTE_FLOW_ITEM_TYPE_IPV6:
5041                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5042                                                   &item_flags, &tunnel);
5043                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5044                                                            last_item,
5045                                                            ether_type,
5046                                                            &nic_ipv6_mask,
5047                                                            error);
5048                         if (ret < 0)
5049                                 return ret;
5050                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5051                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5052                         if (items->mask != NULL &&
5053                             ((const struct rte_flow_item_ipv6 *)
5054                              items->mask)->hdr.proto) {
5055                                 item_ipv6_proto =
5056                                         ((const struct rte_flow_item_ipv6 *)
5057                                          items->spec)->hdr.proto;
5058                                 next_protocol =
5059                                         ((const struct rte_flow_item_ipv6 *)
5060                                          items->spec)->hdr.proto;
5061                                 next_protocol &=
5062                                         ((const struct rte_flow_item_ipv6 *)
5063                                          items->mask)->hdr.proto;
5064                         } else {
5065                                 /* Reset for inner layer. */
5066                                 next_protocol = 0xff;
5067                         }
5068                         break;
5069                 case RTE_FLOW_ITEM_TYPE_TCP:
5070                         ret = mlx5_flow_validate_item_tcp
5071                                                 (items, item_flags,
5072                                                  next_protocol,
5073                                                  &nic_tcp_mask,
5074                                                  error);
5075                         if (ret < 0)
5076                                 return ret;
5077                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5078                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5079                         break;
5080                 case RTE_FLOW_ITEM_TYPE_UDP:
5081                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5082                                                           next_protocol,
5083                                                           error);
5084                         if (ret < 0)
5085                                 return ret;
5086                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5087                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5088                         break;
5089                 case RTE_FLOW_ITEM_TYPE_GRE:
5090                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5091                                                           next_protocol, error);
5092                         if (ret < 0)
5093                                 return ret;
5094                         gre_item = items;
5095                         last_item = MLX5_FLOW_LAYER_GRE;
5096                         break;
5097                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5098                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5099                                                             next_protocol,
5100                                                             error);
5101                         if (ret < 0)
5102                                 return ret;
5103                         last_item = MLX5_FLOW_LAYER_NVGRE;
5104                         break;
5105                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5106                         ret = mlx5_flow_validate_item_gre_key
5107                                 (items, item_flags, gre_item, error);
5108                         if (ret < 0)
5109                                 return ret;
5110                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5111                         break;
5112                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5113                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5114                                                             error);
5115                         if (ret < 0)
5116                                 return ret;
5117                         last_item = MLX5_FLOW_LAYER_VXLAN;
5118                         break;
5119                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5120                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5121                                                                 item_flags, dev,
5122                                                                 error);
5123                         if (ret < 0)
5124                                 return ret;
5125                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5126                         break;
5127                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5128                         ret = mlx5_flow_validate_item_geneve(items,
5129                                                              item_flags, dev,
5130                                                              error);
5131                         if (ret < 0)
5132                                 return ret;
5133                         last_item = MLX5_FLOW_LAYER_GENEVE;
5134                         break;
5135                 case RTE_FLOW_ITEM_TYPE_MPLS:
5136                         ret = mlx5_flow_validate_item_mpls(dev, items,
5137                                                            item_flags,
5138                                                            last_item, error);
5139                         if (ret < 0)
5140                                 return ret;
5141                         last_item = MLX5_FLOW_LAYER_MPLS;
5142                         break;
5144                 case RTE_FLOW_ITEM_TYPE_MARK:
5145                         ret = flow_dv_validate_item_mark(dev, items, attr,
5146                                                          error);
5147                         if (ret < 0)
5148                                 return ret;
5149                         last_item = MLX5_FLOW_ITEM_MARK;
5150                         break;
5151                 case RTE_FLOW_ITEM_TYPE_META:
5152                         ret = flow_dv_validate_item_meta(dev, items, attr,
5153                                                          error);
5154                         if (ret < 0)
5155                                 return ret;
5156                         last_item = MLX5_FLOW_ITEM_METADATA;
5157                         break;
5158                 case RTE_FLOW_ITEM_TYPE_ICMP:
5159                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5160                                                            next_protocol,
5161                                                            error);
5162                         if (ret < 0)
5163                                 return ret;
5164                         last_item = MLX5_FLOW_LAYER_ICMP;
5165                         break;
5166                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5167                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5168                                                             next_protocol,
5169                                                             error);
5170                         if (ret < 0)
5171                                 return ret;
5172                         item_ipv6_proto = IPPROTO_ICMPV6;
5173                         last_item = MLX5_FLOW_LAYER_ICMP6;
5174                         break;
5175                 case RTE_FLOW_ITEM_TYPE_TAG:
5176                         ret = flow_dv_validate_item_tag(dev, items,
5177                                                         attr, error);
5178                         if (ret < 0)
5179                                 return ret;
5180                         last_item = MLX5_FLOW_ITEM_TAG;
5181                         break;
5182                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5183                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5184                         break;
5185                 case RTE_FLOW_ITEM_TYPE_GTP:
5186                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5187                                                         error);
5188                         if (ret < 0)
5189                                 return ret;
5190                         last_item = MLX5_FLOW_LAYER_GTP;
5191                         break;
5192                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5193                         /* Capacity will be checked in the translate stage. */
5194                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5195                                                             last_item,
5196                                                             ether_type,
5197                                                             &nic_ecpri_mask,
5198                                                             error);
5199                         if (ret < 0)
5200                                 return ret;
5201                         last_item = MLX5_FLOW_LAYER_ECPRI;
5202                         break;
5203                 default:
5204                         return rte_flow_error_set(error, ENOTSUP,
5205                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5206                                                   NULL, "item not supported");
5207                 }
5208                 item_flags |= last_item;
5209         }
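	/*
	 * Illustrative example (testpmd syntax, not part of this file):
	 * for "flow validate 0 ingress pattern eth / ipv4 proto is 17 /
	 * udp / vxlan / eth / ipv4 / end actions drop / end" the loop
	 * above keeps next_protocol = 17 while validating the outer UDP
	 * item, then resets it to 0xff at the inner IPv4 item once the
	 * VXLAN item has switched matching to the inner layers.
	 */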
5210         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5211                 int type = actions->type;
5212
5213                 if (!mlx5_flow_os_action_supported(type))
5214                         return rte_flow_error_set(error, ENOTSUP,
5215                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5216                                                   actions,
5217                                                   "action not supported");
5218                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5219                         return rte_flow_error_set(error, ENOTSUP,
5220                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5221                                                   actions, "too many actions");
5222                 switch (type) {
5223                 case RTE_FLOW_ACTION_TYPE_VOID:
5224                         break;
5225                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5226                         ret = flow_dv_validate_action_port_id(dev,
5227                                                               action_flags,
5228                                                               actions,
5229                                                               attr,
5230                                                               error);
5231                         if (ret)
5232                                 return ret;
5233                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5234                         ++actions_n;
5235                         break;
5236                 case RTE_FLOW_ACTION_TYPE_FLAG:
5237                         ret = flow_dv_validate_action_flag(dev, action_flags,
5238                                                            attr, error);
5239                         if (ret < 0)
5240                                 return ret;
5241                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5242                                 /* Count all modify-header actions as one. */
5243                                 if (!(action_flags &
5244                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5245                                         ++actions_n;
5246                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5247                                                 MLX5_FLOW_ACTION_MARK_EXT;
5248                         } else {
5249                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5250                                 ++actions_n;
5251                         }
5252                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5253                         break;
5254                 case RTE_FLOW_ACTION_TYPE_MARK:
5255                         ret = flow_dv_validate_action_mark(dev, actions,
5256                                                            action_flags,
5257                                                            attr, error);
5258                         if (ret < 0)
5259                                 return ret;
5260                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5261                                 /* Count all modify-header actions as one. */
5262                                 if (!(action_flags &
5263                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5264                                         ++actions_n;
5265                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5266                                                 MLX5_FLOW_ACTION_MARK_EXT;
5267                         } else {
5268                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5269                                 ++actions_n;
5270                         }
5271                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5272                         break;
5273                 case RTE_FLOW_ACTION_TYPE_SET_META:
5274                         ret = flow_dv_validate_action_set_meta(dev, actions,
5275                                                                action_flags,
5276                                                                attr, error);
5277                         if (ret < 0)
5278                                 return ret;
5279                         /* Count all modify-header actions as one action. */
5280                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5281                                 ++actions_n;
5282                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5283                         rw_act_num += MLX5_ACT_NUM_SET_META;
5284                         break;
5285                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5286                         ret = flow_dv_validate_action_set_tag(dev, actions,
5287                                                               action_flags,
5288                                                               attr, error);
5289                         if (ret < 0)
5290                                 return ret;
5291                         /* Count all modify-header actions as one action. */
5292                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5293                                 ++actions_n;
5294                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5295                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5296                         break;
5297                 case RTE_FLOW_ACTION_TYPE_DROP:
5298                         ret = mlx5_flow_validate_action_drop(action_flags,
5299                                                              attr, error);
5300                         if (ret < 0)
5301                                 return ret;
5302                         action_flags |= MLX5_FLOW_ACTION_DROP;
5303                         ++actions_n;
5304                         break;
5305                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5306                         ret = mlx5_flow_validate_action_queue(actions,
5307                                                               action_flags, dev,
5308                                                               attr, error);
5309                         if (ret < 0)
5310                                 return ret;
5311                         queue_index = ((const struct rte_flow_action_queue *)
5312                                                         (actions->conf))->index;
5313                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5314                         ++actions_n;
5315                         break;
5316                 case RTE_FLOW_ACTION_TYPE_RSS:
5317                         rss = actions->conf;
5318                         ret = mlx5_flow_validate_action_rss(actions,
5319                                                             action_flags, dev,
5320                                                             attr, item_flags,
5321                                                             error);
5322                         if (ret < 0)
5323                                 return ret;
5324                         if (rss != NULL && rss->queue_num)
5325                                 queue_index = rss->queue[0];
5326                         action_flags |= MLX5_FLOW_ACTION_RSS;
5327                         ++actions_n;
5328                         break;
5329                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5330                         ret = mlx5_flow_validate_action_default_miss
5331                                                 (action_flags, attr,
5332                                                  error);
5333                         if (ret < 0)
5334                                 return ret;
5335                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5336                         ++actions_n;
5337                         break;
5338                 case RTE_FLOW_ACTION_TYPE_COUNT:
5339                         ret = flow_dv_validate_action_count(dev, error);
5340                         if (ret < 0)
5341                                 return ret;
5342                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5343                         ++actions_n;
5344                         break;
5345                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5346                         if (flow_dv_validate_action_pop_vlan(dev,
5347                                                              action_flags,
5348                                                              actions,
5349                                                              item_flags, attr,
5350                                                              error))
5351                                 return -rte_errno;
5352                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5353                         ++actions_n;
5354                         break;
5355                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5356                         ret = flow_dv_validate_action_push_vlan(dev,
5357                                                                 action_flags,
5358                                                                 vlan_m,
5359                                                                 actions, attr,
5360                                                                 error);
5361                         if (ret < 0)
5362                                 return ret;
5363                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5364                         ++actions_n;
5365                         break;
5366                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5367                         ret = flow_dv_validate_action_set_vlan_pcp
5368                                                 (action_flags, actions, error);
5369                         if (ret < 0)
5370                                 return ret;
5371                         /* Count PCP with push_vlan command. */
5372                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5373                         break;
5374                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5375                         ret = flow_dv_validate_action_set_vlan_vid
5376                                                 (item_flags, action_flags,
5377                                                  actions, error);
5378                         if (ret < 0)
5379                                 return ret;
5380                         /* Count VID with push_vlan command. */
5381                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5382                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5383                         break;
5384                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5385                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5386                         ret = flow_dv_validate_action_l2_encap(dev,
5387                                                                action_flags,
5388                                                                actions, attr,
5389                                                                error);
5390                         if (ret < 0)
5391                                 return ret;
5392                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5393                         ++actions_n;
5394                         break;
5395                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5396                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5397                         ret = flow_dv_validate_action_decap(dev, action_flags,
5398                                                             attr, error);
5399                         if (ret < 0)
5400                                 return ret;
5401                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5402                         ++actions_n;
5403                         break;
5404                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5405                         ret = flow_dv_validate_action_raw_encap_decap
5406                                 (dev, NULL, actions->conf, attr, &action_flags,
5407                                  &actions_n, error);
5408                         if (ret < 0)
5409                                 return ret;
5410                         break;
5411                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5412                         decap = actions->conf;
5413                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5414                                 ;
5415                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5416                                 encap = NULL;
5417                                 actions--;
5418                         } else {
5419                                 encap = actions->conf;
5420                         }
5421                         ret = flow_dv_validate_action_raw_encap_decap
5422                                            (dev,
5423                                             decap ? decap : &empty_decap, encap,
5424                                             attr, &action_flags, &actions_n,
5425                                             error);
5426                         if (ret < 0)
5427                                 return ret;
5428                         break;
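		/*
		 * Illustrative example (testpmd syntax, not part of this
		 * file): in "actions raw_decap index 0 / raw_encap index 1
		 * / end" the lookahead above pairs the two into a single
		 * decap-then-encap (L3 tunnel rewrite) validation,
		 * skipping any void actions in between.
		 */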
5429                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5430                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5431                         ret = flow_dv_validate_action_modify_mac(action_flags,
5432                                                                  actions,
5433                                                                  item_flags,
5434                                                                  error);
5435                         if (ret < 0)
5436                                 return ret;
5437                         /* Count all modify-header actions as one action. */
5438                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5439                                 ++actions_n;
5440                         action_flags |= actions->type ==
5441                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5442                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5443                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5444                         /*
5445                          * Even though the source and destination MAC addresses
5446                          * overlap in the 4B-aligned header, the convert
5447                          * function handles them separately, so 4 SW actions
5448                          * are created in total. 2 actions are added each
5449                          * time, no matter how many address bytes are set.
5450                          */
5451                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5452                         break;
5453                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5454                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5455                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5456                                                                   actions,
5457                                                                   item_flags,
5458                                                                   error);
5459                         if (ret < 0)
5460                                 return ret;
5461                         /* Count all modify-header actions as one action. */
5462                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5463                                 ++actions_n;
5464                         action_flags |= actions->type ==
5465                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5466                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5467                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5468                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5469                         break;
5470                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5471                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5472                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5473                                                                   actions,
5474                                                                   item_flags,
5475                                                                   error);
5476                         if (ret < 0)
5477                                 return ret;
5478                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5479                                 return rte_flow_error_set(error, ENOTSUP,
5480                                         RTE_FLOW_ERROR_TYPE_ACTION,
5481                                         actions,
5482                                         "Can't change header "
5483                                         "with ICMPv6 proto");
5484                         /* Count all modify-header actions as one action. */
5485                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5486                                 ++actions_n;
5487                         action_flags |= actions->type ==
5488                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5489                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5490                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5491                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5492                         break;
5493                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5494                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5495                         ret = flow_dv_validate_action_modify_tp(action_flags,
5496                                                                 actions,
5497                                                                 item_flags,
5498                                                                 error);
5499                         if (ret < 0)
5500                                 return ret;
5501                         /* Count all modify-header actions as one action. */
5502                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5503                                 ++actions_n;
5504                         action_flags |= actions->type ==
5505                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5506                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5507                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5508                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5509                         break;
5510                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5511                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5512                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5513                                                                  actions,
5514                                                                  item_flags,
5515                                                                  error);
5516                         if (ret < 0)
5517                                 return ret;
5518                         /* Count all modify-header actions as one action. */
5519                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5520                                 ++actions_n;
5521                         action_flags |= actions->type ==
5522                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5523                                                 MLX5_FLOW_ACTION_SET_TTL :
5524                                                 MLX5_FLOW_ACTION_DEC_TTL;
5525                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5526                         break;
5527                 case RTE_FLOW_ACTION_TYPE_JUMP:
5528                         ret = flow_dv_validate_action_jump(actions,
5529                                                            action_flags,
5530                                                            attr, external,
5531                                                            error);
5532                         if (ret)
5533                                 return ret;
5534                         ++actions_n;
5535                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5536                         break;
5537                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5538                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5539                         ret = flow_dv_validate_action_modify_tcp_seq
5540                                                                 (action_flags,
5541                                                                  actions,
5542                                                                  item_flags,
5543                                                                  error);
5544                         if (ret < 0)
5545                                 return ret;
5546                         /* Count all modify-header actions as one action. */
5547                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5548                                 ++actions_n;
5549                         action_flags |= actions->type ==
5550                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5551                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5552                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5553                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5554                         break;
5555                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5556                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5557                         ret = flow_dv_validate_action_modify_tcp_ack
5558                                                                 (action_flags,
5559                                                                  actions,
5560                                                                  item_flags,
5561                                                                  error);
5562                         if (ret < 0)
5563                                 return ret;
5564                         /* Count all modify-header actions as one action. */
5565                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5566                                 ++actions_n;
5567                         action_flags |= actions->type ==
5568                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5569                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5570                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5571                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5572                         break;
5573                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5574                         break;
5575                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5576                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5577                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5578                         break;
5579                 case RTE_FLOW_ACTION_TYPE_METER:
5580                         ret = mlx5_flow_validate_action_meter(dev,
5581                                                               action_flags,
5582                                                               actions, attr,
5583                                                               error);
5584                         if (ret < 0)
5585                                 return ret;
5586                         action_flags |= MLX5_FLOW_ACTION_METER;
5587                         ++actions_n;
5588                         /* Meter action will add one more TAG action. */
5589                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5590                         break;
5591                 case RTE_FLOW_ACTION_TYPE_AGE:
5592                         ret = flow_dv_validate_action_age(action_flags,
5593                                                           actions, dev,
5594                                                           error);
5595                         if (ret < 0)
5596                                 return ret;
5597                         action_flags |= MLX5_FLOW_ACTION_AGE;
5598                         ++actions_n;
5599                         break;
5600                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5601                         ret = flow_dv_validate_action_modify_ipv4_dscp
5602                                                          (action_flags,
5603                                                           actions,
5604                                                           item_flags,
5605                                                           error);
5606                         if (ret < 0)
5607                                 return ret;
5608                         /* Count all modify-header actions as one action. */
5609                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5610                                 ++actions_n;
5611                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5612                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5613                         break;
5614                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5615                         ret = flow_dv_validate_action_modify_ipv6_dscp
5616                                                                 (action_flags,
5617                                                                  actions,
5618                                                                  item_flags,
5619                                                                  error);
5620                         if (ret < 0)
5621                                 return ret;
5622                         /* Count all modify-header actions as one action. */
5623                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5624                                 ++actions_n;
5625                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5626                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5627                         break;
5628                 default:
5629                         return rte_flow_error_set(error, ENOTSUP,
5630                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5631                                                   actions,
5632                                                   "action not supported");
5633                 }
5634         }
5635         /*
5636          * Validate the drop action mutual exclusion with other actions.
5637          * Drop action is mutually-exclusive with any other action, except for
5638          * Count action.
5639          */
5640         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5641             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5642                 return rte_flow_error_set(error, EINVAL,
5643                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5644                                           "Drop action is mutually-exclusive "
5645                                           "with any other action, except for "
5646                                           "Count action");
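	/*
	 * Illustrative examples (testpmd syntax, not part of this file):
	 * "actions drop / count / end" passes this check, while
	 * "actions drop / queue index 0 / end" is rejected here.
	 */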
5647         /* E-Switch has a few restrictions on using items and actions. */
5648         if (attr->transfer) {
5649                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5650                     action_flags & MLX5_FLOW_ACTION_FLAG)
5651                         return rte_flow_error_set(error, ENOTSUP,
5652                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5653                                                   NULL,
5654                                                   "unsupported action FLAG");
5655                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5656                     action_flags & MLX5_FLOW_ACTION_MARK)
5657                         return rte_flow_error_set(error, ENOTSUP,
5658                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5659                                                   NULL,
5660                                                   "unsupported action MARK");
5661                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5662                         return rte_flow_error_set(error, ENOTSUP,
5663                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5664                                                   NULL,
5665                                                   "unsupported action QUEUE");
5666                 if (action_flags & MLX5_FLOW_ACTION_RSS)
5667                         return rte_flow_error_set(error, ENOTSUP,
5668                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5669                                                   NULL,
5670                                                   "unsupported action RSS");
5671                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5672                         return rte_flow_error_set(error, EINVAL,
5673                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5674                                                   actions,
5675                                                   "no fate action is found");
5676         } else {
5677                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5678                         return rte_flow_error_set(error, EINVAL,
5679                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5680                                                   actions,
5681                                                   "no fate action is found");
5682         }
5683         /* Continue validation for Xcap and VLAN actions. */
5684         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
5685                              MLX5_FLOW_VLAN_ACTIONS)) &&
5686             (queue_index == 0xFFFF ||
5687              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5688                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5689                     MLX5_FLOW_XCAP_ACTIONS)
5690                         return rte_flow_error_set(error, ENOTSUP,
5691                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5692                                                   NULL, "encap and decap "
5693                                                   "combination isn't supported");
5694                 if (!attr->transfer && attr->ingress) {
5695                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
5696                                 return rte_flow_error_set
5697                                                 (error, ENOTSUP,
5698                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5699                                                  NULL, "encap is not supported"
5700                                                  " for ingress traffic");
5701                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5702                                 return rte_flow_error_set
5703                                                 (error, ENOTSUP,
5704                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5705                                                  NULL, "push VLAN action not "
5706                                                  "supported for ingress");
5707                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
5708                                         MLX5_FLOW_VLAN_ACTIONS)
5709                                 return rte_flow_error_set
5710                                                 (error, ENOTSUP,
5711                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5712                                                  NULL, "no support for "
5713                                                  "multiple VLAN actions");
5714                 }
5715         }
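	/*
	 * For example (illustrative): an ingress rule carrying a
	 * vxlan_encap action passes the block above only when its fate
	 * is a hairpin Rx queue; with a regular Rx queue it fails with
	 * "encap is not supported for ingress traffic".
	 */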
5716         /* Hairpin flow will add one more TAG action. */
5717         if (hairpin > 0)
5718                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5719         /* Extra metadata enabled: one more TAG action will be added. */
5720         if (dev_conf->dv_flow_en &&
5721             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
5722             mlx5_flow_ext_mreg_supported(dev))
5723                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5724         if ((uint32_t)rw_act_num >
5725                         flow_dv_modify_hdr_action_max(dev, is_root)) {
5726                 return rte_flow_error_set(error, ENOTSUP,
5727                                           RTE_FLOW_ERROR_TYPE_ACTION,
5728                                           NULL, "too many header modify"
5729                                           " actions to support");
5730         }
5731         return 0;
5732 }
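/*
 * Minimal usage sketch (hypothetical helper, not part of the driver):
 * builds a drop + count action list, the one combination the drop
 * exclusivity check above allows, and validates it through the generic
 * rte_flow API, which dispatches to flow_dv_validate() on DV-enabled
 * mlx5 ports.
 */
static __rte_unused int
flow_dv_validate_drop_count_sketch(uint16_t port_id,
				   struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Drop combined only with count: passes the exclusivity check. */
	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}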
5733
5734 /**
5735  * Internal preparation function. Allocates the DV flow structure;
5736  * its size is constant.
5737  *
5738  * @param[in] dev
5739  *   Pointer to the rte_eth_dev structure.
5740  * @param[in] attr
5741  *   Pointer to the flow attributes.
5742  * @param[in] items
5743  *   Pointer to the list of items.
5744  * @param[in] actions
5745  *   Pointer to the list of actions.
5746  * @param[out] error
5747  *   Pointer to the error structure.
5748  *
5749  * @return
5750  *   Pointer to mlx5_flow object on success,
5751  *   otherwise NULL and rte_errno is set.
5752  */
5753 static struct mlx5_flow *
5754 flow_dv_prepare(struct rte_eth_dev *dev,
5755                 const struct rte_flow_attr *attr,
5756                 const struct rte_flow_item items[] __rte_unused,
5757                 const struct rte_flow_action actions[] __rte_unused,
5758                 struct rte_flow_error *error)
5759 {
5760         uint32_t handle_idx = 0;
5761         struct mlx5_flow *dev_flow;
5762         struct mlx5_flow_handle *dev_handle;
5763         struct mlx5_priv *priv = dev->data->dev_private;
5764
5765         /* Sanity check to avoid corrupting the inter_flows array. */
5766         if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
5767                 rte_flow_error_set(error, ENOSPC,
5768                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5769                                    "no free temporary device flow");
5770                 return NULL;
5771         }
5772         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
5773                                    &handle_idx);
5774         if (!dev_handle) {
5775                 rte_flow_error_set(error, ENOMEM,
5776                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5777                                    "not enough memory to create flow handle");
5778                 return NULL;
5779         }
5780         /* Multi-threading is not supported here. */
5781         dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
5782         dev_flow->handle = dev_handle;
5783         dev_flow->handle_idx = handle_idx;
5784         /*
5785          * Some old rdma-core releases check the length of the matching
5786          * parameter before continuing, and that length must exclude the
5787          * misc4 param. If the flow needs misc4 support, the length is
5788          * adjusted accordingly later on. Each param member is naturally
5789          * aligned on a 64B boundary.
5790          */
5791         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
5792                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
5793         /*
5794          * The matching value needs to be cleared to 0 before use. In the
5795          * past, it was cleared automatically by the rte_*alloc API. The
5796          * time consumption is almost the same as before.
5797          */
5798         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
5799         dev_flow->ingress = attr->ingress;
5800         dev_flow->dv.transfer = attr->transfer;
5801         return dev_flow;
5802 }
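/*
 * Illustrative arithmetic (assuming the PRM layout in which every
 * fte_match_set_* member occupies 64 bytes): excluding misc4 shrinks
 * the initial matcher value size above by
 * MLX5_ST_SZ_BYTES(fte_match_set_misc4) == 64 bytes; the full size is
 * needed only when the flow actually matches on misc4 fields.
 */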
5803
5804 #ifdef RTE_LIBRTE_MLX5_DEBUG
5805 /**
5806  * Sanity check for match mask and value, similar to check_valid_spec() in
5807  * the kernel driver. Fails if an unmasked bit is present in the value.
5808  *
5809  * @param match_mask
5810  *   Pointer to the match mask buffer.
5811  * @param match_value
5812  *   Pointer to the match value buffer.
5813  *
5814  * @return
5815  *   0 if valid, -EINVAL otherwise.
5816  */
5817 static int
5818 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5819 {
5820         uint8_t *m = match_mask;
5821         uint8_t *v = match_value;
5822         unsigned int i;
5823
5824         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5825                 if (v[i] & ~m[i]) {
5826                         DRV_LOG(ERR,
5827                                 "match_value differs from match_criteria"
5828                                 " %p[%u] != %p[%u]",
5829                                 match_value, i, match_mask, i);
5830                         return -EINVAL;
5831                 }
5832         }
5833         return 0;
5834 }
5835 #endif
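#ifdef RTE_LIBRTE_MLX5_DEBUG
/*
 * Minimal sketch (hypothetical buffers, not part of the driver): a
 * value bit set outside its mask makes the check above fail.
 */
static __rte_unused void
flow_dv_check_valid_spec_sketch(void)
{
	uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { [0] = 0xf0 };
	uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { [0] = 0x0f };

	/* value[0] has bits outside mask[0]: the check returns -EINVAL. */
	MLX5_ASSERT(flow_dv_check_valid_spec(mask, value) == -EINVAL);
}
#endif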
5836
5837 /**
5838  * Add match of ip_version.
5839  *
5840  * @param[in] group
5841  *   Flow group.
5842  * @param[in] headers_v
5843  *   Values header pointer.
5844  * @param[in] headers_m
5845  *   Masks header pointer.
5846  * @param[in] ip_version
5847  *   The IP version to set.
5848  */
5849 static inline void
5850 flow_dv_set_match_ip_version(uint32_t group,
5851                              void *headers_v,
5852                              void *headers_m,
5853                              uint8_t ip_version)
5854 {
5855         if (group == 0)
5856                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5857         else
5858                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
5859                          ip_version);
5860         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
5861         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
5862         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
5863 }
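/*
 * For example (illustrative): matching IPv4 on the root table
 * (group 0) programs ip_version with mask 0xf and value 4, since the
 * root table does not accept a partial ip_version mask; on non-root
 * groups mask and value are both set to 4.
 */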
5864
5865 /**
5866  * Add Ethernet item to matcher and to the value.
5867  *
5868  * @param[in, out] matcher
5869  *   Flow matcher.
5870  * @param[in, out] key
5871  *   Flow matcher value.
5872  * @param[in] item
5873  *   Flow pattern to translate.
5874  * @param[in] inner
5875  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
5876  */
5877 static void
5878 flow_dv_translate_item_eth(void *matcher, void *key,
5879                            const struct rte_flow_item *item, int inner,
5880                            uint32_t group)
5881 {
5882         const struct rte_flow_item_eth *eth_m = item->mask;
5883         const struct rte_flow_item_eth *eth_v = item->spec;
5884         const struct rte_flow_item_eth nic_mask = {
5885                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5886                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5887                 .type = RTE_BE16(0xffff),
5888         };
5889         void *headers_m;
5890         void *headers_v;
5891         char *l24_v;
5892         unsigned int i;
5893
5894         if (!eth_v)
5895                 return;
5896         if (!eth_m)
5897                 eth_m = &nic_mask;
5898         if (inner) {
5899                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5900                                          inner_headers);
5901                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5902         } else {
5903                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5904                                          outer_headers);
5905                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5906         }
5907         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5908                &eth_m->dst, sizeof(eth_m->dst));
5909         /* The value must be in the range of the mask. */
5910         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5911         for (i = 0; i < sizeof(eth_m->dst); ++i)
5912                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5913         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5914                &eth_m->src, sizeof(eth_m->src));
5915         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5916         /* The value must be in the range of the mask. */
5917         for (i = 0; i < sizeof(eth_m->src); ++i)
5918                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5919         if (eth_v->type) {
5920                 /* When ethertype is present set mask for tagged VLAN. */
5921                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5922                 /* Set value for tagged VLAN if ethertype is 802.1Q. */
5923                 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
5924                     eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
5925                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
5926                                  1);
5927                         /* Return here to avoid setting match on ethertype. */
5928                         return;
5929                 }
5930         }
5931         /*
5932          * HW supports match on one Ethertype, the Ethertype following the last
5933          * VLAN tag of the packet (see PRM).
5934          * Set match on ethertype only if ETH header is not followed by VLAN.
5935          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
5936          * ethertype, and use ip_version field instead.
5937          * eCPRI over Ether layer will use type value 0xAEFE.
5938          */
5939         if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
5940             eth_m->type == 0xFFFF) {
5941                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
5942         } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
5943                    eth_m->type == 0xFFFF) {
5944                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
5945         } else {
5946                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5947                          rte_be_to_cpu_16(eth_m->type));
5948                 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5949                                      ethertype);
5950                 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5951         }
5952 }
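/*
 * Illustrative item definition (hypothetical name, not part of the
 * driver): with this spec/mask pair the branch above programs
 * ip_version = 4 instead of an ethertype match.
 */
static __rte_unused const struct rte_flow_item flow_dv_eth_ipv4_example = {
	.type = RTE_FLOW_ITEM_TYPE_ETH,
	.spec = &(const struct rte_flow_item_eth){
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
	},
	.mask = &(const struct rte_flow_item_eth){
		.type = RTE_BE16(0xffff),
	},
};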
5953
5954 /**
5955  * Add VLAN item to matcher and to the value.
5956  *
5957  * @param[in, out] dev_flow
5958  *   Flow descriptor.
5959  * @param[in, out] matcher
5960  *   Flow matcher.
5961  * @param[in, out] key
5962  *   Flow matcher value.
5963  * @param[in] item
5964  *   Flow pattern to translate.
5965  * @param[in] inner
5966  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
5967  */
5968 static void
5969 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5970                             void *matcher, void *key,
5971                             const struct rte_flow_item *item,
5972                             int inner, uint32_t group)
5973 {
5974         const struct rte_flow_item_vlan *vlan_m = item->mask;
5975         const struct rte_flow_item_vlan *vlan_v = item->spec;
5976         void *headers_m;
5977         void *headers_v;
5978         uint16_t tci_m;
5979         uint16_t tci_v;
5980
5981         if (inner) {
5982                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5983                                          inner_headers);
5984                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5985         } else {
5986                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5987                                          outer_headers);
5988                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5989                 /*
5990                  * This is a workaround; masks are not supported,
5991                  * and have been pre-validated.
5992                  */
5993                 if (vlan_v)
5994                         dev_flow->handle->vf_vlan.tag =
5995                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5996         }
5997         /*
5998          * When VLAN item exists in flow, mark packet as tagged,
5999          * even if TCI is not specified.
6000          */
6001         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6002         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
6003         if (!vlan_v)
6004                 return;
6005         if (!vlan_m)
6006                 vlan_m = &rte_flow_item_vlan_mask;
6007         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6008         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6009         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
6010         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
6011         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
6012         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
6013         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
6014         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
6015         /*
6016          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6017          * ethertype, and use ip_version field instead.
6018          */
6019         if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
6020             vlan_m->inner_type == 0xFFFF) {
6021                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6022         } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
6023                    vlan_m->inner_type == 0xFFFF) {
6024                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6025         } else {
6026                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
6027                          rte_be_to_cpu_16(vlan_m->inner_type));
6028                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
6029                          rte_be_to_cpu_16(vlan_m->inner_type &
6030                                           vlan_v->inner_type));
6031         }
6032 }
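/*
 * Worked example (illustrative): a fully masked TCI of 0xe00a is
 * split above into first_prio = 0xe00a >> 13 = 7, first_cfi =
 * (0xe00a >> 12) & 0x1 = 0 and first_vid = 0xe00a & 0xfff = 10;
 * MLX5_SET() truncates each value to its field width.
 */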
6033
6034 /**
6035  * Add IPV4 item to matcher and to the value.
6036  *
6037  * @param[in, out] matcher
6038  *   Flow matcher.
6039  * @param[in, out] key
6040  *   Flow matcher value.
6041  * @param[in] item
6042  *   Flow pattern to translate.
6043  * @param[in] item_flags
6044  *   Bit-fields that holds the items detected until now.
6045  * @param[in] inner
6046  *   Item is inner pattern.
6047  * @param[in] group
6048  *   The group to insert the rule.
6049  */
6050 static void
6051 flow_dv_translate_item_ipv4(void *matcher, void *key,
6052                             const struct rte_flow_item *item,
6053                             const uint64_t item_flags,
6054                             int inner, uint32_t group)
6055 {
6056         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6057         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6058         const struct rte_flow_item_ipv4 nic_mask = {
6059                 .hdr = {
6060                         .src_addr = RTE_BE32(0xffffffff),
6061                         .dst_addr = RTE_BE32(0xffffffff),
6062                         .type_of_service = 0xff,
6063                         .next_proto_id = 0xff,
6064                         .time_to_live = 0xff,
6065                 },
6066         };
6067         void *headers_m;
6068         void *headers_v;
6069         char *l24_m;
6070         char *l24_v;
6071         uint8_t tos;
6072
6073         if (inner) {
6074                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6075                                          inner_headers);
6076                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6077         } else {
6078                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6079                                          outer_headers);
6080                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6081         }
6082         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6083         /*
6084          * On outer header (which must contain L2), or inner header with L2,
6085          * set cvlan_tag mask bit to mark this packet as untagged.
6086          * This should be done even if item->spec is empty.
6087          */
6088         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6089                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6090         if (!ipv4_v)
6091                 return;
6092         if (!ipv4_m)
6093                 ipv4_m = &nic_mask;
6094         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6095                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6096         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6097                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6098         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6099         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6100         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6101                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6102         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6103                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6104         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6105         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6106         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6107         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6108                  ipv4_m->hdr.type_of_service);
6109         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6110         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6111                  ipv4_m->hdr.type_of_service >> 2);
6112         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6113         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6114                  ipv4_m->hdr.next_proto_id);
6115         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6116                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6117         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6118                  ipv4_m->hdr.time_to_live);
6119         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6120                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6121 }
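
/*
 * Illustrative sketch, not part of the driver: the matcher/key convention
 * used throughout these translators. For every field the matcher side
 * receives the raw mask and the key side receives spec & mask; the ToS
 * byte is additionally split into ECN (low 2 bits) and DSCP (high 6 bits)
 * as done above. The helper name is hypothetical.
 */
static inline void
example_ipv4_tos_split(uint8_t tos_spec, uint8_t tos_mask,
		       uint8_t *ecn, uint8_t *dscp)
{
	uint8_t tos = tos_spec & tos_mask;	/* Key value is spec & mask. */

	*ecn = tos & 0x3;	/* ip_ecn: ECN bits. */
	*dscp = tos >> 2;	/* ip_dscp: DSCP bits. */
}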
6122
6123 /**
6124  * Add IPV6 item to matcher and to the value.
6125  *
6126  * @param[in, out] matcher
6127  *   Flow matcher.
6128  * @param[in, out] key
6129  *   Flow matcher value.
6130  * @param[in] item
6131  *   Flow pattern to translate.
6132  * @param[in] item_flags
6133  *   Bit-fields that hold the items detected until now.
6134  * @param[in] inner
6135  *   Item is inner pattern.
6136  * @param[in] group
6137  *   The group to insert the rule.
6138  */
6139 static void
6140 flow_dv_translate_item_ipv6(void *matcher, void *key,
6141                             const struct rte_flow_item *item,
6142                             const uint64_t item_flags,
6143                             int inner, uint32_t group)
6144 {
6145         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6146         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6147         const struct rte_flow_item_ipv6 nic_mask = {
6148                 .hdr = {
6149                         .src_addr =
6150                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6151                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6152                         .dst_addr =
6153                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6154                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6155                         .vtc_flow = RTE_BE32(0xffffffff),
6156                         .proto = 0xff,
6157                         .hop_limits = 0xff,
6158                 },
6159         };
6160         void *headers_m;
6161         void *headers_v;
6162         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6163         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6164         char *l24_m;
6165         char *l24_v;
6166         uint32_t vtc_m;
6167         uint32_t vtc_v;
6168         int i;
6169         int size;
6170
6171         if (inner) {
6172                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6173                                          inner_headers);
6174                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6175         } else {
6176                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6177                                          outer_headers);
6178                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6179         }
6180         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6181         /*
6182          * On outer header (which must contain L2), or inner header with L2,
6183          * set cvlan_tag mask bit to mark this packet as untagged.
6184          * This should be done even if item->spec is empty.
6185          */
6186         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6187                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6188         if (!ipv6_v)
6189                 return;
6190         if (!ipv6_m)
6191                 ipv6_m = &nic_mask;
6192         size = sizeof(ipv6_m->hdr.dst_addr);
6193         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6194                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6195         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6196                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6197         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6198         for (i = 0; i < size; ++i)
6199                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6200         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6201                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6202         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6203                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6204         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6205         for (i = 0; i < size; ++i)
6206                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6207         /* TOS. */
6208         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6209         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6210         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6211         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6212         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6213         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6214         /* Label. */
6215         if (inner) {
6216                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6217                          vtc_m);
6218                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6219                          vtc_v);
6220         } else {
6221                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6222                          vtc_m);
6223                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6224                          vtc_v);
6225         }
6226         /* Protocol. */
6227         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6228                  ipv6_m->hdr.proto);
6229         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6230                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6231         /* Hop limit. */
6232         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6233                  ipv6_m->hdr.hop_limits);
6234         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6235                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6236 }
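
/*
 * Illustrative sketch, not part of the driver: how the host-order IPv6
 * vtc_flow word is sliced above. Bits 31-28 carry the version, bits 27-20
 * the traffic class (DSCP in its high 6 bits, ECN in its low 2) and bits
 * 19-0 the flow label. The helper name is hypothetical.
 */
static inline void
example_ipv6_vtc_split(uint32_t vtc, uint8_t *ecn, uint8_t *dscp,
		       uint32_t *flow_label)
{
	*ecn = (vtc >> 20) & 0x3;	/* ip_ecn. */
	*dscp = (vtc >> 22) & 0x3f;	/* ip_dscp. */
	*flow_label = vtc & 0xfffff;	/* 20-bit flow label. */
}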
6237
6238 /**
6239  * Add TCP item to matcher and to the value.
6240  *
6241  * @param[in, out] matcher
6242  *   Flow matcher.
6243  * @param[in, out] key
6244  *   Flow matcher value.
6245  * @param[in] item
6246  *   Flow pattern to translate.
6247  * @param[in] inner
6248  *   Item is inner pattern.
6249  */
6250 static void
6251 flow_dv_translate_item_tcp(void *matcher, void *key,
6252                            const struct rte_flow_item *item,
6253                            int inner)
6254 {
6255         const struct rte_flow_item_tcp *tcp_m = item->mask;
6256         const struct rte_flow_item_tcp *tcp_v = item->spec;
6257         void *headers_m;
6258         void *headers_v;
6259
6260         if (inner) {
6261                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6262                                          inner_headers);
6263                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6264         } else {
6265                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6266                                          outer_headers);
6267                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6268         }
6269         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6270         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6271         if (!tcp_v)
6272                 return;
6273         if (!tcp_m)
6274                 tcp_m = &rte_flow_item_tcp_mask;
6275         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6276                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6277         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6278                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6279         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6280                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6281         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6282                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6283         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6284                  tcp_m->hdr.tcp_flags);
6285         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6286                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6287 }
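
/*
 * Illustrative sketch, not part of the driver: the byte-order handling for
 * L4 ports shared by the TCP and UDP translators above. Spec and mask are
 * ANDed while still big endian and converted to CPU order only once, since
 * MLX5_SET() expects host-order values. The helper name is hypothetical.
 */
static inline void
example_l4_port_match(rte_be16_t spec, rte_be16_t mask,
		      uint16_t *port_m, uint16_t *port_v)
{
	*port_m = rte_be_to_cpu_16(mask);		/* Matcher side. */
	*port_v = rte_be_to_cpu_16(spec & mask);	/* Key side. */
}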
6288
6289 /**
6290  * Add UDP item to matcher and to the value.
6291  *
6292  * @param[in, out] matcher
6293  *   Flow matcher.
6294  * @param[in, out] key
6295  *   Flow matcher value.
6296  * @param[in] item
6297  *   Flow pattern to translate.
6298  * @param[in] inner
6299  *   Item is inner pattern.
6300  */
6301 static void
6302 flow_dv_translate_item_udp(void *matcher, void *key,
6303                            const struct rte_flow_item *item,
6304                            int inner)
6305 {
6306         const struct rte_flow_item_udp *udp_m = item->mask;
6307         const struct rte_flow_item_udp *udp_v = item->spec;
6308         void *headers_m;
6309         void *headers_v;
6310
6311         if (inner) {
6312                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6313                                          inner_headers);
6314                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6315         } else {
6316                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6317                                          outer_headers);
6318                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6319         }
6320         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6321         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6322         if (!udp_v)
6323                 return;
6324         if (!udp_m)
6325                 udp_m = &rte_flow_item_udp_mask;
6326         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6327                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6328         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6329                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6330         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6331                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6332         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6333                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6334 }
6335
6336 /**
6337  * Add GRE optional Key item to matcher and to the value.
6338  *
6339  * @param[in, out] matcher
6340  *   Flow matcher.
6341  * @param[in, out] key
6342  *   Flow matcher value.
6343  * @param[in] item
6344  *   Flow pattern to translate.
6347  */
6348 static void
6349 flow_dv_translate_item_gre_key(void *matcher, void *key,
6350                                    const struct rte_flow_item *item)
6351 {
6352         const rte_be32_t *key_m = item->mask;
6353         const rte_be32_t *key_v = item->spec;
6354         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6355         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6356         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6357
6358         /* GRE K bit must be on and should already be validated */
6359         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6360         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6361         if (!key_v)
6362                 return;
6363         if (!key_m)
6364                 key_m = &gre_key_default_mask;
6365         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6366                  rte_be_to_cpu_32(*key_m) >> 8);
6367         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6368                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6369         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6370                  rte_be_to_cpu_32(*key_m) & 0xFF);
6371         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6372                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6373 }
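
/*
 * Illustrative sketch, not part of the driver: the 32-bit GRE key is not a
 * single matcher field; it is split into gre_key_h (upper 24 bits) and
 * gre_key_l (lower 8 bits) as done above. The helper name is hypothetical.
 */
static inline void
example_gre_key_split(uint32_t gre_key, uint32_t *key_h, uint8_t *key_l)
{
	*key_h = gre_key >> 8;		/* gre_key_h: upper 24 bits. */
	*key_l = gre_key & 0xFF;	/* gre_key_l: lower 8 bits. */
}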
6374
6375 /**
6376  * Add GRE item to matcher and to the value.
6377  *
6378  * @param[in, out] matcher
6379  *   Flow matcher.
6380  * @param[in, out] key
6381  *   Flow matcher value.
6382  * @param[in] item
6383  *   Flow pattern to translate.
6384  * @param[in] inner
6385  *   Item is inner pattern.
6386  */
6387 static void
6388 flow_dv_translate_item_gre(void *matcher, void *key,
6389                            const struct rte_flow_item *item,
6390                            int inner)
6391 {
6392         const struct rte_flow_item_gre *gre_m = item->mask;
6393         const struct rte_flow_item_gre *gre_v = item->spec;
6394         void *headers_m;
6395         void *headers_v;
6396         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6397         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6398         struct {
6399                 union {
6400                         __extension__
6401                         struct {
6402                                 uint16_t version:3;
6403                                 uint16_t rsvd0:9;
6404                                 uint16_t s_present:1;
6405                                 uint16_t k_present:1;
6406                                 uint16_t rsvd_bit1:1;
6407                                 uint16_t c_present:1;
6408                         };
6409                         uint16_t value;
6410                 };
6411         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6412
6413         if (inner) {
6414                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6415                                          inner_headers);
6416                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6417         } else {
6418                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6419                                          outer_headers);
6420                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6421         }
6422         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6423         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6424         if (!gre_v)
6425                 return;
6426         if (!gre_m)
6427                 gre_m = &rte_flow_item_gre_mask;
6428         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6429                  rte_be_to_cpu_16(gre_m->protocol));
6430         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6431                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6432         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6433         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6434         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6435                  gre_crks_rsvd0_ver_m.c_present);
6436         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6437                  gre_crks_rsvd0_ver_v.c_present &
6438                  gre_crks_rsvd0_ver_m.c_present);
6439         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6440                  gre_crks_rsvd0_ver_m.k_present);
6441         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6442                  gre_crks_rsvd0_ver_v.k_present &
6443                  gre_crks_rsvd0_ver_m.k_present);
6444         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6445                  gre_crks_rsvd0_ver_m.s_present);
6446         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6447                  gre_crks_rsvd0_ver_v.s_present &
6448                  gre_crks_rsvd0_ver_m.s_present);
6449 }
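
/*
 * Illustrative sketch, not part of the driver: a shift-based equivalent of
 * the bit-field union above for decoding the host-order c_rsvd0_ver word,
 * avoiding the implementation-defined bit-field layout. The helper name is
 * hypothetical.
 */
static inline void
example_gre_flags_split(uint16_t c_rsvd0_ver, uint8_t *c_present,
			uint8_t *k_present, uint8_t *s_present)
{
	*c_present = (c_rsvd0_ver >> 15) & 0x1;	/* C: checksum present. */
	*k_present = (c_rsvd0_ver >> 13) & 0x1;	/* K: key present. */
	*s_present = (c_rsvd0_ver >> 12) & 0x1;	/* S: sequence present. */
}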
6450
6451 /**
6452  * Add NVGRE item to matcher and to the value.
6453  *
6454  * @param[in, out] matcher
6455  *   Flow matcher.
6456  * @param[in, out] key
6457  *   Flow matcher value.
6458  * @param[in] item
6459  *   Flow pattern to translate.
6460  * @param[in] inner
6461  *   Item is inner pattern.
6462  */
6463 static void
6464 flow_dv_translate_item_nvgre(void *matcher, void *key,
6465                              const struct rte_flow_item *item,
6466                              int inner)
6467 {
6468         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6469         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6470         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6471         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6472         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
6473         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
6474         char *gre_key_m;
6475         char *gre_key_v;
6476         int size;
6477         int i;
6478
6479         /* For NVGRE, GRE header fields must be set with defined values. */
6480         const struct rte_flow_item_gre gre_spec = {
6481                 .c_rsvd0_ver = RTE_BE16(0x2000),
6482                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6483         };
6484         const struct rte_flow_item_gre gre_mask = {
6485                 .c_rsvd0_ver = RTE_BE16(0xB000),
6486                 .protocol = RTE_BE16(UINT16_MAX),
6487         };
6488         const struct rte_flow_item gre_item = {
6489                 .spec = &gre_spec,
6490                 .mask = &gre_mask,
6491                 .last = NULL,
6492         };
6493         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6494         if (!nvgre_v)
6495                 return;
6496         if (!nvgre_m)
6497                 nvgre_m = &rte_flow_item_nvgre_mask;
6498         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
6499         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6500         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
6501         memcpy(gre_key_m, tni_flow_id_m, size);
6502         for (i = 0; i < size; ++i)
6503                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
6504 }
6505
6506 /**
6507  * Add VXLAN item to matcher and to the value.
6508  *
6509  * @param[in, out] matcher
6510  *   Flow matcher.
6511  * @param[in, out] key
6512  *   Flow matcher value.
6513  * @param[in] item
6514  *   Flow pattern to translate.
6515  * @param[in] inner
6516  *   Item is inner pattern.
6517  */
6518 static void
6519 flow_dv_translate_item_vxlan(void *matcher, void *key,
6520                              const struct rte_flow_item *item,
6521                              int inner)
6522 {
6523         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
6524         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
6525         void *headers_m;
6526         void *headers_v;
6527         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6528         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6529         char *vni_m;
6530         char *vni_v;
6531         uint16_t dport;
6532         int size;
6533         int i;
6534
6535         if (inner) {
6536                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6537                                          inner_headers);
6538                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6539         } else {
6540                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6541                                          outer_headers);
6542                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6543         }
6544         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6545                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6546         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6547                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6548                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6549         }
6550         if (!vxlan_v)
6551                 return;
6552         if (!vxlan_m)
6553                 vxlan_m = &rte_flow_item_vxlan_mask;
6554         size = sizeof(vxlan_m->vni);
6555         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
6556         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
6557         memcpy(vni_m, vxlan_m->vni, size);
6558         for (i = 0; i < size; ++i)
6559                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6560 }
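
/*
 * Illustrative sketch, not part of the driver: the byte-wise masked copy
 * used for 3-byte tunnel IDs (VXLAN/VXLAN-GPE VNI, Geneve VNI, NVGRE TNI).
 * The matcher gets the raw mask bytes and the key gets spec & mask per
 * byte. The helper name is hypothetical.
 */
static inline void
example_vni_masked_copy(uint8_t vni_m[3], uint8_t vni_v[3],
			const uint8_t spec[3], const uint8_t mask[3])
{
	int i;

	for (i = 0; i < 3; ++i) {
		vni_m[i] = mask[i];		/* Matcher side. */
		vni_v[i] = mask[i] & spec[i];	/* Key side. */
	}
}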
6561
6562 /**
6563  * Add VXLAN-GPE item to matcher and to the value.
6564  *
6565  * @param[in, out] matcher
6566  *   Flow matcher.
6567  * @param[in, out] key
6568  *   Flow matcher value.
6569  * @param[in] item
6570  *   Flow pattern to translate.
6571  * @param[in] inner
6572  *   Item is inner pattern.
6573  */
6574
6575 static void
6576 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
6577                                  const struct rte_flow_item *item, int inner)
6578 {
6579         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
6580         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
6581         void *headers_m;
6582         void *headers_v;
6583         void *misc_m =
6584                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
6585         void *misc_v =
6586                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6587         char *vni_m;
6588         char *vni_v;
6589         uint16_t dport;
6590         int size;
6591         int i;
6592         uint8_t flags_m = 0xff;
6593         uint8_t flags_v = 0xc;
6594
6595         if (inner) {
6596                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6597                                          inner_headers);
6598                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6599         } else {
6600                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6601                                          outer_headers);
6602                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6603         }
6604         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6605                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6606         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6607                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6608                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6609         }
6610         if (!vxlan_v)
6611                 return;
6612         if (!vxlan_m)
6613                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
6614         size = sizeof(vxlan_m->vni);
6615         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
6616         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
6617         memcpy(vni_m, vxlan_m->vni, size);
6618         for (i = 0; i < size; ++i)
6619                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6620         if (vxlan_m->flags) {
6621                 flags_m = vxlan_m->flags;
6622                 flags_v = vxlan_v->flags;
6623         }
6624         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
6625         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
6626         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
6627                  vxlan_m->protocol);
6628         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
6629                  vxlan_v->protocol);
6630 }
6631
6632 /**
6633  * Add Geneve item to matcher and to the value.
6634  *
6635  * @param[in, out] matcher
6636  *   Flow matcher.
6637  * @param[in, out] key
6638  *   Flow matcher value.
6639  * @param[in] item
6640  *   Flow pattern to translate.
6641  * @param[in] inner
6642  *   Item is inner pattern.
6643  */
6644
6645 static void
6646 flow_dv_translate_item_geneve(void *matcher, void *key,
6647                               const struct rte_flow_item *item, int inner)
6648 {
6649         const struct rte_flow_item_geneve *geneve_m = item->mask;
6650         const struct rte_flow_item_geneve *geneve_v = item->spec;
6651         void *headers_m;
6652         void *headers_v;
6653         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6654         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6655         uint16_t dport;
6656         uint16_t gbhdr_m;
6657         uint16_t gbhdr_v;
6658         char *vni_m;
6659         char *vni_v;
6660         size_t size, i;
6661
6662         if (inner) {
6663                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6664                                          inner_headers);
6665                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6666         } else {
6667                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6668                                          outer_headers);
6669                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6670         }
6671         dport = MLX5_UDP_PORT_GENEVE;
6672         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6673                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6674                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6675         }
6676         if (!geneve_v)
6677                 return;
6678         if (!geneve_m)
6679                 geneve_m = &rte_flow_item_geneve_mask;
6680         size = sizeof(geneve_m->vni);
6681         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6682         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6683         memcpy(vni_m, geneve_m->vni, size);
6684         for (i = 0; i < size; ++i)
6685                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
6686         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6687                  rte_be_to_cpu_16(geneve_m->protocol));
6688         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6689                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6690         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6691         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
6692         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6693                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6694         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6695                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6696         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6697                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6698         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6699                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6700                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6701 }
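
/*
 * Illustrative sketch, not part of the driver, assuming the standard
 * Geneve base header layout (RFC 8926): Ver(2) | Opt Len(6) | O(1) | C(1)
 * | Rsvd(6) in the first host-order 16-bit word, which is what the
 * MLX5_GENEVE_*_VAL() macros extract from above. The helper name is
 * hypothetical.
 */
static inline void
example_geneve_hdr_split(uint16_t w, uint8_t *ver, uint8_t *opt_len,
			 uint8_t *oam)
{
	*ver = w >> 14;			/* 2-bit version. */
	*opt_len = (w >> 8) & 0x3f;	/* Option length, 4-byte multiples. */
	*oam = (w >> 7) & 0x1;		/* O: OAM packet flag. */
}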
6702
6703 /**
6704  * Add MPLS item to matcher and to the value.
6705  *
6706  * @param[in, out] matcher
6707  *   Flow matcher.
6708  * @param[in, out] key
6709  *   Flow matcher value.
6710  * @param[in] item
6711  *   Flow pattern to translate.
6712  * @param[in] prev_layer
6713  *   The protocol layer indicated in previous item.
6714  * @param[in] inner
6715  *   Item is inner pattern.
6716  */
6717 static void
6718 flow_dv_translate_item_mpls(void *matcher, void *key,
6719                             const struct rte_flow_item *item,
6720                             uint64_t prev_layer,
6721                             int inner)
6722 {
6723         const uint32_t *in_mpls_m = item->mask;
6724         const uint32_t *in_mpls_v = item->spec;
6725         uint32_t *out_mpls_m = 0;
6726         uint32_t *out_mpls_v = 0;
6727         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6728         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6729         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6730                                      misc_parameters_2);
6731         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6732         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6733         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6734
6735         switch (prev_layer) {
6736         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6737                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6738                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6739                          MLX5_UDP_PORT_MPLS);
6740                 break;
6741         case MLX5_FLOW_LAYER_GRE:
6742                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6743                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6744                          RTE_ETHER_TYPE_MPLS);
6745                 break;
6746         default:
6747                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6748                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6749                          IPPROTO_MPLS);
6750                 break;
6751         }
6752         if (!in_mpls_v)
6753                 return;
6754         if (!in_mpls_m)
6755                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6756         switch (prev_layer) {
6757         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6758                 out_mpls_m =
6759                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6760                                                  outer_first_mpls_over_udp);
6761                 out_mpls_v =
6762                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6763                                                  outer_first_mpls_over_udp);
6764                 break;
6765         case MLX5_FLOW_LAYER_GRE:
6766                 out_mpls_m =
6767                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6768                                                  outer_first_mpls_over_gre);
6769                 out_mpls_v =
6770                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6771                                                  outer_first_mpls_over_gre);
6772                 break;
6773         default:
6774                 /* Inner MPLS not over GRE is not supported. */
6775                 if (!inner) {
6776                         out_mpls_m =
6777                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6778                                                          misc2_m,
6779                                                          outer_first_mpls);
6780                         out_mpls_v =
6781                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6782                                                          misc2_v,
6783                                                          outer_first_mpls);
6784                 }
6785                 break;
6786         }
6787         if (out_mpls_m && out_mpls_v) {
6788                 *out_mpls_m = *in_mpls_m;
6789                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
6790         }
6791 }
6792
6793 /**
6794  * Add metadata register item to matcher
6795  *
6796  * @param[in, out] matcher
6797  *   Flow matcher.
6798  * @param[in, out] key
6799  *   Flow matcher value.
6800  * @param[in] reg_type
6801  *   Type of device metadata register
6802  * @param[in] value
6803  *   Register value
6804  * @param[in] mask
6805  *   Register mask
6806  */
6807 static void
6808 flow_dv_match_meta_reg(void *matcher, void *key,
6809                        enum modify_reg reg_type,
6810                        uint32_t data, uint32_t mask)
6811 {
6812         void *misc2_m =
6813                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6814         void *misc2_v =
6815                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6816         uint32_t temp;
6817
6818         data &= mask;
6819         switch (reg_type) {
6820         case REG_A:
6821                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
6822                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
6823                 break;
6824         case REG_B:
6825                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
6826                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6827                 break;
6828         case REG_C_0:
6829                 /*
6830                  * The metadata register C0 field might be divided into
6831                  * source vport index and META item value; we should set
6832                  * this field according to the specified mask, not as a whole.
6833                  */
6834                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6835                 temp |= mask;
6836                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6837                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6838                 temp &= ~mask;
6839                 temp |= data;
6840                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6841                 break;
6842         case REG_C_1:
6843                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6844                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6845                 break;
6846         case REG_C_2:
6847                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6848                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6849                 break;
6850         case REG_C_3:
6851                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6852                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6853                 break;
6854         case REG_C_4:
6855                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6856                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6857                 break;
6858         case REG_C_5:
6859                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6860                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6861                 break;
6862         case REG_C_6:
6863                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6864                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6865                 break;
6866         case REG_C_7:
6867                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6868                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6869                 break;
6870         default:
6871                 MLX5_ASSERT(false);
6872                 break;
6873         }
6874 }
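
/*
 * Illustrative sketch, not part of the driver: the read-modify-write done
 * for REG_C_0 above, where the register may be shared between the source
 * vport index and the META item, so only the bits covered by the given
 * mask may be touched. The helper name is hypothetical.
 */
static inline uint32_t
example_regc0_merge(uint32_t old_val, uint32_t data, uint32_t mask)
{
	/* Clear the masked bits, then OR in the (pre-masked) new data. */
	return (old_val & ~mask) | (data & mask);
}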
6875
6876 /**
6877  * Add MARK item to matcher
6878  *
6879  * @param[in] dev
6880  *   The device to configure through.
6881  * @param[in, out] matcher
6882  *   Flow matcher.
6883  * @param[in, out] key
6884  *   Flow matcher value.
6885  * @param[in] item
6886  *   Flow pattern to translate.
6887  */
6888 static void
6889 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6890                             void *matcher, void *key,
6891                             const struct rte_flow_item *item)
6892 {
6893         struct mlx5_priv *priv = dev->data->dev_private;
6894         const struct rte_flow_item_mark *mark;
6895         uint32_t value;
6896         uint32_t mask;
6897
6898         mark = item->mask ? (const void *)item->mask :
6899                             &rte_flow_item_mark_mask;
6900         mask = mark->id & priv->sh->dv_mark_mask;
6901         mark = (const void *)item->spec;
6902         MLX5_ASSERT(mark);
6903         value = mark->id & priv->sh->dv_mark_mask & mask;
6904         if (mask) {
6905                 enum modify_reg reg;
6906
6907                 /* Get the metadata register index for the mark. */
6908                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6909                 MLX5_ASSERT(reg > 0);
6910                 if (reg == REG_C_0) {
6911                         struct mlx5_priv *priv = dev->data->dev_private;
6912                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6913                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6914
6915                         mask &= msk_c0;
6916                         mask <<= shl_c0;
6917                         value <<= shl_c0;
6918                 }
6919                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6920         }
6921 }
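
/*
 * Illustrative sketch, not part of the driver: aligning a value to the
 * usable sub-field of REG_C_0. rte_bsf32() returns the index of the lowest
 * set bit of the register mask, i.e. the shift that moves bit 0 of the
 * value onto the first usable register bit. The helper name is
 * hypothetical.
 */
static inline uint32_t
example_align_value_c0(uint32_t value, uint32_t msk_c0)
{
	return value << rte_bsf32(msk_c0);
}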
6922
6923 /**
6924  * Add META item to matcher
6925  *
6926  * @param[in] dev
6927  *   The device to configure through.
6928  * @param[in, out] matcher
6929  *   Flow matcher.
6930  * @param[in, out] key
6931  *   Flow matcher value.
6932  * @param[in] attr
6933  *   Attributes of flow that includes this item.
6934  * @param[in] item
6935  *   Flow pattern to translate.
6936  */
6937 static void
6938 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6939                             void *matcher, void *key,
6940                             const struct rte_flow_attr *attr,
6941                             const struct rte_flow_item *item)
6942 {
6943         const struct rte_flow_item_meta *meta_m;
6944         const struct rte_flow_item_meta *meta_v;
6945
6946         meta_m = (const void *)item->mask;
6947         if (!meta_m)
6948                 meta_m = &rte_flow_item_meta_mask;
6949         meta_v = (const void *)item->spec;
6950         if (meta_v) {
6951                 int reg;
6952                 uint32_t value = meta_v->data;
6953                 uint32_t mask = meta_m->data;
6954
6955                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6956                 if (reg < 0)
6957                         return;
6958                 /*
6959                  * In datapath code there are no endianness
6960                  * conversions for performance reasons; all
6961                  * pattern conversions are done in rte_flow.
6962                  */
6963                 value = rte_cpu_to_be_32(value);
6964                 mask = rte_cpu_to_be_32(mask);
6965                 if (reg == REG_C_0) {
6966                         struct mlx5_priv *priv = dev->data->dev_private;
6967                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6968                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6969 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6970                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
6971
6972                         value >>= shr_c0;
6973                         mask >>= shr_c0;
6974 #endif
6975                         value <<= shl_c0;
6976                         mask <<= shl_c0;
6977                         MLX5_ASSERT(msk_c0);
6978                         MLX5_ASSERT(!(~msk_c0 & mask));
6979                 }
6980                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6981         }
6982 }
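
/*
 * Illustrative sketch, not part of the driver: the single byte-order
 * conversion done above at rule creation time. The datapath compares raw
 * big-endian words, so both pattern value and mask are converted here
 * once. The helper name is hypothetical.
 */
static inline void
example_meta_to_be(uint32_t data, uint32_t mask,
		   uint32_t *data_be, uint32_t *mask_be)
{
	*data_be = rte_cpu_to_be_32(data);
	*mask_be = rte_cpu_to_be_32(mask);
}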
6983
6984 /**
6985  * Add vport metadata Reg C0 item to matcher
6986  *
6987  * @param[in, out] matcher
6988  *   Flow matcher.
6989  * @param[in, out] key
6990  *   Flow matcher value.
6991  * @param[in] value
6992  *   Register value to match.
6993  * @param[in] mask
6994  *   Register mask.
6993  */
6994 static void
6995 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6996                                   uint32_t value, uint32_t mask)
6997 {
6998         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6999 }
7000
7001 /**
7002  * Add tag item to matcher
7003  *
7004  * @param[in] dev
7005  *   The device to configure through.
7006  * @param[in, out] matcher
7007  *   Flow matcher.
7008  * @param[in, out] key
7009  *   Flow matcher value.
7010  * @param[in] item
7011  *   Flow pattern to translate.
7012  */
7013 static void
7014 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7015                                 void *matcher, void *key,
7016                                 const struct rte_flow_item *item)
7017 {
7018         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7019         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7020         uint32_t mask, value;
7021
7022         MLX5_ASSERT(tag_v);
7023         value = tag_v->data;
7024         mask = tag_m ? tag_m->data : UINT32_MAX;
7025         if (tag_v->id == REG_C_0) {
7026                 struct mlx5_priv *priv = dev->data->dev_private;
7027                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7028                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7029
7030                 mask &= msk_c0;
7031                 mask <<= shl_c0;
7032                 value <<= shl_c0;
7033         }
7034         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7035 }
7036
7037 /**
7038  * Add TAG item to matcher
7039  *
7040  * @param[in] dev
7041  *   The device to configure through.
7042  * @param[in, out] matcher
7043  *   Flow matcher.
7044  * @param[in, out] key
7045  *   Flow matcher value.
7046  * @param[in] item
7047  *   Flow pattern to translate.
7048  */
7049 static void
7050 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7051                            void *matcher, void *key,
7052                            const struct rte_flow_item *item)
7053 {
7054         const struct rte_flow_item_tag *tag_v = item->spec;
7055         const struct rte_flow_item_tag *tag_m = item->mask;
7056         enum modify_reg reg;
7057
7058         MLX5_ASSERT(tag_v);
7059         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7060         /* Get the metadata register index for the tag. */
7061         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7062         MLX5_ASSERT(reg > 0);
7063         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7064 }
7065
7066 /**
7067  * Add source vport match to the specified matcher.
7068  *
7069  * @param[in, out] matcher
7070  *   Flow matcher.
7071  * @param[in, out] key
7072  *   Flow matcher value.
7073  * @param[in] port
7074  *   Source vport value to match
7075  * @param[in] mask
7076  *   Mask
7077  */
7078 static void
7079 flow_dv_translate_item_source_vport(void *matcher, void *key,
7080                                     int16_t port, uint16_t mask)
7081 {
7082         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7083         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7084
7085         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7086         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7087 }
7088
7089 /**
7090  * Translate port-id item to eswitch match on port-id.
7091  *
7092  * @param[in] dev
7093  *   The device to configure through.
7094  * @param[in, out] matcher
7095  *   Flow matcher.
7096  * @param[in, out] key
7097  *   Flow matcher value.
7098  * @param[in] item
7099  *   Flow pattern to translate.
7100  *
7101  * @return
7102  *   0 on success, a negative errno value otherwise.
7103  */
7104 static int
7105 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7106                                void *key, const struct rte_flow_item *item)
7107 {
7108         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7109         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7110         struct mlx5_priv *priv;
7111         uint16_t mask, id;
7112
7113         mask = pid_m ? pid_m->id : 0xffff;
7114         id = pid_v ? pid_v->id : dev->data->port_id;
7115         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7116         if (!priv)
7117                 return -rte_errno;
7118         /* Translate to vport field or to metadata, depending on mode. */
7119         if (priv->vport_meta_mask)
7120                 flow_dv_translate_item_meta_vport(matcher, key,
7121                                                   priv->vport_meta_tag,
7122                                                   priv->vport_meta_mask);
7123         else
7124                 flow_dv_translate_item_source_vport(matcher, key,
7125                                                     priv->vport_id, mask);
7126         return 0;
7127 }
7128
7129 /**
7130  * Add ICMP6 item to matcher and to the value.
7131  *
7132  * @param[in, out] matcher
7133  *   Flow matcher.
7134  * @param[in, out] key
7135  *   Flow matcher value.
7136  * @param[in] item
7137  *   Flow pattern to translate.
7138  * @param[in] inner
7139  *   Item is inner pattern.
7140  */
7141 static void
7142 flow_dv_translate_item_icmp6(void *matcher, void *key,
7143                               const struct rte_flow_item *item,
7144                               int inner)
7145 {
7146         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7147         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7148         void *headers_m;
7149         void *headers_v;
7150         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7151                                      misc_parameters_3);
7152         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7153         if (inner) {
7154                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7155                                          inner_headers);
7156                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7157         } else {
7158                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7159                                          outer_headers);
7160                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7161         }
7162         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7163         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7164         if (!icmp6_v)
7165                 return;
7166         if (!icmp6_m)
7167                 icmp6_m = &rte_flow_item_icmp6_mask;
7168         /*
7169          * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
7170          * If only the protocol is specified, no need to match the frag.
7171          */
7172         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7173         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7174         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7175         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7176                  icmp6_v->type & icmp6_m->type);
7177         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7178         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7179                  icmp6_v->code & icmp6_m->code);
7180 }
7181
7182 /**
7183  * Add ICMP item to matcher and to the value.
7184  *
7185  * @param[in, out] matcher
7186  *   Flow matcher.
7187  * @param[in, out] key
7188  *   Flow matcher value.
7189  * @param[in] item
7190  *   Flow pattern to translate.
7191  * @param[in] inner
7192  *   Item is inner pattern.
7193  */
7194 static void
7195 flow_dv_translate_item_icmp(void *matcher, void *key,
7196                             const struct rte_flow_item *item,
7197                             int inner)
7198 {
7199         const struct rte_flow_item_icmp *icmp_m = item->mask;
7200         const struct rte_flow_item_icmp *icmp_v = item->spec;
7201         void *headers_m;
7202         void *headers_v;
7203         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7204                                      misc_parameters_3);
7205         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7206         if (inner) {
7207                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7208                                          inner_headers);
7209                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7210         } else {
7211                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7212                                          outer_headers);
7213                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7214         }
7215         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7216         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7217         if (!icmp_v)
7218                 return;
7219         if (!icmp_m)
7220                 icmp_m = &rte_flow_item_icmp_mask;
7221         /*
7222          * Force the flow to match only non-fragmented IPv4 ICMP packets.
7223          * If only the protocol is specified, no need to match the frag.
7224          */
7225         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7226         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7227         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7228                  icmp_m->hdr.icmp_type);
7229         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7230                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7231         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7232                  icmp_m->hdr.icmp_code);
7233         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7234                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7235 }
7236
7237 /**
7238  * Add GTP item to matcher and to the value.
7239  *
7240  * @param[in, out] matcher
7241  *   Flow matcher.
7242  * @param[in, out] key
7243  *   Flow matcher value.
7244  * @param[in] item
7245  *   Flow pattern to translate.
7246  * @param[in] inner
7247  *   Item is inner pattern.
7248  */
7249 static void
7250 flow_dv_translate_item_gtp(void *matcher, void *key,
7251                            const struct rte_flow_item *item, int inner)
7252 {
7253         const struct rte_flow_item_gtp *gtp_m = item->mask;
7254         const struct rte_flow_item_gtp *gtp_v = item->spec;
7255         void *headers_m;
7256         void *headers_v;
7257         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7258                                      misc_parameters_3);
7259         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7260         uint16_t dport = RTE_GTPU_UDP_PORT;
7261
7262         if (inner) {
7263                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7264                                          inner_headers);
7265                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7266         } else {
7267                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7268                                          outer_headers);
7269                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7270         }
7271         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7272                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7273                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7274         }
7275         if (!gtp_v)
7276                 return;
7277         if (!gtp_m)
7278                 gtp_m = &rte_flow_item_gtp_mask;
7279         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7280                  gtp_m->v_pt_rsv_flags);
7281         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7282                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7283         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7284         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7285                  gtp_v->msg_type & gtp_m->msg_type);
7286         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7287                  rte_be_to_cpu_32(gtp_m->teid));
7288         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7289                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7290 }
7291
7292 /**
7293  * Add eCPRI item to matcher and to the value.
7294  *
7295  * @param[in] dev
7296  *   The device to configure through.
7297  * @param[in, out] matcher
7298  *   Flow matcher.
7299  * @param[in, out] key
7300  *   Flow matcher value.
7301  * @param[in] item
7302  *   Flow pattern to translate.
7303  * @param[in] samples
7304  *   Sample IDs to be used in the matching.
7305  */
7306 static void
7307 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7308                              void *key, const struct rte_flow_item *item)
7309 {
7310         struct mlx5_priv *priv = dev->data->dev_private;
7311         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7312         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7313         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7314                                      misc_parameters_4);
7315         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7316         uint32_t *samples;
7317         void *dw_m;
7318         void *dw_v;
7319
7320         if (!ecpri_v)
7321                 return;
7322         if (!ecpri_m)
7323                 ecpri_m = &rte_flow_item_ecpri_mask;
7324         /*
7325          * At most four DW samples are supported in a single matcher now.
7326          * Two are used now for eCPRI matching:
7327          * 1. Type: one byte, mask should be 0x00ff0000 in network order
7328          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7329          *    if any.
7330          */
7331         if (!ecpri_m->hdr.common.u32)
7332                 return;
7333         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7334         /* Need to take the whole DW as the mask to fill the entry. */
7335         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7336                             prog_sample_field_value_0);
7337         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7338                             prog_sample_field_value_0);
7339         /* Already big endian (network order) in the header. */
7340         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7341         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7342         /* Sample#0, used for matching type, offset 0. */
7343         MLX5_SET(fte_match_set_misc4, misc4_m,
7344                  prog_sample_field_id_0, samples[0]);
7345         /* It makes no sense to set the sample ID in the mask field. */
7346         MLX5_SET(fte_match_set_misc4, misc4_v,
7347                  prog_sample_field_id_0, samples[0]);
7348         /*
7349          * Check whether the message body part needs to be matched.
7350          * Wildcard rules matching only the type field should be supported.
7351          */
7352         if (ecpri_m->hdr.dummy[0]) {
7353                 switch (ecpri_v->hdr.common.type) {
7354                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7355                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7356                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7357                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7358                                             prog_sample_field_value_1);
7359                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7360                                             prog_sample_field_value_1);
7361                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7362                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7363                         /* Sample#1, to match message body, offset 4. */
7364                         MLX5_SET(fte_match_set_misc4, misc4_m,
7365                                  prog_sample_field_id_1, samples[1]);
7366                         MLX5_SET(fte_match_set_misc4, misc4_v,
7367                                  prog_sample_field_id_1, samples[1]);
7368                         break;
7369                 default:
7370                         /* Others, do not match any sample ID. */
7371                         break;
7372                 }
7373         }
7374 }
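
/*
 * Editor's sketch (not part of the driver): a type-only eCPRI wildcard
 * rule as consumed above. The mask must cover the one-byte type field,
 * i.e. 0x00ff0000 on the first big-endian DW of the common header. The
 * guard macro and the example_ names are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <rte_byteorder.h>
#include <rte_ecpri.h>
#include <rte_flow.h>

/* Match eCPRI real-time control messages, any message body. */
static const struct rte_flow_item_ecpri example_ecpri_spec = {
        .hdr.common.u32 = RTE_BE32(RTE_ECPRI_MSG_TYPE_RTC_CTRL << 16),
};
static const struct rte_flow_item_ecpri example_ecpri_mask = {
        .hdr.common.u32 = RTE_BE32(0x00ff0000), /* Type byte only. */
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */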
7375
7376 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7377
7378 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7379         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7380                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7381
7382 /**
7383  * Calculate flow matcher enable bitmap.
7384  *
7385  * @param match_criteria
7386  *   Pointer to flow matcher criteria.
7387  *
7388  * @return
7389  *   Bitmap of enabled fields.
7390  */
7391 static uint8_t
7392 flow_dv_matcher_enable(uint32_t *match_criteria)
7393 {
7394         uint8_t match_criteria_enable;
7395
7396         match_criteria_enable =
7397                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7398                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7399         match_criteria_enable |=
7400                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7401                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7402         match_criteria_enable |=
7403                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7404                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7405         match_criteria_enable |=
7406                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7407                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7408         match_criteria_enable |=
7409                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7410                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7411         match_criteria_enable |=
7412                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7413                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7414         return match_criteria_enable;
7415 }
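
/*
 * Editor's sketch (not part of the driver): the enable-bitmap idea above
 * in isolation. A block's bit is set only when its mask bytes are not
 * all zero, so the HW can skip wholly-unused criteria blocks. The bit
 * positions below are illustrative; the real ones are the
 * MLX5_MATCH_CRITERIA_ENABLE_*_BIT values. The guard macro and the
 * example_ names are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <stdint.h>
#include <string.h>

static int
example_block_is_zero(const void *blk, size_t len)
{
        static const uint8_t zeros[64]; /* len must not exceed 64 here. */

        return memcmp(blk, zeros, len) == 0;
}

static uint8_t
example_matcher_enable(const void *outer, const void *inner, size_t len)
{
        uint8_t enable = 0;

        enable |= (uint8_t)(!example_block_is_zero(outer, len)) << 0;
        enable |= (uint8_t)(!example_block_is_zero(inner, len)) << 1;
        return enable;
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */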
7416
7417
7418 /**
7419  * Get a flow table.
7420  *
7421  * @param[in, out] dev
7422  *   Pointer to rte_eth_dev structure.
7423  * @param[in] table_id
7424  *   Table id to use.
7425  * @param[in] egress
7426  *   Direction of the table.
7427  * @param[in] transfer
7428  *   E-Switch or NIC flow.
7429  * @param[out] error
7430  *   pointer to error structure.
7431  *
7432  * @return
7433  *   Returns the table resource based on the index, NULL in case of failure.
7434  */
7435 static struct mlx5_flow_tbl_resource *
7436 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
7437                          uint32_t table_id, uint8_t egress,
7438                          uint8_t transfer,
7439                          struct rte_flow_error *error)
7440 {
7441         struct mlx5_priv *priv = dev->data->dev_private;
7442         struct mlx5_dev_ctx_shared *sh = priv->sh;
7443         struct mlx5_flow_tbl_resource *tbl;
7444         union mlx5_flow_tbl_key table_key = {
7445                 {
7446                         .table_id = table_id,
7447                         .reserved = 0,
7448                         .domain = !!transfer,
7449                         .direction = !!egress,
7450                 }
7451         };
7452         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
7453                                                          table_key.v64);
7454         struct mlx5_flow_tbl_data_entry *tbl_data;
7455         uint32_t idx = 0;
7456         int ret;
7457         void *domain;
7458
7459         if (pos) {
7460                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
7461                                         entry);
7462                 tbl = &tbl_data->tbl;
7463                 rte_atomic32_inc(&tbl->refcnt);
7464                 return tbl;
7465         }
7466         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7467         if (!tbl_data) {
7468                 rte_flow_error_set(error, ENOMEM,
7469                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7470                                    NULL,
7471                                    "cannot allocate flow table data entry");
7472                 return NULL;
7473         }
7474         tbl_data->idx = idx;
7475         tbl = &tbl_data->tbl;
7476         pos = &tbl_data->entry;
7477         if (transfer)
7478                 domain = sh->fdb_domain;
7479         else if (egress)
7480                 domain = sh->tx_domain;
7481         else
7482                 domain = sh->rx_domain;
7483         ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
7484         if (ret) {
7485                 rte_flow_error_set(error, ENOMEM,
7486                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7487                                    NULL, "cannot create flow table object");
7488                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7489                 return NULL;
7490         }
7491         /*
7492          * No multi-thread support now, but it is still better to initialize
7493          * the reference count before inserting it into the hash list.
7494          */
7495         rte_atomic32_init(&tbl->refcnt);
7496         /* Jump action reference count is initialized here. */
7497         rte_atomic32_init(&tbl_data->jump.refcnt);
7498         pos->key = table_key.v64;
7499         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
7500         if (ret < 0) {
7501                 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7502                                    NULL, "cannot insert flow table data entry");
7503                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7504                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7505                 return NULL;
7506         }
7507         rte_atomic32_inc(&tbl->refcnt);
7508         return tbl;
7509 }
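
/*
 * Editor's sketch (not part of the driver): why the lookup above is a
 * single integer compare. The (table_id, domain, direction) tuple is
 * packed into one 64-bit key and the union's raw view is used as the
 * mlx5_hlist key. Field widths here are illustrative; the guard macro
 * and the example_ names are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <stdint.h>

union example_tbl_key {
        struct {
                uint64_t table_id:32;
                uint64_t reserved:30;
                uint64_t domain:1;      /* 1 = FDB (transfer). */
                uint64_t direction:1;   /* 1 = egress. */
        };
        uint64_t v64;
};

static uint64_t
example_tbl_key_make(uint32_t table_id, int transfer, int egress)
{
        union example_tbl_key key = {
                {
                        .table_id = table_id,
                        .reserved = 0,
                        .domain = !!transfer,
                        .direction = !!egress,
                }
        };

        return key.v64; /* Used as the hash-list lookup key. */
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */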
7510
7511 /**
7512  * Release a flow table.
7513  *
7514  * @param[in] dev
7515  *   Pointer to rte_eth_dev structure.
7516  * @param[in] tbl
7517  *   Table resource to be released.
7518  *
7519  * @return
7520  *   Returns 0 if the table was released, 1 otherwise.
7521  */
7522 static int
7523 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
7524                              struct mlx5_flow_tbl_resource *tbl)
7525 {
7526         struct mlx5_priv *priv = dev->data->dev_private;
7527         struct mlx5_dev_ctx_shared *sh = priv->sh;
7528         struct mlx5_flow_tbl_data_entry *tbl_data;
7529
7530         if (!tbl)
7531                 return 0;
7532         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7533         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
7534                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
7535
7536                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7537                 tbl->obj = NULL;
7538                 /* remove the entry from the hash list and free memory. */
7539                 mlx5_hlist_remove(sh->flow_tbls, pos);
7540                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
7541                                 tbl_data->idx);
7542                 return 0;
7543         }
7544         return 1;
7545 }
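
/*
 * Editor's sketch (not part of the driver): the intended pairing of the
 * two functions above. Every successful _get() takes a reference that a
 * matching _release() must drop; the table object is destroyed only when
 * the last reference goes away. The guard macro and the example_ name
 * are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static void
example_tbl_refcnt(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 1 /* table id */,
                                       0 /* ingress */, 0 /* NIC domain */,
                                       error);
        if (!tbl)
                return;
        /* ... anchor matchers or jump actions on tbl->obj ... */
        (void)flow_dv_tbl_resource_release(dev, tbl);
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */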
7546
7547 /**
7548  * Register the flow matcher.
7549  *
7550  * @param[in, out] dev
7551  *   Pointer to rte_eth_dev structure.
7552  * @param[in, out] matcher
7553  *   Pointer to flow matcher.
7554  * @param[in, out] key
7555  *   Pointer to flow table key.
7556  * @param[in, out] dev_flow
7557  *   Pointer to the dev_flow.
7558  * @param[out] error
7559  *   pointer to error structure.
7560  *
7561  * @return
7562  *   0 on success, a negative errno value otherwise and rte_errno is set.
7563  */
7564 static int
7565 flow_dv_matcher_register(struct rte_eth_dev *dev,
7566                          struct mlx5_flow_dv_matcher *matcher,
7567                          union mlx5_flow_tbl_key *key,
7568                          struct mlx5_flow *dev_flow,
7569                          struct rte_flow_error *error)
7570 {
7571         struct mlx5_priv *priv = dev->data->dev_private;
7572         struct mlx5_dev_ctx_shared *sh = priv->sh;
7573         struct mlx5_flow_dv_matcher *cache_matcher;
7574         struct mlx5dv_flow_matcher_attr dv_attr = {
7575                 .type = IBV_FLOW_ATTR_NORMAL,
7576                 .match_mask = (void *)&matcher->mask,
7577         };
7578         struct mlx5_flow_tbl_resource *tbl;
7579         struct mlx5_flow_tbl_data_entry *tbl_data;
7580         int ret;
7581
7582         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
7583                                        key->domain, error);
7584         if (!tbl)
7585                 return -rte_errno;      /* No need to refill the error info */
7586         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7587         /* Lookup from cache. */
7588         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
7589                 if (matcher->crc == cache_matcher->crc &&
7590                     matcher->priority == cache_matcher->priority &&
7591                     !memcmp((const void *)matcher->mask.buf,
7592                             (const void *)cache_matcher->mask.buf,
7593                             cache_matcher->mask.size)) {
7594                         DRV_LOG(DEBUG,
7595                                 "%s group %u priority %hd use %s "
7596                                 "matcher %p: refcnt %d++",
7597                                 key->domain ? "FDB" : "NIC", key->table_id,
7598                                 cache_matcher->priority,
7599                                 key->direction ? "tx" : "rx",
7600                                 (void *)cache_matcher,
7601                                 rte_atomic32_read(&cache_matcher->refcnt));
7602                         rte_atomic32_inc(&cache_matcher->refcnt);
7603                         dev_flow->handle->dvh.matcher = cache_matcher;
7604                         /* old matcher should not make the table ref++. */
7605                         flow_dv_tbl_resource_release(dev, tbl);
7606                         return 0;
7607                 }
7608         }
7609         /* Register new matcher. */
7610         cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
7611                                     SOCKET_ID_ANY);
7612         if (!cache_matcher) {
7613                 flow_dv_tbl_resource_release(dev, tbl);
7614                 return rte_flow_error_set(error, ENOMEM,
7615                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7616                                           "cannot allocate matcher memory");
7617         }
7618         *cache_matcher = *matcher;
7619         dv_attr.match_criteria_enable =
7620                 flow_dv_matcher_enable(cache_matcher->mask.buf);
7621         dv_attr.priority = matcher->priority;
7622         if (key->direction)
7623                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
7624         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
7625                                                &cache_matcher->matcher_object);
7626         if (ret) {
7627                 mlx5_free(cache_matcher);
7628 #ifdef HAVE_MLX5DV_DR
7629                 flow_dv_tbl_resource_release(dev, tbl);
7630 #endif
7631                 return rte_flow_error_set(error, ENOMEM,
7632                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7633                                           NULL, "cannot create matcher");
7634         }
7635         /* Save the table information */
7636         cache_matcher->tbl = tbl;
7637         rte_atomic32_init(&cache_matcher->refcnt);
7638         /* only matcher ref++, table ref++ already done above in get API. */
7639         rte_atomic32_inc(&cache_matcher->refcnt);
7640         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
7641         dev_flow->handle->dvh.matcher = cache_matcher;
7642         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
7643                 key->domain ? "FDB" : "NIC", key->table_id,
7644                 cache_matcher->priority,
7645                 key->direction ? "tx" : "rx", (void *)cache_matcher,
7646                 rte_atomic32_read(&cache_matcher->refcnt));
7647         return 0;
7648 }
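
/*
 * Editor's sketch (not part of the driver): the cache-hit test used in
 * the lookup above, in isolation. Two matchers are interchangeable when
 * the CRC of the mask, the priority and the raw mask bytes all agree.
 * The guard macro and the example_ name are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <string.h>

static int
example_matcher_equal(const struct mlx5_flow_dv_matcher *a,
                      const struct mlx5_flow_dv_matcher *b)
{
        return a->crc == b->crc &&
               a->priority == b->priority &&
               !memcmp(a->mask.buf, b->mask.buf, b->mask.size);
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */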
7649
7650 /**
7651  * Find existing tag resource or create and register a new one.
7652  *
7653  * @param[in, out] dev
7654  *   Pointer to rte_eth_dev structure.
7655  * @param[in] tag_be24
7656  *   Tag value in big endian, right-shifted by 8 bits.
7657  * @param[in, out] dev_flow
7658  *   Pointer to the dev_flow.
7659  * @param[out] error
7660  *   pointer to error structure.
7661  *
7662  * @return
7663  *   0 on success, a negative errno value otherwise and rte_errno is set.
7664  */
7665 static int
7666 flow_dv_tag_resource_register
7667                         (struct rte_eth_dev *dev,
7668                          uint32_t tag_be24,
7669                          struct mlx5_flow *dev_flow,
7670                          struct rte_flow_error *error)
7671 {
7672         struct mlx5_priv *priv = dev->data->dev_private;
7673         struct mlx5_dev_ctx_shared *sh = priv->sh;
7674         struct mlx5_flow_dv_tag_resource *cache_resource;
7675         struct mlx5_hlist_entry *entry;
7676         int ret;
7677
7678         /* Lookup a matching resource from cache. */
7679         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
7680         if (entry) {
7681                 cache_resource = container_of
7682                         (entry, struct mlx5_flow_dv_tag_resource, entry);
7683                 rte_atomic32_inc(&cache_resource->refcnt);
7684                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
7685                 dev_flow->dv.tag_resource = cache_resource;
7686                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
7687                         (void *)cache_resource,
7688                         rte_atomic32_read(&cache_resource->refcnt));
7689                 return 0;
7690         }
7691         /* Register new resource. */
7692         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
7693                                        &dev_flow->handle->dvh.rix_tag);
7694         if (!cache_resource)
7695                 return rte_flow_error_set(error, ENOMEM,
7696                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7697                                           "cannot allocate resource memory");
7698         cache_resource->entry.key = (uint64_t)tag_be24;
7699         ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
7700                                                   &cache_resource->action);
7701         if (ret) {
7702                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
7703                                 dev_flow->handle->dvh.rix_tag);
7704                 return rte_flow_error_set(error, ENOMEM,
7705                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action");
7706         }
7707         rte_atomic32_init(&cache_resource->refcnt);
7708         rte_atomic32_inc(&cache_resource->refcnt);
7709         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
7710                 mlx5_flow_os_destroy_flow_action(cache_resource->action);
7711                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
7712                                 dev_flow->handle->dvh.rix_tag);
7713                 return rte_flow_error_set(error, EEXIST,
7714                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot insert tag");
7715         }
7716         dev_flow->dv.tag_resource = cache_resource;
7717         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
7718                 (void *)cache_resource,
7719                 rte_atomic32_read(&cache_resource->refcnt));
7720         return 0;
7721 }
7722
7723 /**
7724  * Release the tag.
7725  *
7726  * @param dev
7727  *   Pointer to Ethernet device.
7728  * @param tag_idx
7729  *   Tag index.
7730  *
7731  * @return
7732  *   1 while a reference on it exists, 0 when freed.
7733  */
7734 static int
7735 flow_dv_tag_release(struct rte_eth_dev *dev,
7736                     uint32_t tag_idx)
7737 {
7738         struct mlx5_priv *priv = dev->data->dev_private;
7739         struct mlx5_dev_ctx_shared *sh = priv->sh;
7740         struct mlx5_flow_dv_tag_resource *tag;
7741
7742         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7743         if (!tag)
7744                 return 0;
7745         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
7746                 dev->data->port_id, (void *)tag,
7747                 rte_atomic32_read(&tag->refcnt));
7748         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7749                 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
7750                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
7751                 DRV_LOG(DEBUG, "port %u tag %p: removed",
7752                         dev->data->port_id, (void *)tag);
7753                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7754                 return 0;
7755         }
7756         return 1;
7757 }
7758
7759 /**
7760  * Translate port ID action to vport.
7761  *
7762  * @param[in] dev
7763  *   Pointer to rte_eth_dev structure.
7764  * @param[in] action
7765  *   Pointer to the port ID action.
7766  * @param[out] dst_port_id
7767  *   The target port ID.
7768  * @param[out] error
7769  *   Pointer to the error structure.
7770  *
7771  * @return
7772  *   0 on success, a negative errno value otherwise and rte_errno is set.
7773  */
7774 static int
7775 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7776                                  const struct rte_flow_action *action,
7777                                  uint32_t *dst_port_id,
7778                                  struct rte_flow_error *error)
7779 {
7780         uint32_t port;
7781         struct mlx5_priv *priv;
7782         const struct rte_flow_action_port_id *conf =
7783                         (const struct rte_flow_action_port_id *)action->conf;
7784
7785         port = conf->original ? dev->data->port_id : conf->id;
7786         priv = mlx5_port_to_eswitch_info(port, false);
7787         if (!priv)
7788                 return rte_flow_error_set(error, -rte_errno,
7789                                           RTE_FLOW_ERROR_TYPE_ACTION,
7790                                           NULL,
7791                                           "No eswitch info was found for port");
7792 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7793         /*
7794          * This parameter is transferred to
7795          * mlx5dv_dr_action_create_dest_ib_port().
7796          */
7797         *dst_port_id = priv->dev_port;
7798 #else
7799         /*
7800          * Legacy mode, no LAG configuration is supported.
7801          * This parameter is transferred to
7802          * mlx5dv_dr_action_create_dest_vport().
7803          */
7804         *dst_port_id = priv->vport_id;
7805 #endif
7806         return 0;
7807 }
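
/*
 * Editor's sketch (not part of the driver): the PORT_ID action
 * configuration consumed above. With .original set, the port the rule
 * is created on is used instead of .id. The guard macro and the
 * example_ name are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <rte_flow.h>

static const struct rte_flow_action_port_id example_port_id_conf = {
        .original = 0,  /* Use .id below, not the rule's own port. */
        .id = 1,        /* Target DPDK port ID. */
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */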
7808
7809 /**
7810  * Create a counter with aging configuration.
7811  *
7812  * @param[in] dev
7813  *   Pointer to rte_eth_dev structure.
7814  * @param[in] count
7815  *   Pointer to the counter action configuration.
7816  * @param[in] age
7817  *   Pointer to the aging action configuration.
7818  *
7819  * @return
7820  *   Index to flow counter on success, 0 otherwise.
7821  */
7822 static uint32_t
7823 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
7824                                 struct mlx5_flow *dev_flow,
7825                                 const struct rte_flow_action_count *count,
7826                                 const struct rte_flow_action_age *age)
7827 {
7828         uint32_t counter;
7829         struct mlx5_age_param *age_param;
7830
7831         counter = flow_dv_counter_alloc(dev,
7832                                 count ? count->shared : 0,
7833                                 count ? count->id : 0,
7834                                 dev_flow->dv.group, !!age);
7835         if (!counter || !age)
7836                 return counter;
7837         age_param = flow_dv_counter_idx_get_age(dev, counter);
7838         /*
7839          * If no aging context is provided, default to the flow index so
7840          * the aged-flow query can still identify this flow.
7841          */
7842         age_param->context = age->context ? age->context :
7843                 (void *)(uintptr_t)(dev_flow->flow_idx);
7844         /*
7845          * The counter age accuracy may have a bit of delay. Have 3/4
7846          * second bias on the timeout in order to let it age in time.
7847          */
7848         age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
7849         age_param->port_id = dev->data->port_id;
7850         /* Set expire time in unit of 0.1 sec. */
7851         age_param->expire = age_param->timeout +
7852                         rte_rdtsc() / (rte_get_tsc_hz() / 10);
7853         rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
7854         return counter;
7855 }
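
/*
 * Editor's sketch (not part of the driver): the aging arithmetic above
 * in isolation. Timeouts are kept in 0.1 second units, biased by the
 * aging delay so flows age in time, and the absolute expiry is "now"
 * (TSC scaled to 0.1 s ticks) plus the timeout. The guard macro and the
 * example_ name are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <stdint.h>
#include <rte_cycles.h>

static uint64_t
example_age_expire(uint32_t timeout_sec, uint32_t delay_tenths)
{
        uint64_t timeout = (uint64_t)timeout_sec * 10 - delay_tenths;

        return timeout + rte_rdtsc() / (rte_get_tsc_hz() / 10);
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */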
7856 /**
7857  * Add Tx queue matcher.
7858  *
7859  * @param[in] dev
7860  *   Pointer to the dev struct.
7861  * @param[in, out] matcher
7862  *   Flow matcher.
7863  * @param[in, out] key
7864  *   Flow matcher value.
7865  * @param[in] item
7866  *   Flow pattern to translate.
7867  *
7868  * Matching is done on the SQ number resolved from the Tx queue index.
7869  */
7870 static void
7871 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
7872                                 void *matcher, void *key,
7873                                 const struct rte_flow_item *item)
7874 {
7875         const struct mlx5_rte_flow_item_tx_queue *queue_m;
7876         const struct mlx5_rte_flow_item_tx_queue *queue_v;
7877         void *misc_m =
7878                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7879         void *misc_v =
7880                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7881         struct mlx5_txq_ctrl *txq;
7882         uint32_t queue;
7883
7884
7885         queue_m = (const void *)item->mask;
7886         if (!queue_m)
7887                 return;
7888         queue_v = (const void *)item->spec;
7889         if (!queue_v)
7890                 return;
7891         txq = mlx5_txq_get(dev, queue_v->queue);
7892         if (!txq)
7893                 return;
7894         queue = txq->obj->sq->id;
7895         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
7896         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
7897                  queue & queue_m->queue);
7898         mlx5_txq_release(dev, queue_v->queue);
7899 }
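
/*
 * Editor's sketch (not part of the driver): the internal Tx-queue item
 * consumed above. It carries a DPDK Tx queue index which the driver
 * resolves to the underlying SQ number, since the HW matches on
 * source_sqn rather than on the queue index. The guard macro and the
 * example_ name are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
static const struct mlx5_rte_flow_item_tx_queue example_txq_spec = {
        .queue = 0,     /* Tx queue index, resolved to an SQ number. */
};
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */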
7900
7901 /**
7902  * Set the hash fields according to the @p flow information.
7903  *
7904  * @param[in] dev_flow
7905  *   Pointer to the mlx5_flow.
7906  * @param[in] rss_desc
7907  *   Pointer to the mlx5_flow_rss_desc.
7908  */
7909 static void
7910 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
7911                        struct mlx5_flow_rss_desc *rss_desc)
7912 {
7913         uint64_t items = dev_flow->handle->layers;
7914         int rss_inner = 0;
7915         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
7916
7917         dev_flow->hash_fields = 0;
7918 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
7919         if (rss_desc->level >= 2) {
7920                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
7921                 rss_inner = 1;
7922         }
7923 #endif
7924         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
7925             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
7926                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
7927                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7928                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
7929                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7930                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
7931                         else
7932                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
7933                 }
7934         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
7935                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
7936                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
7937                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7938                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
7939                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7940                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
7941                         else
7942                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
7943                 }
7944         }
7945         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
7946             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
7947                 if (rss_types & ETH_RSS_UDP) {
7948                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7949                                 dev_flow->hash_fields |=
7950                                                 IBV_RX_HASH_SRC_PORT_UDP;
7951                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7952                                 dev_flow->hash_fields |=
7953                                                 IBV_RX_HASH_DST_PORT_UDP;
7954                         else
7955                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
7956                 }
7957         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
7958                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
7959                 if (rss_types & ETH_RSS_TCP) {
7960                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7961                                 dev_flow->hash_fields |=
7962                                                 IBV_RX_HASH_SRC_PORT_TCP;
7963                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7964                                 dev_flow->hash_fields |=
7965                                                 IBV_RX_HASH_DST_PORT_TCP;
7966                         else
7967                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
7968                 }
7969         }
7970 }
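
/*
 * Editor's sketch (not part of the driver): the L3 selection rule above
 * in isolation. SRC_ONLY/DST_ONLY narrow the hash to one address;
 * otherwise both addresses are hashed. The flag values below are
 * illustrative, not the real ETH_RSS_ and IBV_RX_HASH_ constants; the
 * guard macro and the example_ names are hypothetical.
 */
#ifdef MLX5_FLOW_DV_DOC_EXAMPLES
#include <stdint.h>

#define EXAMPLE_RSS_L3_SRC_ONLY (UINT32_C(1) << 0)
#define EXAMPLE_RSS_L3_DST_ONLY (UINT32_C(1) << 1)
#define EXAMPLE_HASH_SRC_IPV4   (UINT32_C(1) << 2)
#define EXAMPLE_HASH_DST_IPV4   (UINT32_C(1) << 3)

static uint32_t
example_ipv4_hash_fields(uint32_t rss_types)
{
        if (rss_types & EXAMPLE_RSS_L3_SRC_ONLY)
                return EXAMPLE_HASH_SRC_IPV4;
        if (rss_types & EXAMPLE_RSS_L3_DST_ONLY)
                return EXAMPLE_HASH_DST_IPV4;
        return EXAMPLE_HASH_SRC_IPV4 | EXAMPLE_HASH_DST_IPV4;
}
#endif /* MLX5_FLOW_DV_DOC_EXAMPLES */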
7971
7972 /**
7973  * Fill the flow with DV spec, lock free
7974  * (mutex should be acquired by caller).
7975  *
7976  * @param[in] dev
7977  *   Pointer to rte_eth_dev structure.
7978  * @param[in, out] dev_flow
7979  *   Pointer to the sub flow.
7980  * @param[in] attr
7981  *   Pointer to the flow attributes.
7982  * @param[in] items
7983  *   Pointer to the list of items.
7984  * @param[in] actions
7985  *   Pointer to the list of actions.
7986  * @param[out] error
7987  *   Pointer to the error structure.
7988  *
7989  * @return
7990  *   0 on success, a negative errno value otherwise and rte_errno is set.
7991  */
7992 static int
7993 __flow_dv_translate(struct rte_eth_dev *dev,
7994                     struct mlx5_flow *dev_flow,
7995                     const struct rte_flow_attr *attr,
7996                     const struct rte_flow_item items[],
7997                     const struct rte_flow_action actions[],
7998                     struct rte_flow_error *error)
7999 {
8000         struct mlx5_priv *priv = dev->data->dev_private;
8001         struct mlx5_dev_config *dev_conf = &priv->config;
8002         struct rte_flow *flow = dev_flow->flow;
8003         struct mlx5_flow_handle *handle = dev_flow->handle;
8004         struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
8005                                               priv->rss_desc)
8006                                               [!!priv->flow_nested_idx];
8007         uint64_t item_flags = 0;
8008         uint64_t last_item = 0;
8009         uint64_t action_flags = 0;
8010         uint64_t priority = attr->priority;
8011         struct mlx5_flow_dv_matcher matcher = {
8012                 .mask = {
8013                         .size = sizeof(matcher.mask.buf) -
8014                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
8015                 },
8016         };
8017         int actions_n = 0;
8018         bool actions_end = false;
8019         union {
8020                 struct mlx5_flow_dv_modify_hdr_resource res;
8021                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
8022                             sizeof(struct mlx5_modification_cmd) *
8023                             (MLX5_MAX_MODIFY_NUM + 1)];
8024         } mhdr_dummy;
8025         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
8026         const struct rte_flow_action_count *count = NULL;
8027         const struct rte_flow_action_age *age = NULL;
8028         union flow_dv_attr flow_attr = { .attr = 0 };
8029         uint32_t tag_be;
8030         union mlx5_flow_tbl_key tbl_key;
8031         uint32_t modify_action_position = UINT32_MAX;
8032         void *match_mask = matcher.mask.buf;
8033         void *match_value = dev_flow->dv.value.buf;
8034         uint8_t next_protocol = 0xff;
8035         struct rte_vlan_hdr vlan = { 0 };
8036         uint32_t table;
8037         int ret = 0;
8038
8039         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
8040                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
8041         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
8042                                        !!priv->fdb_def_rule, &table, error);
8043         if (ret)
8044                 return ret;
8045         dev_flow->dv.group = table;
8046         if (attr->transfer)
8047                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
8048         if (priority == MLX5_FLOW_PRIO_RSVD)
8049                 priority = dev_conf->flow_prio - 1;
8050         /* number of actions must be set to 0 in case of dirty stack. */
8051         mhdr_res->actions_num = 0;
8052         for (; !actions_end ; actions++) {
8053                 const struct rte_flow_action_queue *queue;
8054                 const struct rte_flow_action_rss *rss;
8055                 const struct rte_flow_action *action = actions;
8056                 const uint8_t *rss_key;
8057                 const struct rte_flow_action_jump *jump_data;
8058                 const struct rte_flow_action_meter *mtr;
8059                 struct mlx5_flow_tbl_resource *tbl;
8060                 uint32_t port_id = 0;
8061                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
8062                 int action_type = actions->type;
8063                 const struct rte_flow_action *found_action = NULL;
8064                 struct mlx5_flow_meter *fm = NULL;
8065
8066                 if (!mlx5_flow_os_action_supported(action_type))
8067                         return rte_flow_error_set(error, ENOTSUP,
8068                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8069                                                   actions,
8070                                                   "action not supported");
8071                 switch (action_type) {
8072                 case RTE_FLOW_ACTION_TYPE_VOID:
8073                         break;
8074                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
8075                         if (flow_dv_translate_action_port_id(dev, action,
8076                                                              &port_id, error))
8077                                 return -rte_errno;
8078                         port_id_resource.port_id = port_id;
8079                         MLX5_ASSERT(!handle->rix_port_id_action);
8080                         if (flow_dv_port_id_action_resource_register
8081                             (dev, &port_id_resource, dev_flow, error))
8082                                 return -rte_errno;
8083                         dev_flow->dv.actions[actions_n++] =
8084                                         dev_flow->dv.port_id_action->action;
8085                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
8086                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
8087                         break;
8088                 case RTE_FLOW_ACTION_TYPE_FLAG:
8089                         action_flags |= MLX5_FLOW_ACTION_FLAG;
8090                         dev_flow->handle->mark = 1;
8091                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8092                                 struct rte_flow_action_mark mark = {
8093                                         .id = MLX5_FLOW_MARK_DEFAULT,
8094                                 };
8095
8096                                 if (flow_dv_convert_action_mark(dev, &mark,
8097                                                                 mhdr_res,
8098                                                                 error))
8099                                         return -rte_errno;
8100                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8101                                 break;
8102                         }
8103                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
8104                         /*
8105                          * Only one FLAG or MARK is supported per device flow
8106                          * right now. So the pointer to the tag resource must be
8107                          * zero before the register process.
8108                          */
8109                         MLX5_ASSERT(!handle->dvh.rix_tag);
8110                         if (flow_dv_tag_resource_register(dev, tag_be,
8111                                                           dev_flow, error))
8112                                 return -rte_errno;
8113                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8114                         dev_flow->dv.actions[actions_n++] =
8115                                         dev_flow->dv.tag_resource->action;
8116                         break;
8117                 case RTE_FLOW_ACTION_TYPE_MARK:
8118                         action_flags |= MLX5_FLOW_ACTION_MARK;
8119                         dev_flow->handle->mark = 1;
8120                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8121                                 const struct rte_flow_action_mark *mark =
8122                                         (const struct rte_flow_action_mark *)
8123                                                 actions->conf;
8124
8125                                 if (flow_dv_convert_action_mark(dev, mark,
8126                                                                 mhdr_res,
8127                                                                 error))
8128                                         return -rte_errno;
8129                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8130                                 break;
8131                         }
8132                         /* Fall-through */
8133                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
8134                         /* Legacy (non-extensive) MARK action. */
8135                         tag_be = mlx5_flow_mark_set
8136                               (((const struct rte_flow_action_mark *)
8137                                (actions->conf))->id);
8138                         MLX5_ASSERT(!handle->dvh.rix_tag);
8139                         if (flow_dv_tag_resource_register(dev, tag_be,
8140                                                           dev_flow, error))
8141                                 return -rte_errno;
8142                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8143                         dev_flow->dv.actions[actions_n++] =
8144                                         dev_flow->dv.tag_resource->action;
8145                         break;
8146                 case RTE_FLOW_ACTION_TYPE_SET_META:
8147                         if (flow_dv_convert_action_set_meta
8148                                 (dev, mhdr_res, attr,
8149                                  (const struct rte_flow_action_set_meta *)
8150                                   actions->conf, error))
8151                                 return -rte_errno;
8152                         action_flags |= MLX5_FLOW_ACTION_SET_META;
8153                         break;
8154                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
8155                         if (flow_dv_convert_action_set_tag
8156                                 (dev, mhdr_res,
8157                                  (const struct rte_flow_action_set_tag *)
8158                                   actions->conf, error))
8159                                 return -rte_errno;
8160                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8161                         break;
8162                 case RTE_FLOW_ACTION_TYPE_DROP:
8163                         action_flags |= MLX5_FLOW_ACTION_DROP;
8164                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
8165                         break;
8166                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8167                         queue = actions->conf;
8168                         rss_desc->queue_num = 1;
8169                         rss_desc->queue[0] = queue->index;
8170                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8171                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8172                         break;
8173                 case RTE_FLOW_ACTION_TYPE_RSS:
8174                         rss = actions->conf;
8175                         memcpy(rss_desc->queue, rss->queue,
8176                                rss->queue_num * sizeof(uint16_t));
8177                         rss_desc->queue_num = rss->queue_num;
8178                         /* NULL RSS key indicates default RSS key. */
8179                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
8180                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
8181                         /*
8182                          * rss->level and rss->types should be set in advance
8183                          * when expanding items for RSS.
8184                          */
8185                         action_flags |= MLX5_FLOW_ACTION_RSS;
8186                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8187                         break;
8188                 case RTE_FLOW_ACTION_TYPE_AGE:
8189                 case RTE_FLOW_ACTION_TYPE_COUNT:
8190                         if (!dev_conf->devx) {
8191                                 return rte_flow_error_set
8192                                               (error, ENOTSUP,
8193                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8194                                                NULL,
8195                                                "count action not supported");
8196                         }
8197                         /* Save information first, will apply later. */
8198                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
8199                                 count = action->conf;
8200                         else
8201                                 age = action->conf;
8202                         action_flags |= MLX5_FLOW_ACTION_COUNT;
8203                         break;
8204                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
8205                         dev_flow->dv.actions[actions_n++] =
8206                                                 priv->sh->pop_vlan_action;
8207                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
8208                         break;
8209                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
8210                         if (!(action_flags &
8211                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
8212                                 flow_dev_get_vlan_info_from_items(items, &vlan);
8213                         vlan.eth_proto = rte_be_to_cpu_16
8214                              ((((const struct rte_flow_action_of_push_vlan *)
8215                                                    actions->conf)->ethertype));
8216                         found_action = mlx5_flow_find_action
8217                                         (actions + 1,
8218                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
8219                         if (found_action)
8220                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8221                         found_action = mlx5_flow_find_action
8222                                         (actions + 1,
8223                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
8224                         if (found_action)
8225                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8226                         if (flow_dv_create_action_push_vlan
8227                                             (dev, attr, &vlan, dev_flow, error))
8228                                 return -rte_errno;
8229                         dev_flow->dv.actions[actions_n++] =
8230                                         dev_flow->dv.push_vlan_res->action;
8231                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
8232                         break;
8233                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
8234                         /* of_vlan_push action handled this action */
8235                         MLX5_ASSERT(action_flags &
8236                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
8237                         break;
8238                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
8239                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8240                                 break;
8241                         flow_dev_get_vlan_info_from_items(items, &vlan);
8242                         mlx5_update_vlan_vid_pcp(actions, &vlan);
8243                         /* If no VLAN push - this is a modify header action */
8244                         if (flow_dv_convert_action_modify_vlan_vid
8245                                                 (mhdr_res, actions, error))
8246                                 return -rte_errno;
8247                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
8248                         break;
8249                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
8250                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
8251                         if (flow_dv_create_action_l2_encap(dev, actions,
8252                                                            dev_flow,
8253                                                            attr->transfer,
8254                                                            error))
8255                                 return -rte_errno;
8256                         dev_flow->dv.actions[actions_n++] =
8257                                         dev_flow->dv.encap_decap->action;
8258                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8259                         break;
8260                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
8261                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
8262                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
8263                                                            attr->transfer,
8264                                                            error))
8265                                 return -rte_errno;
8266                         dev_flow->dv.actions[actions_n++] =
8267                                         dev_flow->dv.encap_decap->action;
8268                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8269                         break;
8270                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
8271                         /* Handle encap with preceding decap. */
8272                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
8273                                 if (flow_dv_create_action_raw_encap
8274                                         (dev, actions, dev_flow, attr, error))
8275                                         return -rte_errno;
8276                                 dev_flow->dv.actions[actions_n++] =
8277                                         dev_flow->dv.encap_decap->action;
8278                         } else {
8279                                 /* Handle encap without preceding decap. */
8280                                 if (flow_dv_create_action_l2_encap
8281                                     (dev, actions, dev_flow, attr->transfer,
8282                                      error))
8283                                         return -rte_errno;
8284                                 dev_flow->dv.actions[actions_n++] =
8285                                         dev_flow->dv.encap_decap->action;
8286                         }
8287                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8288                         break;
8289                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
8290                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
8291                                 ;
8292                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
8293                                 if (flow_dv_create_action_l2_decap
8294                                     (dev, dev_flow, attr->transfer, error))
8295                                         return -rte_errno;
8296                                 dev_flow->dv.actions[actions_n++] =
8297                                         dev_flow->dv.encap_decap->action;
8298                         }
8299                         /* If decap is followed by encap, handle it at encap. */
8300                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8301                         break;
8302                 case RTE_FLOW_ACTION_TYPE_JUMP:
8303                         jump_data = action->conf;
8304                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
8305                                                        jump_data->group,
8306                                                        !!priv->fdb_def_rule,
8307                                                        &table, error);
8308                         if (ret)
8309                                 return ret;
8310                         tbl = flow_dv_tbl_resource_get(dev, table,
8311                                                        attr->egress,
8312                                                        attr->transfer, error);
8313                         if (!tbl)
8314                                 return rte_flow_error_set
8315                                                 (error, errno,
8316                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8317                                                  NULL,
8318                                                  "cannot create jump action.");
8319                         if (flow_dv_jump_tbl_resource_register
8320                             (dev, tbl, dev_flow, error)) {
8321                                 flow_dv_tbl_resource_release(dev, tbl);
8322                                 return rte_flow_error_set
8323                                                 (error, errno,
8324                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8325                                                  NULL,
8326                                                  "cannot create jump action.");
8327                         }
8328                         dev_flow->dv.actions[actions_n++] =
8329                                         dev_flow->dv.jump->action;
8330                         action_flags |= MLX5_FLOW_ACTION_JUMP;
8331                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
8332                         break;
8333                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
8334                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
8335                         if (flow_dv_convert_action_modify_mac
8336                                         (mhdr_res, actions, error))
8337                                 return -rte_errno;
8338                         action_flags |= actions->type ==
8339                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
8340                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
8341                                         MLX5_FLOW_ACTION_SET_MAC_DST;
8342                         break;
8343                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
8344                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
8345                         if (flow_dv_convert_action_modify_ipv4
8346                                         (mhdr_res, actions, error))
8347                                 return -rte_errno;
8348                         action_flags |= actions->type ==
8349                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
8350                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
8351                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
8352                         break;
8353                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
8354                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
8355                         if (flow_dv_convert_action_modify_ipv6
8356                                         (mhdr_res, actions, error))
8357                                 return -rte_errno;
8358                         action_flags |= actions->type ==
8359                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
8360                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
8361                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
8362                         break;
8363                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
8364                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
8365                         if (flow_dv_convert_action_modify_tp
8366                                         (mhdr_res, actions, items,
8367                                          &flow_attr, dev_flow, !!(action_flags &
8368                                          MLX5_FLOW_ACTION_DECAP), error))
8369                                 return -rte_errno;
8370                         action_flags |= actions->type ==
8371                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
8372                                         MLX5_FLOW_ACTION_SET_TP_SRC :
8373                                         MLX5_FLOW_ACTION_SET_TP_DST;
8374                         break;
8375                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
8376                         if (flow_dv_convert_action_modify_dec_ttl
8377                                         (mhdr_res, items, &flow_attr, dev_flow,
8378                                          !!(action_flags &
8379                                          MLX5_FLOW_ACTION_DECAP), error))
8380                                 return -rte_errno;
8381                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
8382                         break;
8383                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
8384                         if (flow_dv_convert_action_modify_ttl
8385                                         (mhdr_res, actions, items, &flow_attr,
8386                                          dev_flow, !!(action_flags &
8387                                          MLX5_FLOW_ACTION_DECAP), error))
8388                                 return -rte_errno;
8389                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
8390                         break;
8391                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
8392                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
8393                         if (flow_dv_convert_action_modify_tcp_seq
8394                                         (mhdr_res, actions, error))
8395                                 return -rte_errno;
8396                         action_flags |= actions->type ==
8397                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
8398                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
8399                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
8400                         break;
8401
8402                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
8403                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
8404                         if (flow_dv_convert_action_modify_tcp_ack
8405                                         (mhdr_res, actions, error))
8406                                 return -rte_errno;
8407                         action_flags |= actions->type ==
8408                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
8409                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
8410                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
8411                         break;
8412                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
8413                         if (flow_dv_convert_action_set_reg
8414                                         (mhdr_res, actions, error))
8415                                 return -rte_errno;
8416                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8417                         break;
8418                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
8419                         if (flow_dv_convert_action_copy_mreg
8420                                         (dev, mhdr_res, actions, error))
8421                                 return -rte_errno;
8422                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8423                         break;
8424                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
8425                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
8426                         dev_flow->handle->fate_action =
8427                                         MLX5_FLOW_FATE_DEFAULT_MISS;
8428                         break;
8429                 case RTE_FLOW_ACTION_TYPE_METER:
8430                         mtr = actions->conf;
8431                         if (!flow->meter) {
8432                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
8433                                                             attr, error);
8434                                 if (!fm)
8435                                         return rte_flow_error_set(error,
8436                                                 rte_errno,
8437                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8438                                                 NULL,
8439                                                 "meter not found "
8440                                                 "or invalid parameters");
8441                                 flow->meter = fm->idx;
8442                         }
8443                         /* Set the meter action. */
8444                         if (!fm) {
8445                                 fm = mlx5_ipool_get(priv->sh->ipool
8446                                                 [MLX5_IPOOL_MTR], flow->meter);
8447                                 if (!fm)
8448                                         return rte_flow_error_set(error,
8449                                                 rte_errno,
8450                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8451                                                 NULL,
8452                                                 "meter not found "
8453                                                 "or invalid parameters");
8454                         }
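                        /*
                         * The first sub-flow attaches the meter and
                         * caches its ipool index in flow->meter; later
                         * sub-flows of the same rte_flow re-resolve the
                         * meter object from that cached index.
                         */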
8455                         dev_flow->dv.actions[actions_n++] =
8456                                 fm->mfts->meter_action;
8457                         action_flags |= MLX5_FLOW_ACTION_METER;
8458                         break;
8459                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
8460                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
8461                                                               actions, error))
8462                                 return -rte_errno;
8463                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
8464                         break;
8465                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
8466                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
8467                                                               actions, error))
8468                                 return -rte_errno;
8469                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
8470                         break;
8471                 case RTE_FLOW_ACTION_TYPE_END:
8472                         actions_end = true;
8473                         if (mhdr_res->actions_num) {
8474                                 /* Create the modify-header action if needed. */
8475                                 if (flow_dv_modify_hdr_resource_register
8476                                         (dev, mhdr_res, dev_flow, error))
8477                                         return -rte_errno;
8478                                 dev_flow->dv.actions[modify_action_position] =
8479                                         handle->dvh.modify_hdr->action;
8480                         }
8481                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
8482                                 flow->counter =
8483                                         flow_dv_translate_create_counter(dev,
8484                                                 dev_flow, count, age);
8485
8486                                 if (!flow->counter)
8487                                         return rte_flow_error_set
8488                                                 (error, rte_errno,
8489                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8490                                                 NULL,
8491                                                 "cannot create counter"
8492                                                 " object.");
8493                                 dev_flow->dv.actions[actions_n++] =
8494                                           (flow_dv_counter_get_by_idx(dev,
8495                                           flow->counter, NULL))->action;
8496                         }
8497                         break;
8498                 default:
8499                         break;
8500                 }
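                /*
                 * All header-rewrite actions share one modify-header
                 * resource, so a single slot in dv.actions[] is
                 * reserved at the position of the first such action;
                 * it is filled in once parsing reaches the END action
                 * above.
                 */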
8501                 if (mhdr_res->actions_num &&
8502                     modify_action_position == UINT32_MAX)
8503                         modify_action_position = actions_n++;
8504         }
8505         dev_flow->dv.actions_n = actions_n;
8506         dev_flow->act_flags = action_flags;
8507         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
8508                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
8509                 int item_type = items->type;
8510
8511                 if (!mlx5_flow_os_item_supported(item_type))
8512                         return rte_flow_error_set(error, ENOTSUP,
8513                                                   RTE_FLOW_ERROR_TYPE_ITEM,
8514                                                   NULL, "item not supported");
8515                 switch (item_type) {
8516                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
8517                         flow_dv_translate_item_port_id(dev, match_mask,
8518                                                        match_value, items);
8519                         last_item = MLX5_FLOW_ITEM_PORT_ID;
8520                         break;
8521                 case RTE_FLOW_ITEM_TYPE_ETH:
8522                         flow_dv_translate_item_eth(match_mask, match_value,
8523                                                    items, tunnel,
8524                                                    dev_flow->dv.group);
8525                         matcher.priority = action_flags &
8526                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
8527                                         !dev_flow->external ?
8528                                         MLX5_PRIORITY_MAP_L3 :
8529                                         MLX5_PRIORITY_MAP_L2;
8530                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8531                                              MLX5_FLOW_LAYER_OUTER_L2;
8532                         break;
8533                 case RTE_FLOW_ITEM_TYPE_VLAN:
8534                         flow_dv_translate_item_vlan(dev_flow,
8535                                                     match_mask, match_value,
8536                                                     items, tunnel,
8537                                                     dev_flow->dv.group);
8538                         matcher.priority = MLX5_PRIORITY_MAP_L2;
8539                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
8540                                               MLX5_FLOW_LAYER_INNER_VLAN) :
8541                                              (MLX5_FLOW_LAYER_OUTER_L2 |
8542                                               MLX5_FLOW_LAYER_OUTER_VLAN);
8543                         break;
8544                 case RTE_FLOW_ITEM_TYPE_IPV4:
8545                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8546                                                   &item_flags, &tunnel);
8547                         flow_dv_translate_item_ipv4(match_mask, match_value,
8548                                                     items, item_flags, tunnel,
8549                                                     dev_flow->dv.group);
8550                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8551                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8552                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
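                        /*
                         * Derive the effective next protocol as
                         * spec & mask. Example: spec.next_proto_id =
                         * 0x11 (UDP) with mask 0xff yields 0x11, so a
                         * following UDP-based tunnel item (e.g. VXLAN
                         * or GTP) can be classified correctly.
                         */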
8553                         if (items->mask != NULL &&
8554                             ((const struct rte_flow_item_ipv4 *)
8555                              items->mask)->hdr.next_proto_id) {
8556                                 next_protocol =
8557                                         ((const struct rte_flow_item_ipv4 *)
8558                                          (items->spec))->hdr.next_proto_id;
8559                                 next_protocol &=
8560                                         ((const struct rte_flow_item_ipv4 *)
8561                                          (items->mask))->hdr.next_proto_id;
8562                         } else {
8563                                 /* Reset for inner layer. */
8564                                 next_protocol = 0xff;
8565                         }
8566                         break;
8567                 case RTE_FLOW_ITEM_TYPE_IPV6:
8568                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8569                                                   &item_flags, &tunnel);
8570                         flow_dv_translate_item_ipv6(match_mask, match_value,
8571                                                     items, item_flags, tunnel,
8572                                                     dev_flow->dv.group);
8573                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8574                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8575                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8576                         if (items->mask != NULL &&
8577                             ((const struct rte_flow_item_ipv6 *)
8578                              items->mask)->hdr.proto) {
8579                                 next_protocol =
8580                                         ((const struct rte_flow_item_ipv6 *)
8581                                          items->spec)->hdr.proto;
8582                                 next_protocol &=
8583                                         ((const struct rte_flow_item_ipv6 *)
8584                                          items->mask)->hdr.proto;
8585                         } else {
8586                                 /* Reset for inner layer. */
8587                                 next_protocol = 0xff;
8588                         }
8589                         break;
8590                 case RTE_FLOW_ITEM_TYPE_TCP:
8591                         flow_dv_translate_item_tcp(match_mask, match_value,
8592                                                    items, tunnel);
8593                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8594                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8595                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
8596                         break;
8597                 case RTE_FLOW_ITEM_TYPE_UDP:
8598                         flow_dv_translate_item_udp(match_mask, match_value,
8599                                                    items, tunnel);
8600                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8601                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8602                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
8603                         break;
8604                 case RTE_FLOW_ITEM_TYPE_GRE:
8605                         flow_dv_translate_item_gre(match_mask, match_value,
8606                                                    items, tunnel);
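                        /*
                         * Tunnel items take MLX5_TUNNEL_PRIO_GET()
                         * instead of a fixed layer priority: the macro
                         * is assumed to pick the L2 map when inner RSS
                         * is requested and the L4 map otherwise.
                         */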
8607                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8608                         last_item = MLX5_FLOW_LAYER_GRE;
8609                         break;
8610                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8611                         flow_dv_translate_item_gre_key(match_mask,
8612                                                        match_value, items);
8613                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
8614                         break;
8615                 case RTE_FLOW_ITEM_TYPE_NVGRE:
8616                         flow_dv_translate_item_nvgre(match_mask, match_value,
8617                                                      items, tunnel);
8618                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8619                         last_item = MLX5_FLOW_LAYER_GRE;
8620                         break;
8621                 case RTE_FLOW_ITEM_TYPE_VXLAN:
8622                         flow_dv_translate_item_vxlan(match_mask, match_value,
8623                                                      items, tunnel);
8624                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8625                         last_item = MLX5_FLOW_LAYER_VXLAN;
8626                         break;
8627                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8628                         flow_dv_translate_item_vxlan_gpe(match_mask,
8629                                                          match_value, items,
8630                                                          tunnel);
8631                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8632                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8633                         break;
8634                 case RTE_FLOW_ITEM_TYPE_GENEVE:
8635                         flow_dv_translate_item_geneve(match_mask, match_value,
8636                                                       items, tunnel);
8637                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8638                         last_item = MLX5_FLOW_LAYER_GENEVE;
8639                         break;
8640                 case RTE_FLOW_ITEM_TYPE_MPLS:
8641                         flow_dv_translate_item_mpls(match_mask, match_value,
8642                                                     items, last_item, tunnel);
8643                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8644                         last_item = MLX5_FLOW_LAYER_MPLS;
8645                         break;
8646                 case RTE_FLOW_ITEM_TYPE_MARK:
8647                         flow_dv_translate_item_mark(dev, match_mask,
8648                                                     match_value, items);
8649                         last_item = MLX5_FLOW_ITEM_MARK;
8650                         break;
8651                 case RTE_FLOW_ITEM_TYPE_META:
8652                         flow_dv_translate_item_meta(dev, match_mask,
8653                                                     match_value, attr, items);
8654                         last_item = MLX5_FLOW_ITEM_METADATA;
8655                         break;
8656                 case RTE_FLOW_ITEM_TYPE_ICMP:
8657                         flow_dv_translate_item_icmp(match_mask, match_value,
8658                                                     items, tunnel);
8659                         last_item = MLX5_FLOW_LAYER_ICMP;
8660                         break;
8661                 case RTE_FLOW_ITEM_TYPE_ICMP6:
8662                         flow_dv_translate_item_icmp6(match_mask, match_value,
8663                                                       items, tunnel);
8664                         last_item = MLX5_FLOW_LAYER_ICMP6;
8665                         break;
8666                 case RTE_FLOW_ITEM_TYPE_TAG:
8667                         flow_dv_translate_item_tag(dev, match_mask,
8668                                                    match_value, items);
8669                         last_item = MLX5_FLOW_ITEM_TAG;
8670                         break;
8671                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8672                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
8673                                                         match_value, items);
8674                         last_item = MLX5_FLOW_ITEM_TAG;
8675                         break;
8676                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8677                         flow_dv_translate_item_tx_queue(dev, match_mask,
8678                                                         match_value,
8679                                                         items);
8680                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
8681                         break;
8682                 case RTE_FLOW_ITEM_TYPE_GTP:
8683                         flow_dv_translate_item_gtp(match_mask, match_value,
8684                                                    items, tunnel);
8685                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8686                         last_item = MLX5_FLOW_LAYER_GTP;
8687                         break;
8688                 case RTE_FLOW_ITEM_TYPE_ECPRI:
8689                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
8690                                 /* Create it only the first time it is used. */
8691                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
8692                                 if (ret)
8693                                         return rte_flow_error_set
8694                                                 (error, -ret,
8695                                                 RTE_FLOW_ERROR_TYPE_ITEM,
8696                                                 NULL,
8697                                                 "cannot create eCPRI parser");
8698                         }
8699                         /* Adjust the length matcher and device flow value. */
8700                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
8701                         dev_flow->dv.value.size =
8702                                         MLX5_ST_SZ_BYTES(fte_match_param);
8703                         flow_dv_translate_item_ecpri(dev, match_mask,
8704                                                      match_value, items);
8705                         /* No other protocol should follow eCPRI layer. */
8706                         last_item = MLX5_FLOW_LAYER_ECPRI;
8707                         break;
8708                 default:
8709                         break;
8710                 }
8711                 item_flags |= last_item;
8712         }
8713         /*
8714          * When E-Switch mode is enabled, we have two cases where we need to
8715          * set the source port manually.
8716          * The first is a NIC steering rule, and the second is an
8717          * E-Switch rule where no port_id item was found. In both cases
8718          * the source port is set according to the current port in use.
8719          */
8720         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
8721             (priv->representor || priv->master)) {
8722                 if (flow_dv_translate_item_port_id(dev, match_mask,
8723                                                    match_value, NULL))
8724                         return -rte_errno;
8725         }
8726 #ifdef RTE_LIBRTE_MLX5_DEBUG
8727         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
8728                                               dev_flow->dv.value.buf));
8729 #endif
8730         /*
8731          * Layers may be already initialized from prefix flow if this dev_flow
8732          * is the suffix flow.
8733          */
8734         handle->layers |= item_flags;
8735         if (action_flags & MLX5_FLOW_ACTION_RSS)
8736                 flow_dv_hashfields_set(dev_flow, rss_desc);
8737         /* Register matcher. */
8738         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
8739                                     matcher.mask.size);
8740         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
8741                                                      matcher.priority);
8742         /* The reserved field does not need to be set to 0 here. */
8743         tbl_key.domain = attr->transfer;
8744         tbl_key.direction = attr->egress;
8745         tbl_key.table_id = dev_flow->dv.group;
8746         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
8747                 return -rte_errno;
8748         return 0;
8749 }
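
/*
 * Usage sketch (illustrative only, not part of the driver): a flow
 * whose translation walks the item loop above -- an outer
 * ETH/IPv4/UDP match steered to a single Rx queue. All values are
 * arbitrary and error handling is elided.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */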
8750
8751 /**
8752  * Apply the flow to the NIC. Lock free (the mutex should be
8753  * acquired by the caller).
8754  *
8755  * @param[in] dev
8756  *   Pointer to the Ethernet device structure.
8757  * @param[in, out] flow
8758  *   Pointer to flow structure.
8759  * @param[out] error
8760  *   Pointer to error structure.
8761  *
8762  * @return
8763  *   0 on success, a negative errno value otherwise and rte_errno is set.
8764  */
8765 static int
8766 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
8767                 struct rte_flow_error *error)
8768 {
8769         struct mlx5_flow_dv_workspace *dv;
8770         struct mlx5_flow_handle *dh;
8771         struct mlx5_flow_handle_dv *dv_h;
8772         struct mlx5_flow *dev_flow;
8773         struct mlx5_priv *priv = dev->data->dev_private;
8774         uint32_t handle_idx;
8775         int n;
8776         int err;
8777         int idx;
8778
8779         for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
8780                 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
8781                 dv = &dev_flow->dv;
8782                 dh = dev_flow->handle;
8783                 dv_h = &dh->dvh;
8784                 n = dv->actions_n;
8785                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8786                         if (dv->transfer) {
8787                                 dv->actions[n++] = priv->sh->esw_drop_action;
8788                         } else {
8789                                 struct mlx5_hrxq *drop_hrxq;
8790                                 drop_hrxq = mlx5_hrxq_drop_new(dev);
8791                                 if (!drop_hrxq) {
8792                                         rte_flow_error_set
8793                                                 (error, errno,
8794                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8795                                                  NULL,
8796                                                  "cannot get drop hash queue");
8797                                         goto error;
8798                                 }
8799                                 /*
8800                                  * Drop queues will be released by the dedicated
8801                                  * mlx5_hrxq_drop_release() function. Assign
8802                                  * the special index to hrxq to mark that the
8803                                  * queue has been allocated.
8804                                  */
8805                                 dh->rix_hrxq = UINT32_MAX;
8806                                 dv->actions[n++] = drop_hrxq->action;
8807                         }
8808                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8809                         struct mlx5_hrxq *hrxq;
8810                         uint32_t hrxq_idx;
8811                         struct mlx5_flow_rss_desc *rss_desc =
8812                                 &((struct mlx5_flow_rss_desc *)priv->rss_desc)
8813                                 [!!priv->flow_nested_idx];
8814
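                        /*
                         * Look up an existing hash Rx queue keyed by
                         * (RSS key, hash fields, queue list) and only
                         * create a new one on a miss, so flows with
                         * identical RSS settings share one hrxq object.
                         */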
8815                         MLX5_ASSERT(rss_desc->queue_num);
8816                         hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
8817                                                  MLX5_RSS_HASH_KEY_LEN,
8818                                                  dev_flow->hash_fields,
8819                                                  rss_desc->queue,
8820                                                  rss_desc->queue_num);
8821                         if (!hrxq_idx) {
8822                                 hrxq_idx = mlx5_hrxq_new
8823                                                 (dev, rss_desc->key,
8824                                                 MLX5_RSS_HASH_KEY_LEN,
8825                                                 dev_flow->hash_fields,
8826                                                 rss_desc->queue,
8827                                                 rss_desc->queue_num,
8828                                                 !!(dh->layers &
8829                                                 MLX5_FLOW_LAYER_TUNNEL));
8830                         }
8831                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8832                                               hrxq_idx);
8833                         if (!hrxq) {
8834                                 rte_flow_error_set
8835                                         (error, rte_errno,
8836                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8837                                          "cannot get hash queue");
8838                                 goto error;
8839                         }
8840                         dh->rix_hrxq = hrxq_idx;
8841                         dv->actions[n++] = hrxq->action;
8842                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
8843                         if (flow_dv_default_miss_resource_register
8844                                         (dev, error)) {
8845                                 rte_flow_error_set
8846                                         (error, rte_errno,
8847                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8848                                          "cannot create default miss resource");
8849                                 goto error_default_miss;
8850                         }
8851                         dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
8852                         dv->actions[n++] = priv->sh->default_miss.action;
8853                 }
8854                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
8855                                                (void *)&dv->value, n,
8856                                                dv->actions, &dh->drv_flow);
8857                 if (err) {
8858                         rte_flow_error_set(error, errno,
8859                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8860                                            NULL,
8861                                            "hardware refuses to create flow");
8862                         goto error;
8863                 }
8864                 if (priv->vmwa_context &&
8865                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
8866                         /*
8867                          * The rule contains the VLAN pattern.
8868                          * For a VF we are going to create a VLAN
8869                          * interface to make the hypervisor set the
8870                          * correct e-Switch vport context.
8871                          */
8872                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
8873                 }
8874         }
8875         return 0;
8876 error:
8877         if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
8878                 flow_dv_default_miss_resource_release(dev);
8879 error_default_miss:
8880         err = rte_errno; /* Save rte_errno before cleanup. */
8881         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
8882                        handle_idx, dh, next) {
8883                 /* hrxq is a union; don't clear it if the flag is not set. */
8884                 if (dh->rix_hrxq) {
8885                         if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8886                                 mlx5_hrxq_drop_release(dev);
8887                                 dh->rix_hrxq = 0;
8888                         } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8889                                 mlx5_hrxq_release(dev, dh->rix_hrxq);
8890                                 dh->rix_hrxq = 0;
8891                         }
8892                 }
8893                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
8894                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
8895         }
8896         rte_errno = err; /* Restore rte_errno. */
8897         return -rte_errno;
8898 }
8899
8900 /**
8901  * Release the flow matcher.
8902  *
8903  * @param dev
8904  *   Pointer to Ethernet device.
8905  * @param handle
8906  *   Pointer to mlx5_flow_handle.
8907  *
8908  * @return
8909  *   1 while a reference on it exists, 0 when freed.
8910  */
8911 static int
8912 flow_dv_matcher_release(struct rte_eth_dev *dev,
8913                         struct mlx5_flow_handle *handle)
8914 {
8915         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
8916
8917         MLX5_ASSERT(matcher->matcher_object);
8918         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
8919                 dev->data->port_id, (void *)matcher,
8920                 rte_atomic32_read(&matcher->refcnt));
8921         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
8922                 claim_zero(mlx5_flow_os_destroy_flow_matcher
8923                            (matcher->matcher_object));
8924                 LIST_REMOVE(matcher, next);
8925                 /* table ref-- in release interface. */
8926                 flow_dv_tbl_resource_release(dev, matcher->tbl);
8927                 mlx5_free(matcher);
8928                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
8929                         dev->data->port_id, (void *)matcher);
8930                 return 0;
8931         }
8932         return 1;
8933 }
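
/*
 * The release helpers below all follow the same reference-counting
 * idiom; a minimal sketch of the shape (names hypothetical):
 *
 *	static int resource_release(struct resource *res)
 *	{
 *		if (rte_atomic32_dec_and_test(&res->refcnt)) {
 *			destroy_hw_object(res->action);
 *			remove_from_list(res);
 *			free_memory(res);
 *			return 0;	// last user: freed
 *		}
 *		return 1;		// still referenced
 *	}
 */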
8934
8935 /**
8936  * Release an encap/decap resource.
8937  *
8938  * @param dev
8939  *   Pointer to Ethernet device.
8940  * @param handle
8941  *   Pointer to mlx5_flow_handle.
8942  *
8943  * @return
8944  *   1 while a reference on it exists, 0 when freed.
8945  */
8946 static int
8947 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
8948                                      struct mlx5_flow_handle *handle)
8949 {
8950         struct mlx5_priv *priv = dev->data->dev_private;
8951         uint32_t idx = handle->dvh.rix_encap_decap;
8952         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
8953
8954         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8955                          idx);
8956         if (!cache_resource)
8957                 return 0;
8958         MLX5_ASSERT(cache_resource->action);
8959         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
8960                 (void *)cache_resource,
8961                 rte_atomic32_read(&cache_resource->refcnt));
8962         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8963                 claim_zero(mlx5_flow_os_destroy_flow_action
8964                                                 (cache_resource->action));
8965                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8966                              &priv->sh->encaps_decaps, idx,
8967                              cache_resource, next);
8968                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
8969                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
8970                         (void *)cache_resource);
8971                 return 0;
8972         }
8973         return 1;
8974 }
8975
8976 /**
8977  * Release a jump-to-table action resource.
8978  *
8979  * @param dev
8980  *   Pointer to Ethernet device.
8981  * @param handle
8982  *   Pointer to mlx5_flow_handle.
8983  *
8984  * @return
8985  *   1 while a reference on it exists, 0 when freed.
8986  */
8987 static int
8988 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
8989                                   struct mlx5_flow_handle *handle)
8990 {
8991         struct mlx5_priv *priv = dev->data->dev_private;
8992         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
8993         struct mlx5_flow_tbl_data_entry *tbl_data;
8994
8995         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
8996                              handle->rix_jump);
8997         if (!tbl_data)
8998                 return 0;
8999         cache_resource = &tbl_data->jump;
9000         MLX5_ASSERT(cache_resource->action);
9001         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
9002                 (void *)cache_resource,
9003                 rte_atomic32_read(&cache_resource->refcnt));
9004         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9005                 claim_zero(mlx5_flow_os_destroy_flow_action
9006                                                 (cache_resource->action));
9007                 /* jump action memory free is inside the table release. */
9008                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
9009                 DRV_LOG(DEBUG, "jump table resource %p: removed",
9010                         (void *)cache_resource);
9011                 return 0;
9012         }
9013         return 1;
9014 }
9015
9016 /**
9017  * Release a default miss resource.
9018  *
9019  * @param dev
9020  *   Pointer to Ethernet device.
9021  * @return
9022  *   1 while a reference on it exists, 0 when freed.
9023  */
9024 static int
9025 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
9026 {
9027         struct mlx5_priv *priv = dev->data->dev_private;
9028         struct mlx5_dev_ctx_shared *sh = priv->sh;
9029         struct mlx5_flow_default_miss_resource *cache_resource =
9030                         &sh->default_miss;
9031
9032         MLX5_ASSERT(cache_resource->action);
9033         DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
9034                         (void *)cache_resource->action,
9035                         rte_atomic32_read(&cache_resource->refcnt));
9036         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9037                 claim_zero(mlx5_glue->destroy_flow_action
9038                                 (cache_resource->action));
9039                 DRV_LOG(DEBUG, "default miss resource %p: removed",
9040                                 (void *)cache_resource->action);
9041                 return 0;
9042         }
9043         return 1;
9044 }
9045
9046 /**
9047  * Release a modify-header resource.
9048  *
9049  * @param handle
9050  *   Pointer to mlx5_flow_handle.
9051  *
9052  * @return
9053  *   1 while a reference on it exists, 0 when freed.
9054  */
9055 static int
9056 flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
9057 {
9058         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
9059                                                         handle->dvh.modify_hdr;
9060
9061         MLX5_ASSERT(cache_resource->action);
9062         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
9063                 (void *)cache_resource,
9064                 rte_atomic32_read(&cache_resource->refcnt));
9065         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9066                 claim_zero(mlx5_flow_os_destroy_flow_action
9067                                                 (cache_resource->action));
9068                 LIST_REMOVE(cache_resource, next);
9069                 mlx5_free(cache_resource);
9070                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
9071                         (void *)cache_resource);
9072                 return 0;
9073         }
9074         return 1;
9075 }
9076
9077 /**
9078  * Release port ID action resource.
9079  *
9080  * @param dev
9081  *   Pointer to Ethernet device.
9082  * @param handle
9083  *   Pointer to mlx5_flow_handle.
9084  *
9085  * @return
9086  *   1 while a reference on it exists, 0 when freed.
9087  */
9088 static int
9089 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
9090                                         struct mlx5_flow_handle *handle)
9091 {
9092         struct mlx5_priv *priv = dev->data->dev_private;
9093         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
9094         uint32_t idx = handle->rix_port_id_action;
9095
9096         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9097                                         idx);
9098         if (!cache_resource)
9099                 return 0;
9100         MLX5_ASSERT(cache_resource->action);
9101         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
9102                 (void *)cache_resource,
9103                 rte_atomic32_read(&cache_resource->refcnt));
9104         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9105                 claim_zero(mlx5_flow_os_destroy_flow_action
9106                                                 (cache_resource->action));
9107                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9108                              &priv->sh->port_id_action_list, idx,
9109                              cache_resource, next);
9110                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
9111                 DRV_LOG(DEBUG, "port id action resource %p: removed",
9112                         (void *)cache_resource);
9113                 return 0;
9114         }
9115         return 1;
9116 }
9117
9118 /**
9119  * Release push vlan action resource.
9120  *
9121  * @param dev
9122  *   Pointer to Ethernet device.
9123  * @param handle
9124  *   Pointer to mlx5_flow_handle.
9125  *
9126  * @return
9127  *   1 while a reference on it exists, 0 when freed.
9128  */
9129 static int
9130 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
9131                                           struct mlx5_flow_handle *handle)
9132 {
9133         struct mlx5_priv *priv = dev->data->dev_private;
9134         uint32_t idx = handle->dvh.rix_push_vlan;
9135         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
9136
9137         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9138                                         idx);
9139         if (!cache_resource)
9140                 return 0;
9141         MLX5_ASSERT(cache_resource->action);
9142         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
9143                 (void *)cache_resource,
9144                 rte_atomic32_read(&cache_resource->refcnt));
9145         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9146                 claim_zero(mlx5_flow_os_destroy_flow_action
9147                                                 (cache_resource->action));
9148                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9149                              &priv->sh->push_vlan_action_list, idx,
9150                              cache_resource, next);
9151                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
9152                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
9153                         (void *)cache_resource);
9154                 return 0;
9155         }
9156         return 1;
9157 }
9158
9159 /**
9160  * Release the fate resource.
9161  *
9162  * @param dev
9163  *   Pointer to Ethernet device.
9164  * @param handle
9165  *   Pointer to mlx5_flow_handle.
9166  */
9167 static void
9168 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
9169                                struct mlx5_flow_handle *handle)
9170 {
9171         if (!handle->rix_fate)
9172                 return;
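        /*
         * rix_fate appears to overlay the fate-specific index fields
         * (rix_hrxq, rix_jump, rix_port_id_action, ...) in one union
         * (see the "hrxq is a union" note in the apply error path), so
         * the single check above and the single clear below cover
         * every fate type.
         */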
9173         switch (handle->fate_action) {
9174         case MLX5_FLOW_FATE_DROP:
9175                 mlx5_hrxq_drop_release(dev);
9176                 break;
9177         case MLX5_FLOW_FATE_QUEUE:
9178                 mlx5_hrxq_release(dev, handle->rix_hrxq);
9179                 break;
9180         case MLX5_FLOW_FATE_JUMP:
9181                 flow_dv_jump_tbl_resource_release(dev, handle);
9182                 break;
9183         case MLX5_FLOW_FATE_PORT_ID:
9184                 flow_dv_port_id_action_resource_release(dev, handle);
9185                 break;
9186         case MLX5_FLOW_FATE_DEFAULT_MISS:
9187                 flow_dv_default_miss_resource_release(dev);
9188                 break;
9189         default:
9190                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
9191                 break;
9192         }
9193         handle->rix_fate = 0;
9194 }
9195
9196 /**
9197  * Remove the flow from the NIC but keep it in memory.
9198  * Lock free (the mutex should be acquired by the caller).
9199  *
9200  * @param[in] dev
9201  *   Pointer to Ethernet device.
9202  * @param[in, out] flow
9203  *   Pointer to flow structure.
9204  */
9205 static void
9206 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9207 {
9208         struct mlx5_flow_handle *dh;
9209         uint32_t handle_idx;
9210         struct mlx5_priv *priv = dev->data->dev_private;
9211
9212         if (!flow)
9213                 return;
9214         handle_idx = flow->dev_handles;
9215         while (handle_idx) {
9216                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9217                                     handle_idx);
9218                 if (!dh)
9219                         return;
9220                 if (dh->drv_flow) {
9221                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
9222                         dh->drv_flow = NULL;
9223                 }
9224                 if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
9225                     dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
9226                     dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
9227                         flow_dv_fate_resource_release(dev, dh);
9228                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
9229                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
9230                 handle_idx = dh->next.next;
9231         }
9232 }
9233
9234 /**
9235  * Remove the flow from the NIC and the memory.
9236  * Lock free, (mutex should be acquired by caller).
9237  * Lock free (the mutex should be acquired by the caller).
9238  * @param[in] dev
9239  *   Pointer to the Ethernet device structure.
9240  * @param[in, out] flow
9241  *   Pointer to flow structure.
9242  */
9243 static void
9244 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9245 {
9246         struct mlx5_flow_handle *dev_handle;
9247         struct mlx5_priv *priv = dev->data->dev_private;
9248
9249         if (!flow)
9250                 return;
9251         __flow_dv_remove(dev, flow);
9252         if (flow->counter) {
9253                 flow_dv_counter_release(dev, flow->counter);
9254                 flow->counter = 0;
9255         }
9256         if (flow->meter) {
9257                 struct mlx5_flow_meter *fm;
9258
9259                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
9260                                     flow->meter);
9261                 if (fm)
9262                         mlx5_flow_meter_detach(fm);
9263                 flow->meter = 0;
9264         }
9265         while (flow->dev_handles) {
9266                 uint32_t tmp_idx = flow->dev_handles;
9267
9268                 dev_handle = mlx5_ipool_get(priv->sh->ipool
9269                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
9270                 if (!dev_handle)
9271                         return;
9272                 flow->dev_handles = dev_handle->next.next;
9273                 if (dev_handle->dvh.matcher)
9274                         flow_dv_matcher_release(dev, dev_handle);
9275                 if (dev_handle->dvh.rix_encap_decap)
9276                         flow_dv_encap_decap_resource_release(dev, dev_handle);
9277                 if (dev_handle->dvh.modify_hdr)
9278                         flow_dv_modify_hdr_resource_release(dev_handle);
9279                 if (dev_handle->dvh.rix_push_vlan)
9280                         flow_dv_push_vlan_action_resource_release(dev,
9281                                                                   dev_handle);
9282                 if (dev_handle->dvh.rix_tag)
9283                         flow_dv_tag_release(dev,
9284                                             dev_handle->dvh.rix_tag);
9285                 flow_dv_fate_resource_release(dev, dev_handle);
9286                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9287                            tmp_idx);
9288         }
9289 }
9290
9291 /**
9292  * Query a DV flow rule for its statistics via DevX.
9293  *
9294  * @param[in] dev
9295  *   Pointer to Ethernet device.
9296  * @param[in] flow
9297  *   Pointer to the flow structure.
9298  * @param[out] data
9299  *   Data retrieved by the query.
9300  * @param[out] error
9301  *   Perform verbose error reporting if not NULL.
9302  *
9303  * @return
9304  *   0 on success, a negative errno value otherwise and rte_errno is set.
9305  */
9306 static int
9307 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
9308                     void *data, struct rte_flow_error *error)
9309 {
9310         struct mlx5_priv *priv = dev->data->dev_private;
9311         struct rte_flow_query_count *qc = data;
9312
9313         if (!priv->config.devx)
9314                 return rte_flow_error_set(error, ENOTSUP,
9315                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9316                                           NULL,
9317                                           "counters are not supported");
9318         if (flow->counter) {
9319                 uint64_t pkts, bytes;
9320                 struct mlx5_flow_counter *cnt;
9321
9322                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
9323                                                  NULL);
9324                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
9325                                                &bytes);
9326
9327                 if (err)
9328                         return rte_flow_error_set(error, -err,
9329                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9330                                         NULL, "cannot read counters");
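                /*
                 * The counter caches the absolute values sampled at
                 * the last reset; report deltas against them. E.g.
                 * pkts == 150 with cnt->hits == 100 yields
                 * qc->hits == 50, and a reset moves the baseline up
                 * to 150.
                 */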
9331                 qc->hits_set = 1;
9332                 qc->bytes_set = 1;
9333                 qc->hits = pkts - cnt->hits;
9334                 qc->bytes = bytes - cnt->bytes;
9335                 if (qc->reset) {
9336                         cnt->hits = pkts;
9337                         cnt->bytes = bytes;
9338                 }
9339                 return 0;
9340         }
9341         return rte_flow_error_set(error, EINVAL,
9342                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9343                                   NULL,
9344                                   "counters are not available");
9345 }
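
/*
 * Usage sketch (application side, illustrative only): querying the
 * counter of a flow created with a COUNT action. Note the action
 * array must be END-terminated, as the dispatch loop below iterates
 * until RTE_FLOW_ACTION_TYPE_END.
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action count[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, count, &qc, &err) &&
 *	    qc.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */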
9346
9347 /**
9348  * Query a flow.
9349  *
9350  * @see rte_flow_query()
9351  * @see rte_flow_ops
9352  */
9353 static int
9354 flow_dv_query(struct rte_eth_dev *dev,
9355               struct rte_flow *flow,
9356               const struct rte_flow_action *actions,
9357               void *data,
9358               struct rte_flow_error *error)
9359 {
9360         int ret = -EINVAL;
9361
9362         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
9363                 switch (actions->type) {
9364                 case RTE_FLOW_ACTION_TYPE_VOID:
9365                         break;
9366                 case RTE_FLOW_ACTION_TYPE_COUNT:
9367                         ret = flow_dv_query_count(dev, flow, data, error);
9368                         break;
9369                 default:
9370                         return rte_flow_error_set(error, ENOTSUP,
9371                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9372                                                   actions,
9373                                                   "action not supported");
9374                 }
9375         }
9376         return ret;
9377 }
9378
9379 /**
9380  * Destroy the meter table set.
9381  * Lock free, (mutex should be acquired by caller).
9382  * Lock free (the mutex should be acquired by the caller).
9383  * @param[in] dev
9384  *   Pointer to Ethernet device.
9385  * @param[in] tbl
9386  *   Pointer to the meter table set.
9387  *
9388  * @return
9389  *   Always 0.
9390  */
9391 static int
9392 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
9393                         struct mlx5_meter_domains_infos *tbl)
9394 {
9395         struct mlx5_priv *priv = dev->data->dev_private;
9396         struct mlx5_meter_domains_infos *mtd =
9397                                 (struct mlx5_meter_domains_infos *)tbl;
9398
9399         if (!mtd || !priv->config.dv_flow_en)
9400                 return 0;
9401         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
9402                 claim_zero(mlx5_flow_os_destroy_flow
9403                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
9404         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
9405                 claim_zero(mlx5_flow_os_destroy_flow
9406                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
9407         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
9408                 claim_zero(mlx5_flow_os_destroy_flow
9409                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
9410         if (mtd->egress.color_matcher)
9411                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9412                            (mtd->egress.color_matcher));
9413         if (mtd->egress.any_matcher)
9414                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9415                            (mtd->egress.any_matcher));
9416         if (mtd->egress.tbl)
9417                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
9418         if (mtd->egress.sfx_tbl)
9419                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
9420         if (mtd->ingress.color_matcher)
9421                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9422                            (mtd->ingress.color_matcher));
9423         if (mtd->ingress.any_matcher)
9424                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9425                            (mtd->ingress.any_matcher));
9426         if (mtd->ingress.tbl)
9427                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
9428         if (mtd->ingress.sfx_tbl)
9429                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
9430         if (mtd->transfer.color_matcher)
9431                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9432                            (mtd->transfer.color_matcher));
9433         if (mtd->transfer.any_matcher)
9434                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9435                            (mtd->transfer.any_matcher));
9436         if (mtd->transfer.tbl)
9437                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
9438         if (mtd->transfer.sfx_tbl)
9439                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
9440         if (mtd->drop_actn)
9441                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
9442         mlx5_free(mtd);
9443         return 0;
9444 }
9445
9446 /* Number of meter flow actions: count and jump, or count and drop. */
9447 #define METER_ACTIONS 2
9448
9449 /**
9450  * Create the meter table and suffix table for the specified domain.
9451  *
9452  * @param[in] dev
9453  *   Pointer to Ethernet device.
9454  * @param[in,out] mtb
9455  *   Pointer to DV meter table set.
9456  * @param[in] egress
9457  *   Table attribute.
9458  * @param[in] transfer
9459  *   Table attribute.
9460  * @param[in] color_reg_c_idx
9461  *   Reg C index for color match.
9462  *
9463  * @return
9464  *   0 on success, -1 otherwise and rte_errno is set.
9465  */
9466 static int
9467 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
9468                            struct mlx5_meter_domains_infos *mtb,
9469                            uint8_t egress, uint8_t transfer,
9470                            uint32_t color_reg_c_idx)
9471 {
9472         struct mlx5_priv *priv = dev->data->dev_private;
9473         struct mlx5_dev_ctx_shared *sh = priv->sh;
9474         struct mlx5_flow_dv_match_params mask = {
9475                 .size = sizeof(mask.buf),
9476         };
9477         struct mlx5_flow_dv_match_params value = {
9478                 .size = sizeof(value.buf),
9479         };
9480         struct mlx5dv_flow_matcher_attr dv_attr = {
9481                 .type = IBV_FLOW_ATTR_NORMAL,
9482                 .priority = 0,
9483                 .match_criteria_enable = 0,
9484                 .match_mask = (void *)&mask,
9485         };
9486         void *actions[METER_ACTIONS];
9487         struct mlx5_meter_domain_info *dtb;
9488         struct rte_flow_error error;
9489         int i = 0;
9490         int ret;
9491
9492         if (transfer)
9493                 dtb = &mtb->transfer;
9494         else if (egress)
9495                 dtb = &mtb->egress;
9496         else
9497                 dtb = &mtb->ingress;
9498         /* Create the meter table with METER level. */
9499         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
9500                                             egress, transfer, &error);
9501         if (!dtb->tbl) {
9502                 DRV_LOG(ERR, "Failed to create meter policer table.");
9503                 return -1;
9504         }
9505         /* Create the meter suffix table with SUFFIX level. */
9506         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
9507                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
9508                                             egress, transfer, &error);
9509         if (!dtb->sfx_tbl) {
9510                 DRV_LOG(ERR, "Failed to create meter suffix table.");
9511                 return -1;
9512         }
9513         /* Create matchers, Any and Color. */
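        /*
         * Two matchers per domain table: a color matcher at priority 0
         * steering on the color value written into the REG_C register,
         * and a lower-priority (3) catch-all matcher whose single rule
         * drops packets that matched no color.
         */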
9514         dv_attr.priority = 3;
9515         dv_attr.match_criteria_enable = 0;
9516         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9517                                                &dtb->any_matcher);
9518         if (ret) {
9519                 DRV_LOG(ERR, "Failed to create meter"
9520                              " policer default matcher.");
9521                 goto error_exit;
9522         }
9523         dv_attr.priority = 0;
9524         dv_attr.match_criteria_enable =
9525                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9526         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
9527                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
9528         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9529                                                &dtb->color_matcher);
9530         if (ret) {
9531                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
9532                 goto error_exit;
9533         }
9534         if (mtb->count_actns[RTE_MTR_DROPPED])
9535                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
9536         actions[i++] = mtb->drop_actn;
9537         /* Default rule: lowest priority, match any, actions: drop. */
9538         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
9539                                        actions,
9540                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
9541         if (ret) {
9542                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
9543                 goto error_exit;
9544         }
9545         return 0;
9546 error_exit:
9547         return -1;
9548 }
9549
9550 /**
9551  * Create the needed meter and suffix tables.
9552  * Lock free (the mutex should be acquired by the caller).
9553  *
9554  * @param[in] dev
9555  *   Pointer to Ethernet device.
9556  * @param[in] fm
9557  *   Pointer to the flow meter.
9558  *
9559  * @return
9560  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
9561  */
9562 static struct mlx5_meter_domains_infos *
9563 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
9564                        const struct mlx5_flow_meter *fm)
9565 {
9566         struct mlx5_priv *priv = dev->data->dev_private;
9567         struct mlx5_meter_domains_infos *mtb;
9568         int ret;
9569         int i;
9570
9571         if (!priv->mtr_en) {
9572                 rte_errno = ENOTSUP;
9573                 return NULL;
9574         }
9575         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
9576         if (!mtb) {
9577                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
9578                 return NULL;
9579         }
9580         /* Create meter count actions. */
9581         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
9582                 struct mlx5_flow_counter *cnt;
9583                 if (!fm->policer_stats.cnt[i])
9584                         continue;
9585                 cnt = flow_dv_counter_get_by_idx(dev,
9586                       fm->policer_stats.cnt[i], NULL);
9587                 mtb->count_actns[i] = cnt->action;
9588         }
9589         /* Create drop action. */
9590         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
9591         if (ret) {
9592                 DRV_LOG(ERR, "Failed to create drop action.");
9593                 goto error_exit;
9594         }
9595         /* Egress meter table. */
9596         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
9597         if (ret) {
9598                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
9599                 goto error_exit;
9600         }
9601         /* Ingress meter table. */
9602         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
9603         if (ret) {
9604                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
9605                 goto error_exit;
9606         }
9607         /* FDB meter table. */
9608         if (priv->config.dv_esw_en) {
9609                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
9610                                                  priv->mtr_color_reg);
9611                 if (ret) {
9612                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
9613                         goto error_exit;
9614                 }
9615         }
9616         return mtb;
9617 error_exit:
9618         flow_dv_destroy_mtr_tbl(dev, mtb);
9619         return NULL;
9620 }
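
/*
 * Illustrative usage (hypothetical caller, simplified): the table set
 * returned above is released with flow_dv_destroy_mtr_tbl(), and both calls
 * are expected to run under the caller's meter mutex, per the "lock-free"
 * note in the function header.
 */
static inline struct mlx5_meter_domains_infos *
example_mtr_tbl_get(struct rte_eth_dev *dev, const struct mlx5_flow_meter *fm)
{
        struct mlx5_meter_domains_infos *mtb = flow_dv_create_mtr_tbl(dev, fm);

        if (!mtb)
                DRV_LOG(ERR, "Meter table creation failed: %d.", rte_errno);
        return mtb;
}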
9621
9622 /**
9623  * Destroy the policer rules of a meter domain.
9624  *
9625  * @param[in] dt
9626  *   Pointer to the meter domain table.
9627  */
9628 static void
9629 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
9630 {
9631         int i;
9632
9633         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9634                 if (dt->policer_rules[i]) {
9635                         claim_zero(mlx5_flow_os_destroy_flow
9636                                    (dt->policer_rules[i]));
9637                         dt->policer_rules[i] = NULL;
9638                 }
9639         }
9640         if (dt->jump_actn) {
9641                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
9642                 dt->jump_actn = NULL;
9643         }
9644 }
9645
9646 /**
9647  * Destroy policer rules.
9648  *
9649  * @param[in] dev
9650  *   Pointer to Ethernet device.
9651  * @param[in] fm
9652  *   Pointer to flow meter structure.
9653  * @param[in] attr
9654  *   Pointer to flow attributes.
9655  *
9656  * @return
9657  *   Always 0.
9658  */
9659 static int
9660 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
9661                               const struct mlx5_flow_meter *fm,
9662                               const struct rte_flow_attr *attr)
9663 {
9664         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
9665
9666         if (!mtb)
9667                 return 0;
9668         if (attr->egress)
9669                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
9670         if (attr->ingress)
9671                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
9672         if (attr->transfer)
9673                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
9674         return 0;
9675 }
9676
9677 /**
9678  * Create the meter policer rules for a specific domain.
9679  *
9680  * @param[in] fm
9681  *   Pointer to flow meter structure.
9682  * @param[in] dtb
9683  *   Pointer to the DV meter domain table.
9684  * @param[in] mtr_reg_c
9685  *   Color match REG_C.
9686  *
9687  * @return
9688  *   0 on success, -1 otherwise.
9689  */
9690 static int
9691 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
9692                                     struct mlx5_meter_domain_info *dtb,
9693                                     uint8_t mtr_reg_c)
9694 {
9695         struct mlx5_flow_dv_match_params matcher = {
9696                 .size = sizeof(matcher.buf),
9697         };
9698         struct mlx5_flow_dv_match_params value = {
9699                 .size = sizeof(value.buf),
9700         };
9701         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9702         void *actions[METER_ACTIONS];
9703         int i;
9704         int ret = 0;
9705
9706         /* Create jump action. */
9707         if (!dtb->jump_actn)
9708                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9709                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
9710         if (ret) {
9711                 DRV_LOG(ERR, "Failed to create policer jump action.");
9712                 goto error;
9713         }
9714         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9715                 int j = 0;
9716
9717                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
9718                                        rte_col_2_mlx5_col(i), UINT8_MAX);
9719                 if (mtb->count_actns[i])
9720                         actions[j++] = mtb->count_actns[i];
9721                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
9722                         actions[j++] = mtb->drop_actn;
9723                 else
9724                         actions[j++] = dtb->jump_actn;
9725                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
9726                                                (void *)&value, j, actions,
9727                                                &dtb->policer_rules[i]);
9728                 if (ret) {
9729                         DRV_LOG(ERR, "Failed to create policer rule.");
9730                         goto error;
9731                 }
9732         }
9733         return 0;
9734 error:
9735         rte_errno = errno;
9736         return -1;
9737 }
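
/*
 * Illustrative helper (hypothetical, not called by the driver): the loop
 * above chooses the terminal action per color exactly like this - drop if
 * the policer action configured for that color is DROP, otherwise jump to
 * the suffix table.
 */
static inline void *
example_color_term_action(struct mlx5_flow_meter *fm,
                          struct mlx5_meter_domain_info *dtb, int color)
{
        struct mlx5_meter_domains_infos *mtb = fm->mfts;

        return fm->action[color] == MTR_POLICER_ACTION_DROP ?
               mtb->drop_actn : dtb->jump_actn;
}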
9738
9739 /**
9740  * Create policer rules.
9741  *
9742  * @param[in] dev
9743  *   Pointer to Ethernet device.
9744  * @param[in] fm
9745  *   Pointer to flow meter structure.
9746  * @param[in] attr
9747  *   Pointer to flow attributes.
9748  *
9749  * @return
9750  *   0 on success, -1 otherwise.
9751  */
9752 static int
9753 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
9754                              struct mlx5_flow_meter *fm,
9755                              const struct rte_flow_attr *attr)
9756 {
9757         struct mlx5_priv *priv = dev->data->dev_private;
9758         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9759         int ret;
9760
9761         if (attr->egress) {
9762                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
9763                                                 priv->mtr_color_reg);
9764                 if (ret) {
9765                         DRV_LOG(ERR, "Failed to create egress policer.");
9766                         goto error;
9767                 }
9768         }
9769         if (attr->ingress) {
9770                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
9771                                                 priv->mtr_color_reg);
9772                 if (ret) {
9773                         DRV_LOG(ERR, "Failed to create ingress policer.");
9774                         goto error;
9775                 }
9776         }
9777         if (attr->transfer) {
9778                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
9779                                                 priv->mtr_color_reg);
9780                 if (ret) {
9781                         DRV_LOG(ERR, "Failed to create transfer policer.");
9782                         goto error;
9783                 }
9784         }
9785         return 0;
9786 error:
9787         flow_dv_destroy_policer_rules(dev, fm, attr);
9788         return -1;
9789 }
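
/*
 * Illustrative sketch (hypothetical): the per-domain fan-out above could
 * equally be written as a loop over the requested domains; the driver keeps
 * it unrolled so each failure gets a domain-specific log message.
 */
static inline int
example_policer_fanout(struct rte_eth_dev *dev, struct mlx5_flow_meter *fm,
                       const struct rte_flow_attr *attr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_meter_domains_infos *mtb = fm->mfts;
        struct mlx5_meter_domain_info *doms[3];
        int n = 0;
        int i;

        if (attr->egress)
                doms[n++] = &mtb->egress;
        if (attr->ingress)
                doms[n++] = &mtb->ingress;
        if (attr->transfer)
                doms[n++] = &mtb->transfer;
        for (i = 0; i < n; i++)
                if (flow_dv_create_policer_forward_rule(fm, doms[i],
                                                        priv->mtr_color_reg))
                        return -1;
        return 0;
}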
9790
9791 /**
9792  * Query a devx counter.
9793  *
9794  * @param[in] dev
9795  *   Pointer to the Ethernet device structure.
9796  * @param[in] counter
9797  *   Index to the flow counter.
9798  * @param[in] clear
9799  *   Set to clear the counter statistics.
9800  * @param[out] pkts
9801  *   The statistics value of packets.
9802  * @param[out] bytes
9803  *   The statistics value of bytes.
9804  *
9805  * @return
9806  *   0 on success, -1 otherwise.
9807  */
9808 static int
9809 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
9810                       uint64_t *pkts, uint64_t *bytes)
9811 {
9812         struct mlx5_priv *priv = dev->data->dev_private;
9813         struct mlx5_flow_counter *cnt;
9814         uint64_t inn_pkts, inn_bytes;
9815         int ret;
9816
9817         if (!priv->config.devx)
9818                 return -1;
9819
9820         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
9821         if (ret)
9822                 return -1;
9823         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
9824         *pkts = inn_pkts - cnt->hits;
9825         *bytes = inn_bytes - cnt->bytes;
9826         if (clear) {
9827                 cnt->hits = inn_pkts;
9828                 cnt->bytes = inn_bytes;
9829         }
9830         return 0;
9831 }
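
/*
 * Illustrative sketch (plain C, hypothetical names): the query above
 * reports deltas against a saved baseline - the hardware counter is
 * monotonic, so "clear" just moves the baseline forward instead of
 * touching the device.
 */
struct example_baseline {
        uint64_t hits;  /* Packets seen at the last clear. */
        uint64_t bytes; /* Bytes seen at the last clear. */
};

static inline void
example_delta_query(struct example_baseline *base, uint64_t hw_pkts,
                    uint64_t hw_bytes, bool clear,
                    uint64_t *pkts, uint64_t *bytes)
{
        *pkts = hw_pkts - base->hits; /* Delta since the last clear. */
        *bytes = hw_bytes - base->bytes;
        if (clear) {
                /* The next query starts counting from zero again. */
                base->hits = hw_pkts;
                base->bytes = hw_bytes;
        }
}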
9832
9833 /**
9834  * Get aged-out flows.
9835  *
9836  * @param[in] dev
9837  *   Pointer to the Ethernet device structure.
9838  * @param[in] context
9839  *   The address of an array of pointers to the aged-out flows contexts.
9840  * @param[in] nb_contexts
9841  *   The length of the context array.
9842  * @param[out] error
9843  *   Perform verbose error reporting if not NULL. Initialized in case of
9844  *   error only.
9845  *
9846  * @return
9847  *   The number of aged-out flow contexts on success, otherwise a
9848  *   negative errno value. If nb_contexts is 0, return the total number
9849  *   of aged-out flows. If nb_contexts is not 0, return the number of
9850  *   aged-out flows reported in the context array.
9851  * @note: only stub for now.
9852  */
9853 static int
9854 flow_get_aged_flows(struct rte_eth_dev *dev,
9855                     void **context,
9856                     uint32_t nb_contexts,
9857                     struct rte_flow_error *error)
9858 {
9859         struct mlx5_priv *priv = dev->data->dev_private;
9860         struct mlx5_age_info *age_info;
9861         struct mlx5_age_param *age_param;
9862         struct mlx5_flow_counter *counter;
9863         int nb_flows = 0;
9864
9865         if (nb_contexts && !context)
9866                 return rte_flow_error_set(error, EINVAL,
9867                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9868                                           NULL,
9869                                           "Context must be assigned when"
9870                                           " nb_contexts is not 0");
9871         age_info = GET_PORT_AGE_INFO(priv);
9872         rte_spinlock_lock(&age_info->aged_sl);
9873         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
9874                 nb_flows++;
9875                 if (nb_contexts) {
9876                         age_param = MLX5_CNT_TO_AGE(counter);
9877                         context[nb_flows - 1] = age_param->context;
9878                         if (!(--nb_contexts))
9879                                 break;
9880                 }
9881         }
9882         rte_spinlock_unlock(&age_info->aged_sl);
9883         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
9884         return nb_flows;
9885 }
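
/*
 * Illustrative usage (hypothetical application code): the public API on top
 * of this callback, rte_flow_get_aged_flows(), is typically called twice -
 * once with nb_contexts == 0 to learn the count, then again with an array
 * of that size.
 */
static inline int
example_drain_aged_flows(uint16_t port_id)
{
        struct rte_flow_error err;
        void **ctx;
        int n;

        n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
        if (n <= 0)
                return n;
        ctx = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * n, 0,
                          SOCKET_ID_ANY);
        if (!ctx)
                return -ENOMEM;
        n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
        /* ... hand each aged context back to the application ... */
        mlx5_free(ctx);
        return n;
}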
9886
9887 /*
9888  * Mutex-protected thunk to lock-free __flow_dv_translate().
9889  */
9890 static int
9891 flow_dv_translate(struct rte_eth_dev *dev,
9892                   struct mlx5_flow *dev_flow,
9893                   const struct rte_flow_attr *attr,
9894                   const struct rte_flow_item items[],
9895                   const struct rte_flow_action actions[],
9896                   struct rte_flow_error *error)
9897 {
9898         int ret;
9899
9900         flow_dv_shared_lock(dev);
9901         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
9902         flow_dv_shared_unlock(dev);
9903         return ret;
9904 }
9905
9906 /*
9907  * Mutex-protected thunk to lock-free __flow_dv_apply().
9908  */
9909 static int
9910 flow_dv_apply(struct rte_eth_dev *dev,
9911               struct rte_flow *flow,
9912               struct rte_flow_error *error)
9913 {
9914         int ret;
9915
9916         flow_dv_shared_lock(dev);
9917         ret = __flow_dv_apply(dev, flow, error);
9918         flow_dv_shared_unlock(dev);
9919         return ret;
9920 }
9921
9922 /*
9923  * Mutex-protected thunk to lock-free __flow_dv_remove().
9924  */
9925 static void
9926 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9927 {
9928         flow_dv_shared_lock(dev);
9929         __flow_dv_remove(dev, flow);
9930         flow_dv_shared_unlock(dev);
9931 }
9932
9933 /*
9934  * Mutex-protected thunk to lock-free __flow_dv_destroy().
9935  */
9936 static void
9937 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9938 {
9939         flow_dv_shared_lock(dev);
9940         __flow_dv_destroy(dev, flow);
9941         flow_dv_shared_unlock(dev);
9942 }
9943
9944 /*
9945  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
9946  */
9947 static uint32_t
9948 flow_dv_counter_allocate(struct rte_eth_dev *dev)
9949 {
9950         uint32_t cnt;
9951
9952         flow_dv_shared_lock(dev);
9953         cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
9954         flow_dv_shared_unlock(dev);
9955         return cnt;
9956 }
9957
9958 /*
9959  * Mutex-protected thunk to lock-free flow_dv_counter_release().
9960  */
9961 static void
9962 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
9963 {
9964         flow_dv_shared_lock(dev);
9965         flow_dv_counter_release(dev, cnt);
9966         flow_dv_shared_unlock(dev);
9967 }
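
/*
 * Illustrative sketch (hypothetical names): every thunk above follows the
 * same shape, so a new lock-free primitive __flow_dv_foo() would be
 * exposed the same way:
 *
 *      static int
 *      flow_dv_foo(struct rte_eth_dev *dev)
 *      {
 *              int ret;
 *
 *              flow_dv_shared_lock(dev);
 *              ret = __flow_dv_foo(dev);
 *              flow_dv_shared_unlock(dev);
 *              return ret;
 *      }
 */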
9968
9969 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
9970         .validate = flow_dv_validate,
9971         .prepare = flow_dv_prepare,
9972         .translate = flow_dv_translate,
9973         .apply = flow_dv_apply,
9974         .remove = flow_dv_remove,
9975         .destroy = flow_dv_destroy,
9976         .query = flow_dv_query,
9977         .create_mtr_tbls = flow_dv_create_mtr_tbl,
9978         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
9979         .create_policer_rules = flow_dv_create_policer_rules,
9980         .destroy_policer_rules = flow_dv_destroy_policer_rules,
9981         .counter_alloc = flow_dv_counter_allocate,
9982         .counter_free = flow_dv_counter_free,
9983         .counter_query = flow_dv_counter_query,
9984         .get_aged_flows = flow_get_aged_flows,
9985 };
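
/*
 * Illustrative note (hypothetical call path): the generic mlx5 flow layer
 * selects this table when the DV engine is enabled and dispatches through
 * it, roughly:
 *
 *      const struct mlx5_flow_driver_ops *fops =
 *              flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *      fops->translate(dev, dev_flow, attr, items, actions, error);
 *      fops->apply(dev, flow, error);
 */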
9986
9987 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */