/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

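/*
 * Illustrative example: the masks above partition the 16-bit VLAN TCI.
 * For a hypothetical TCI of 0xa064:
 *
 *   pcp = (0xa064 & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT;
 *   vid = 0xa064 & MLX5DV_FLOW_VLAN_VID_MASK;
 *
 * yields pcp == 5 and vid == 0x064 (bit 12 is the DEI bit, covered by
 * neither mask).
 */
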
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() prevents multiple L3/L4 layers except in tunnel
 * mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * prefix flow's layer flags here, since the suffix flow may not
         * carry the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

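/*
 * Illustrative example: for the pattern eth / ipv4 / udp the loop above
 * leaves attr->ipv4 == 1, attr->udp == 1 and attr->valid == 1. With
 * tunnel_decap == true and a hypothetical pattern
 * eth / ipv4 / udp / vxlan / eth / ipv6 / tcp, the VXLAN item resets the
 * accumulated bits, so the final attributes describe the inner headers:
 * attr->ipv6 == 1 and attr->tcp == 1.
 */
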
/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

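/**
 * Update the layer flags and the tunnel indication when an IPv4/IPv6 item
 * carries a tunneled next protocol (IP-in-IP or IPv6 encapsulation).
 *
 * @param[in] item
 *   Pointer to the IP item (only referenced by debug assertions).
 * @param[in] next_protocol
 *   Effective next-protocol (IPv4) or proto (IPv6) field value.
 * @param[in,out] item_flags
 *   Item flags to update.
 * @param[in,out] tunnel
 *   Set to 1 when a tunnel layer is detected.
 */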
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared DV context. Locking occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and representors
 * are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

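/*
 * Illustrative example: applying OF_SET_VLAN_PCP with vlan_pcp == 5 to a
 * header whose TCI is a hypothetical 0x0064 clears the PCP bits
 * (mask 0xe000) and ORs in 5 << 13, giving 0xa064; the VID bits are left
 * untouched.
 */
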
/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

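/*
 * Illustrative example: with data == {0x12, 0x34, 0x56} and size == 3 the
 * routine above reads the first two bytes as a big-endian 16-bit value
 * (0x1234), shifts it left by 8 and ORs in the third byte, returning
 * 0x123456 in host endianness.
 */
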
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

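/*
 * Illustrative example: a hypothetical 4-byte mask of 0x00fff000 yields
 *
 *   off_b  = rte_bsf32(0x00fff000) == 12
 *   size_b = 32 - 12 - __builtin_clz(0x00fff000) == 32 - 12 - 8 == 12
 *
 * i.e. a modification command touching a 12-bit field that starts at
 * bit offset 12.
 */
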
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times: each addition of
                 * UINT32_MAX decrements Y by 1 modulo 2^32.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

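/*
 * Worked example: to decrement the sequence number by 3, value becomes
 * 3 * UINT32_MAX == 0x2fffffffd, which the (uint32_t) cast truncates to
 * 0xfffffffd. Since UINT32_MAX == -1 (mod 2^32), adding 0xfffffffd is
 * the same as subtracting 3.
 */
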
/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times: each addition of
                 * UINT32_MAX decrements Y by 1 modulo 2^32.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NONE] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NONE);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NONE);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores the endianness, because
                         * there is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

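/*
 * Illustrative example (little-endian host): assume a hypothetical
 * dv_regc0_mask of 0xffff0000, i.e. the upper 16 bits of reg_c[0] are
 * available. Copying to REG_C_0 then uses
 *
 *   reg_dst.offset = rte_bsf32(0xffff0000) == 16
 *   mask = 0xffff0000 << (32 - rte_fls_u32(0xffff0000)) == 0xffff0000
 *
 * so only the permitted bits of reg_c[0] are ever written.
 */
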
/**
 * Convert MARK action to DV specification. This routine is used
 * only in extensive metadata mode and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
                {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0].id = reg_to_field[reg];
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * For performance reasons there are no endianness
         * conversions in the datapath code; all pattern
         * conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

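/*
 * Note on the mask arithmetic above: RTE_IPV4_HDR_DSCP_MASK covers the
 * DSCP bits, i.e. the upper six bits of the ToS byte, so shifting it
 * right by 2 produces the same six-bit mask (0x3f) byte aligned at bit 0,
 * which is the layout rdma-core expects (see also the IPv6 variant below).
 */
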
/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits of IPv6 are not byte aligned,
         * rdma-core only accepts byte-aligned DSCP values occupying
         * bits 0 to 5, to stay compatible with IPv4, so no bit
         * shifting is needed in the IPv6 case.
         */
1345         ipv6.hdr.vtc_flow = conf->dscp;
1346         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1347         item.spec = &ipv6;
1348         item.mask = &ipv6_mask;
1349         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1350                                              MLX5_MODIFICATION_TYPE_SET, error);
1351 }
1352
1353 /**
1354  * Validate MARK item.
1355  *
1356  * @param[in] dev
1357  *   Pointer to the rte_eth_dev structure.
1358  * @param[in] item
1359  *   Item specification.
1360  * @param[in] attr
1361  *   Attributes of flow that includes this item.
1362  * @param[out] error
1363  *   Pointer to error structure.
1364  *
1365  * @return
1366  *   0 on success, a negative errno value otherwise and rte_errno is set.
1367  */
1368 static int
1369 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1370                            const struct rte_flow_item *item,
1371                            const struct rte_flow_attr *attr __rte_unused,
1372                            struct rte_flow_error *error)
1373 {
1374         struct mlx5_priv *priv = dev->data->dev_private;
1375         struct mlx5_dev_config *config = &priv->config;
1376         const struct rte_flow_item_mark *spec = item->spec;
1377         const struct rte_flow_item_mark *mask = item->mask;
1378         const struct rte_flow_item_mark nic_mask = {
1379                 .id = priv->sh->dv_mark_mask,
1380         };
1381         int ret;
1382
1383         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1384                 return rte_flow_error_set(error, ENOTSUP,
1385                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1386                                           "extended metadata feature"
1387                                           " isn't enabled");
1388         if (!mlx5_flow_ext_mreg_supported(dev))
1389                 return rte_flow_error_set(error, ENOTSUP,
1390                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1391                                           "extended metadata register"
1392                                           " isn't supported");
1393         if (!nic_mask.id)
1394                 return rte_flow_error_set(error, ENOTSUP,
1395                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1396                                           "extended metadata register"
1397                                           " isn't available");
1398         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1399         if (ret < 0)
1400                 return ret;
1401         if (!spec)
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1404                                           item->spec,
1405                                           "data cannot be empty");
1406         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1407                 return rte_flow_error_set(error, EINVAL,
1408                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1409                                           &spec->id,
1410                                           "mark id exceeds the limit");
1411         if (!mask)
1412                 mask = &nic_mask;
1413         if (!mask->id)
1414                 return rte_flow_error_set(error, EINVAL,
1415                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1416                                         "mask cannot be zero");
1417
1418         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1419                                         (const uint8_t *)&nic_mask,
1420                                         sizeof(struct rte_flow_item_mark),
1421                                         error);
1422         if (ret < 0)
1423                 return ret;
1424         return 0;
1425 }
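
/*
 * Editorial example (hypothetical, not part of the driver): a MARK
 * pattern item that passes the validation above. The id is arbitrary
 * and must stay below MLX5_FLOW_MARK_MAX masked by dv_mark_mask; the
 * mask is left NULL so the validator falls back to its nic_mask.
 */
static __rte_unused void
flow_dv_doc_example_mark_item(void)
{
        static const struct rte_flow_item_mark spec = { .id = 0x2a };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_MARK,
                .spec = &spec,
        };

        (void)item;
}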
1426
1427 /**
1428  * Validate META item.
1429  *
1430  * @param[in] dev
1431  *   Pointer to the rte_eth_dev structure.
1432  * @param[in] item
1433  *   Item specification.
1434  * @param[in] attr
1435  *   Attributes of flow that includes this item.
1436  * @param[out] error
1437  *   Pointer to error structure.
1438  *
1439  * @return
1440  *   0 on success, a negative errno value otherwise and rte_errno is set.
1441  */
1442 static int
1443 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1444                            const struct rte_flow_item *item,
1445                            const struct rte_flow_attr *attr,
1446                            struct rte_flow_error *error)
1447 {
1448         struct mlx5_priv *priv = dev->data->dev_private;
1449         struct mlx5_dev_config *config = &priv->config;
1450         const struct rte_flow_item_meta *spec = item->spec;
1451         const struct rte_flow_item_meta *mask = item->mask;
1452         struct rte_flow_item_meta nic_mask = {
1453                 .data = UINT32_MAX
1454         };
1455         int reg;
1456         int ret;
1457
1458         if (!spec)
1459                 return rte_flow_error_set(error, EINVAL,
1460                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1461                                           item->spec,
1462                                           "data cannot be empty");
1463         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1464                 if (!mlx5_flow_ext_mreg_supported(dev))
1465                         return rte_flow_error_set(error, ENOTSUP,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1467                                           "extended metadata register"
1468                                           " isn't supported");
1469                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1470                 if (reg < 0)
1471                         return reg;
1472                 if (reg == REG_B)
1473                         return rte_flow_error_set(error, ENOTSUP,
1474                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1475                                           "match on reg_b "
1476                                           "isn't supported");
1477                 if (reg != REG_A)
1478                         nic_mask.data = priv->sh->dv_meta_mask;
1479         } else if (attr->transfer) {
1480                 return rte_flow_error_set(error, ENOTSUP,
1481                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1482                                         "extended metadata feature "
1483                                         "should be enabled when "
1484                                         "meta item is requested "
1485                                         "with e-switch mode ");
1486         }
1487         if (!mask)
1488                 mask = &rte_flow_item_meta_mask;
1489         if (!mask->data)
1490                 return rte_flow_error_set(error, EINVAL,
1491                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1492                                         "mask cannot be zero");
1493
1494         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1495                                         (const uint8_t *)&nic_mask,
1496                                         sizeof(struct rte_flow_item_meta),
1497                                         error);
1498         return ret;
1499 }
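
/*
 * Editorial example (hypothetical, not part of the driver): a META
 * pattern item as validated above, assuming the full 32-bit metadata
 * register is available; with reg C0 the mask must fit dv_meta_mask.
 */
static __rte_unused void
flow_dv_doc_example_meta_item(void)
{
        static const struct rte_flow_item_meta spec = { .data = 0xcafe };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_META,
                .spec = &spec,
                .mask = &rte_flow_item_meta_mask,
        };

        (void)item;
}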
1500
1501 /**
1502  * Validate TAG item.
1503  *
1504  * @param[in] dev
1505  *   Pointer to the rte_eth_dev structure.
1506  * @param[in] item
1507  *   Item specification.
1508  * @param[in] attr
1509  *   Attributes of flow that includes this item.
1510  * @param[out] error
1511  *   Pointer to error structure.
1512  *
1513  * @return
1514  *   0 on success, a negative errno value otherwise and rte_errno is set.
1515  */
1516 static int
1517 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1518                           const struct rte_flow_item *item,
1519                           const struct rte_flow_attr *attr __rte_unused,
1520                           struct rte_flow_error *error)
1521 {
1522         const struct rte_flow_item_tag *spec = item->spec;
1523         const struct rte_flow_item_tag *mask = item->mask;
1524         const struct rte_flow_item_tag nic_mask = {
1525                 .data = RTE_BE32(UINT32_MAX),
1526                 .index = 0xff,
1527         };
1528         int ret;
1529
1530         if (!mlx5_flow_ext_mreg_supported(dev))
1531                 return rte_flow_error_set(error, ENOTSUP,
1532                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1533                                           "extensive metadata register"
1534                                           " isn't supported");
1535         if (!spec)
1536                 return rte_flow_error_set(error, EINVAL,
1537                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1538                                           item->spec,
1539                                           "data cannot be empty");
1540         if (!mask)
1541                 mask = &rte_flow_item_tag_mask;
1542         if (!mask->data)
1543                 return rte_flow_error_set(error, EINVAL,
1544                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1545                                         "mask cannot be zero");
1546
1547         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1548                                         (const uint8_t *)&nic_mask,
1549                                         sizeof(struct rte_flow_item_tag),
1550                                         error);
1551         if (ret < 0)
1552                 return ret;
1553         if (mask->index != 0xff)
1554                 return rte_flow_error_set(error, EINVAL,
1555                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1556                                           "partial mask for tag index"
1557                                           " is not supported");
1558         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1559         if (ret < 0)
1560                 return ret;
1561         MLX5_ASSERT(ret != REG_NONE);
1562         return 0;
1563 }
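
/*
 * Editorial example (hypothetical, not part of the driver): a TAG
 * pattern item; the default mask already carries the mandatory full
 * index mask (0xff) checked above. Data and index are arbitrary.
 */
static __rte_unused void
flow_dv_doc_example_tag_item(void)
{
        static const struct rte_flow_item_tag spec = {
                .data = 1,
                .index = 0,
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_TAG,
                .spec = &spec,
                .mask = &rte_flow_item_tag_mask,
        };

        (void)item;
}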
1564
1565 /**
1566  * Validate port_id (vport) item.
1567  *
1568  * @param[in] dev
1569  *   Pointer to the rte_eth_dev structure.
1570  * @param[in] item
1571  *   Item specification.
1572  * @param[in] attr
1573  *   Attributes of flow that includes this item.
1574  * @param[in] item_flags
1575  *   Bit-fields that holds the items detected until now.
1576  * @param[out] error
1577  *   Pointer to error structure.
1578  *
1579  * @return
1580  *   0 on success, a negative errno value otherwise and rte_errno is set.
1581  */
1582 static int
1583 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1584                               const struct rte_flow_item *item,
1585                               const struct rte_flow_attr *attr,
1586                               uint64_t item_flags,
1587                               struct rte_flow_error *error)
1588 {
1589         const struct rte_flow_item_port_id *spec = item->spec;
1590         const struct rte_flow_item_port_id *mask = item->mask;
1591         const struct rte_flow_item_port_id switch_mask = {
1592                         .id = 0xffffffff,
1593         };
1594         struct mlx5_priv *esw_priv;
1595         struct mlx5_priv *dev_priv;
1596         int ret;
1597
1598         if (!attr->transfer)
1599                 return rte_flow_error_set(error, EINVAL,
1600                                           RTE_FLOW_ERROR_TYPE_ITEM,
1601                                           NULL,
1602                                           "match on port id is valid only"
1603                                           " when transfer flag is enabled");
1604         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1605                 return rte_flow_error_set(error, ENOTSUP,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1607                                           "multiple source ports are not"
1608                                           " supported");
1609         if (!mask)
1610                 mask = &switch_mask;
1611         if (mask->id != 0xffffffff)
1612                 return rte_flow_error_set(error, ENOTSUP,
1613                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1614                                            mask,
1615                                            "no support for partial mask on"
1616                                            " \"id\" field");
1617         ret = mlx5_flow_item_acceptable
1618                                 (item, (const uint8_t *)mask,
1619                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1620                                  sizeof(struct rte_flow_item_port_id),
1621                                  error);
1622         if (ret)
1623                 return ret;
1624         if (!spec)
1625                 return 0;
1626         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1627         if (!esw_priv)
1628                 return rte_flow_error_set(error, rte_errno,
1629                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1630                                           "failed to obtain E-Switch info for"
1631                                           " port");
1632         dev_priv = mlx5_dev_to_eswitch_info(dev);
1633         if (!dev_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1636                                           NULL,
1637                                           "failed to obtain E-Switch info");
1638         if (esw_priv->domain_id != dev_priv->domain_id)
1639                 return rte_flow_error_set(error, EINVAL,
1640                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1641                                           "cannot match on a port from a"
1642                                           " different E-Switch");
1643         return 0;
1644 }
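
/*
 * Editorial example (hypothetical, not part of the driver): a port_id
 * pattern item with the mandatory full "id" mask; valid only in a
 * transfer (E-Switch) rule, and port 1 is an arbitrary DPDK port on
 * the same E-Switch.
 */
static __rte_unused void
flow_dv_doc_example_port_id_item(void)
{
        static const struct rte_flow_item_port_id spec = { .id = 1 };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .spec = &spec,
                .mask = &rte_flow_item_port_id_mask,
        };

        (void)item;
}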
1645
1646 /**
1647  * Validate VLAN item.
1648  *
1649  * @param[in] item
1650  *   Item specification.
1651  * @param[in] item_flags
1652  *   Bit-fields that holds the items detected until now.
1653  * @param[in] dev
1654  *   Ethernet device flow is being created on.
1655  * @param[out] error
1656  *   Pointer to error structure.
1657  *
1658  * @return
1659  *   0 on success, a negative errno value otherwise and rte_errno is set.
1660  */
1661 static int
1662 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1663                            uint64_t item_flags,
1664                            struct rte_eth_dev *dev,
1665                            struct rte_flow_error *error)
1666 {
1667         const struct rte_flow_item_vlan *mask = item->mask;
1668         const struct rte_flow_item_vlan nic_mask = {
1669                 .tci = RTE_BE16(UINT16_MAX),
1670                 .inner_type = RTE_BE16(UINT16_MAX),
1671         };
1672         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1673         int ret;
1674         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1675                                         MLX5_FLOW_LAYER_INNER_L4) :
1676                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1677                                         MLX5_FLOW_LAYER_OUTER_L4);
1678         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1679                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1680
1681         if (item_flags & vlanm)
1682                 return rte_flow_error_set(error, EINVAL,
1683                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1684                                           "multiple VLAN layers not supported");
1685         else if ((item_flags & l34m) != 0)
1686                 return rte_flow_error_set(error, EINVAL,
1687                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1688                                           "VLAN cannot follow L3/L4 layer");
1689         if (!mask)
1690                 mask = &rte_flow_item_vlan_mask;
1691         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1692                                         (const uint8_t *)&nic_mask,
1693                                         sizeof(struct rte_flow_item_vlan),
1694                                         error);
1695         if (ret)
1696                 return ret;
1697         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1698                 struct mlx5_priv *priv = dev->data->dev_private;
1699
1700                 if (priv->vmwa_context) {
1701                         /*
1702                          * Non-NULL context means we have a virtual machine
1703                          * and SR-IOV enabled, we have to create VLAN interface
1704                          * to make hypervisor to setup E-Switch vport
1705                          * context correctly. We avoid creating the multiple
1706                          * VLAN interfaces, so we cannot support VLAN tag mask.
1707                          */
1708                         return rte_flow_error_set(error, EINVAL,
1709                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1710                                                   item,
1711                                                   "VLAN tag mask is not"
1712                                                   " supported in virtual"
1713                                                   " environment");
1714                 }
1715         }
1716         return 0;
1717 }
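
/*
 * Editorial example (hypothetical, not part of the driver): a VLAN
 * pattern item with a full TCI match. TCI 0xa07b decomposes into
 * PCP 5, DEI 0, VID 0x07b; note that in a VM with SR-IOV only the
 * plain VID mask 0x0fff is accepted, as explained above.
 */
static __rte_unused void
flow_dv_doc_example_vlan_item(void)
{
        static const struct rte_flow_item_vlan spec = {
                .tci = RTE_BE16(0xa07b),
        };
        static const struct rte_flow_item_vlan mask = {
                .tci = RTE_BE16(0xffff),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .spec = &spec,
                .mask = &mask,
        };

        (void)item;
}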
1718
1719 /*
1720  * GTP flags are contained in 1 byte of the format:
1721  * -------------------------------------------
1722  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1723  * |-----------------------------------------|
1724  * | value | Version | PT | Res | E | S | PN |
1725  * -------------------------------------------
1726  *
1727  * Matching is supported only for GTP flags E, S, PN.
1728  */
1729 #define MLX5_GTP_FLAGS_MASK     0x07
1730
1731 /**
1732  * Validate GTP item.
1733  *
1734  * @param[in] dev
1735  *   Pointer to the rte_eth_dev structure.
1736  * @param[in] item
1737  *   Item specification.
1738  * @param[in] item_flags
1739  *   Bit-fields that holds the items detected until now.
1740  * @param[out] error
1741  *   Pointer to error structure.
1742  *
1743  * @return
1744  *   0 on success, a negative errno value otherwise and rte_errno is set.
1745  */
1746 static int
1747 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1748                           const struct rte_flow_item *item,
1749                           uint64_t item_flags,
1750                           struct rte_flow_error *error)
1751 {
1752         struct mlx5_priv *priv = dev->data->dev_private;
1753         const struct rte_flow_item_gtp *spec = item->spec;
1754         const struct rte_flow_item_gtp *mask = item->mask;
1755         const struct rte_flow_item_gtp nic_mask = {
1756                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1757                 .msg_type = 0xff,
1758                 .teid = RTE_BE32(0xffffffff),
1759         };
1760
1761         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1762                 return rte_flow_error_set(error, ENOTSUP,
1763                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1764                                           "GTP support is not enabled");
1765         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1766                 return rte_flow_error_set(error, ENOTSUP,
1767                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1768                                           "multiple tunnel layers not"
1769                                           " supported");
1770         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1771                 return rte_flow_error_set(error, EINVAL,
1772                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1773                                           "no outer UDP layer found");
1774         if (!mask)
1775                 mask = &rte_flow_item_gtp_mask;
1776         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1777                 return rte_flow_error_set(error, ENOTSUP,
1778                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1779                                           "Match is supported for GTP"
1780                                           " flags only");
1781         return mlx5_flow_item_acceptable
1782                 (item, (const uint8_t *)mask,
1783                  (const uint8_t *)&nic_mask,
1784                  sizeof(struct rte_flow_item_gtp),
1785                  error);
1786 }
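
/*
 * Editorial example (hypothetical, not part of the driver): a GTP
 * pattern item matching an arbitrary TEID with the S flag (bit 1),
 * which lies within MLX5_GTP_FLAGS_MASK as required above.
 */
static __rte_unused void
flow_dv_doc_example_gtp_item(void)
{
        static const struct rte_flow_item_gtp spec = {
                .v_pt_rsv_flags = 0x02, /* S flag only. */
                .teid = RTE_BE32(1234),
        };
        static const struct rte_flow_item_gtp mask = {
                .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
                .teid = RTE_BE32(UINT32_MAX),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_GTP,
                .spec = &spec,
                .mask = &mask,
        };

        (void)item;
}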
1787
1788 /**
1789  * Validate the pop VLAN action.
1790  *
1791  * @param[in] dev
1792  *   Pointer to the rte_eth_dev structure.
1793  * @param[in] action_flags
1794  *   Holds the actions detected until now.
1795  * @param[in] action
1796  *   Pointer to the pop vlan action.
1797  * @param[in] item_flags
1798  *   The items found in this flow rule.
1799  * @param[in] attr
1800  *   Pointer to flow attributes.
1801  * @param[out] error
1802  *   Pointer to error structure.
1803  *
1804  * @return
1805  *   0 on success, a negative errno value otherwise and rte_errno is set.
1806  */
1807 static int
1808 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1809                                  uint64_t action_flags,
1810                                  const struct rte_flow_action *action,
1811                                  uint64_t item_flags,
1812                                  const struct rte_flow_attr *attr,
1813                                  struct rte_flow_error *error)
1814 {
1815         const struct mlx5_priv *priv = dev->data->dev_private;
1816
1819         if (!priv->sh->pop_vlan_action)
1820                 return rte_flow_error_set(error, ENOTSUP,
1821                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1822                                           NULL,
1823                                           "pop vlan action is not supported");
1824         if (attr->egress)
1825                 return rte_flow_error_set(error, ENOTSUP,
1826                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1827                                           NULL,
1828                                           "pop vlan action not supported for "
1829                                           "egress");
1830         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1831                 return rte_flow_error_set(error, ENOTSUP,
1832                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1833                                           "no support for multiple VLAN "
1834                                           "actions");
1835         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1836                 return rte_flow_error_set(error, ENOTSUP,
1837                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1838                                           NULL,
1839                                           "cannot pop vlan without a "
1840                                           "match on (outer) vlan in the flow");
1841         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1842                 return rte_flow_error_set(error, EINVAL,
1843                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1844                                           "wrong action order, port_id should "
1845                                           "be after pop VLAN action");
1846         if (!attr->transfer && priv->representor)
1847                 return rte_flow_error_set(error, ENOTSUP,
1848                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1849                                           "pop vlan action for VF representor "
1850                                           "not supported on NIC table");
1851         return 0;
1852 }
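
/*
 * Editorial example (hypothetical, not part of the driver): the pop
 * VLAN action takes no configuration; the rule carrying it must also
 * match on the (outer) VLAN and must not be an egress rule, per the
 * checks above.
 */
static __rte_unused void
flow_dv_doc_example_pop_vlan(void)
{
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        (void)actions;
}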
1853
1854 /**
1855  * Get the default VLAN info from the VLAN item in the pattern, if any.
1856  *
1857  * @param[in] items
1858  *   The list of item specifications.
1859  * @param[out] vlan
1860  *   Pointer to the VLAN info to fill.
1864  */
1865 static void
1866 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1867                                   struct rte_vlan_hdr *vlan)
1868 {
1869         const struct rte_flow_item_vlan nic_mask = {
1870                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1871                                 MLX5DV_FLOW_VLAN_VID_MASK),
1872                 .inner_type = RTE_BE16(0xffff),
1873         };
1874
1875         if (items == NULL)
1876                 return;
1877         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1878                 int type = items->type;
1879
1880                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1881                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1882                         break;
1883         }
1884         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1885                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1886                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1887
1888                 /* If VLAN item in pattern doesn't contain data, return here. */
1889                 if (!vlan_v)
1890                         return;
1891                 if (!vlan_m)
1892                         vlan_m = &nic_mask;
1893                 /* Only full match values are accepted */
1894                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1895                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1896                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1897                         vlan->vlan_tci |=
1898                                 rte_be_to_cpu_16(vlan_v->tci &
1899                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1900                 }
1901                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1902                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1903                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1904                         vlan->vlan_tci |=
1905                                 rte_be_to_cpu_16(vlan_v->tci &
1906                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1907                 }
1908                 if (vlan_m->inner_type == nic_mask.inner_type)
1909                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1910                                                            vlan_m->inner_type);
1911         }
1912 }
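
/*
 * Editorial example (hypothetical, not part of the driver): how an
 * arbitrary host-order TCI of 0xa07b splits under the PCP/VID masks
 * used above.
 */
static __rte_unused void
flow_dv_doc_example_tci_split(void)
{
        uint16_t tci = 0xa07b;
        uint16_t pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
                       MLX5DV_FLOW_VLAN_PCP_SHIFT;
        uint16_t vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;

        MLX5_ASSERT(pcp == 5 && vid == 0x07b);
        (void)pcp;
        (void)vid;
}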
1913
1914 /**
1915  * Validate the push VLAN action.
1916  *
1917  * @param[in] dev
1918  *   Pointer to the rte_eth_dev structure.
1919  * @param[in] action_flags
1920  *   Holds the actions detected until now.
1921  * @param[in] vlan_m
1922  *   Pointer to the VLAN item mask from the pattern, NULL if not matched.
1923  * @param[in] action
1924  *   Pointer to the action structure.
1925  * @param[in] attr
1926  *   Pointer to flow attributes
1927  * @param[out] error
1928  *   Pointer to error structure.
1929  *
1930  * @return
1931  *   0 on success, a negative errno value otherwise and rte_errno is set.
1932  */
1933 static int
1934 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
1935                                   uint64_t action_flags,
1936                                   const struct rte_flow_item_vlan *vlan_m,
1937                                   const struct rte_flow_action *action,
1938                                   const struct rte_flow_attr *attr,
1939                                   struct rte_flow_error *error)
1940 {
1941         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1942         const struct mlx5_priv *priv = dev->data->dev_private;
1943
1944         if (!attr->transfer && attr->ingress)
1945                 return rte_flow_error_set(error, ENOTSUP,
1946                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1947                                           NULL,
1948                                           "push VLAN action not supported for "
1949                                           "ingress");
1950         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1951             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1952                 return rte_flow_error_set(error, EINVAL,
1953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1954                                           "invalid vlan ethertype");
1955         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1956                 return rte_flow_error_set(error, ENOTSUP,
1957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1958                                           "no support for multiple VLAN "
1959                                           "actions");
1960         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1961                 return rte_flow_error_set(error, EINVAL,
1962                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1963                                           "wrong action order, port_id should "
1964                                           "be after push VLAN");
1965         if (!attr->transfer && priv->representor)
1966                 return rte_flow_error_set(error, ENOTSUP,
1967                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1968                                           "push vlan action for VF representor "
1969                                           "not supported on NIC table");
1970         if (vlan_m &&
1971             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
1972             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
1973                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
1974             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
1975             !(mlx5_flow_find_action
1976                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
1977                 return rte_flow_error_set(error, EINVAL,
1978                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1979                                           "not full match mask on VLAN PCP and "
1980                                           "there is no of_set_vlan_pcp action, "
1981                                           "push VLAN action cannot figure out "
1982                                           "PCP value");
1983         if (vlan_m &&
1984             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
1985             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
1986                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
1987             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
1988             !(mlx5_flow_find_action
1989                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
1990                 return rte_flow_error_set(error, EINVAL,
1991                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1992                                           "not full match mask on VLAN VID and "
1993                                           "there is no of_set_vlan_vid action, "
1994                                           "push VLAN action cannot figure out "
1995                                           "VID value");
1997         return 0;
1998 }
1999
2000 /**
2001  * Validate the set VLAN PCP.
2002  *
2003  * @param[in] action_flags
2004  *   Holds the actions detected until now.
2005  * @param[in] actions
2006  *   Pointer to the list of actions remaining in the flow rule.
2007  * @param[out] error
2008  *   Pointer to error structure.
2009  *
2010  * @return
2011  *   0 on success, a negative errno value otherwise and rte_errno is set.
2012  */
2013 static int
2014 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2015                                      const struct rte_flow_action actions[],
2016                                      struct rte_flow_error *error)
2017 {
2018         const struct rte_flow_action *action = actions;
2019         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2020
2021         if (conf->vlan_pcp > 7)
2022                 return rte_flow_error_set(error, EINVAL,
2023                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2024                                           "VLAN PCP value is too big");
2025         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2026                 return rte_flow_error_set(error, ENOTSUP,
2027                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2028                                           "set VLAN PCP action must follow "
2029                                           "the push VLAN action");
2030         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2031                 return rte_flow_error_set(error, ENOTSUP,
2032                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2033                                           "Multiple VLAN PCP modification are "
2034                                           "not supported");
2035         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2036                 return rte_flow_error_set(error, EINVAL,
2037                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2038                                           "wrong action order, port_id should "
2039                                           "be after set VLAN PCP");
2040         return 0;
2041 }
2042
2043 /**
2044  * Validate the set VLAN VID.
2045  *
2046  * @param[in] item_flags
2047  *   Holds the items detected in this rule.
2048  * @param[in] action_flags
2049  *   Holds the actions detected until now.
2050  * @param[in] actions
2051  *   Pointer to the list of actions remaining in the flow rule.
2052  * @param[out] error
2053  *   Pointer to error structure.
2054  *
2055  * @return
2056  *   0 on success, a negative errno value otherwise and rte_errno is set.
2057  */
2058 static int
2059 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2060                                      uint64_t action_flags,
2061                                      const struct rte_flow_action actions[],
2062                                      struct rte_flow_error *error)
2063 {
2064         const struct rte_flow_action *action = actions;
2065         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2066
2067         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2068                 return rte_flow_error_set(error, EINVAL,
2069                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2070                                           "VLAN VID value is too big");
2071         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2072             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2073                 return rte_flow_error_set(error, ENOTSUP,
2074                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2075                                           "set VLAN VID action must follow push"
2076                                           " VLAN action or match on VLAN item");
2077         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2078                 return rte_flow_error_set(error, ENOTSUP,
2079                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2080                                           "Multiple VLAN VID modifications are "
2081                                           "not supported");
2082         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2083                 return rte_flow_error_set(error, EINVAL,
2084                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2085                                           "wrong action order, port_id should "
2086                                           "be after set VLAN VID");
2087         return 0;
2088 }
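
/*
 * Editorial example (hypothetical, not part of the driver): the action
 * order the three validators above enforce - push VLAN first, then the
 * PCP/VID setters, with any port_id action coming later. Ethertype,
 * PCP 3 and VID 100 are arbitrary illustration values.
 */
static __rte_unused void
flow_dv_doc_example_vlan_actions(void)
{
        static const struct rte_flow_action_of_push_vlan push = {
                .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
        };
        static const struct rte_flow_action_of_set_vlan_pcp pcp = {
                .vlan_pcp = 3,
        };
        static const struct rte_flow_action_of_set_vlan_vid vid = {
                .vlan_vid = RTE_BE16(100),
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN,
                  .conf = &push },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
                  .conf = &pcp },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
                  .conf = &vid },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        (void)actions;
}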
2089
2090 /**
2091  * Validate the FLAG action.
2092  *
2093  * @param[in] dev
2094  *   Pointer to the rte_eth_dev structure.
2095  * @param[in] action_flags
2096  *   Holds the actions detected until now.
2097  * @param[in] attr
2098  *   Pointer to flow attributes
2099  * @param[out] error
2100  *   Pointer to error structure.
2101  *
2102  * @return
2103  *   0 on success, a negative errno value otherwise and rte_errno is set.
2104  */
2105 static int
2106 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2107                              uint64_t action_flags,
2108                              const struct rte_flow_attr *attr,
2109                              struct rte_flow_error *error)
2110 {
2111         struct mlx5_priv *priv = dev->data->dev_private;
2112         struct mlx5_dev_config *config = &priv->config;
2113         int ret;
2114
2115         /* Fall back if no extended metadata register support. */
2116         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2117                 return mlx5_flow_validate_action_flag(action_flags, attr,
2118                                                       error);
2119         /* Extensive metadata mode requires registers. */
2120         if (!mlx5_flow_ext_mreg_supported(dev))
2121                 return rte_flow_error_set(error, ENOTSUP,
2122                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2123                                           "no metadata registers "
2124                                           "to support flag action");
2125         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2126                 return rte_flow_error_set(error, ENOTSUP,
2127                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2128                                           "extended metadata register"
2129                                           " isn't available");
2130         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2131         if (ret < 0)
2132                 return ret;
2133         MLX5_ASSERT(ret > 0);
2134         if (action_flags & MLX5_FLOW_ACTION_MARK)
2135                 return rte_flow_error_set(error, EINVAL,
2136                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2137                                           "can't mark and flag in same flow");
2138         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2139                 return rte_flow_error_set(error, EINVAL,
2140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2141                                           "can't have 2 flag"
2142                                           " actions in same flow");
2143         return 0;
2144 }
2145
2146 /**
2147  * Validate MARK action.
2148  *
2149  * @param[in] dev
2150  *   Pointer to the rte_eth_dev structure.
2151  * @param[in] action
2152  *   Pointer to action.
2153  * @param[in] action_flags
2154  *   Holds the actions detected until now.
2155  * @param[in] attr
2156  *   Pointer to flow attributes
2157  * @param[out] error
2158  *   Pointer to error structure.
2159  *
2160  * @return
2161  *   0 on success, a negative errno value otherwise and rte_errno is set.
2162  */
2163 static int
2164 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2165                              const struct rte_flow_action *action,
2166                              uint64_t action_flags,
2167                              const struct rte_flow_attr *attr,
2168                              struct rte_flow_error *error)
2169 {
2170         struct mlx5_priv *priv = dev->data->dev_private;
2171         struct mlx5_dev_config *config = &priv->config;
2172         const struct rte_flow_action_mark *mark = action->conf;
2173         int ret;
2174
2175         /* Fall back if no extended metadata register support. */
2176         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2177                 return mlx5_flow_validate_action_mark(action, action_flags,
2178                                                       attr, error);
2179         /* Extensive metadata mode requires registers. */
2180         if (!mlx5_flow_ext_mreg_supported(dev))
2181                 return rte_flow_error_set(error, ENOTSUP,
2182                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2183                                           "no metadata registers "
2184                                           "to support mark action");
2185         if (!priv->sh->dv_mark_mask)
2186                 return rte_flow_error_set(error, ENOTSUP,
2187                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2188                                           "extended metadata register"
2189                                           " isn't available");
2190         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2191         if (ret < 0)
2192                 return ret;
2193         MLX5_ASSERT(ret > 0);
2194         if (!mark)
2195                 return rte_flow_error_set(error, EINVAL,
2196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2197                                           "configuration cannot be null");
2198         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2199                 return rte_flow_error_set(error, EINVAL,
2200                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2201                                           &mark->id,
2202                                           "mark id exceeds the limit");
2203         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2204                 return rte_flow_error_set(error, EINVAL,
2205                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2206                                           "can't flag and mark in same flow");
2207         if (action_flags & MLX5_FLOW_ACTION_MARK)
2208                 return rte_flow_error_set(error, EINVAL,
2209                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2210                                           "can't have 2 mark actions in same"
2211                                           " flow");
2212         return 0;
2213 }
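
/*
 * Editorial example (hypothetical, not part of the driver): a MARK
 * action with an arbitrary id; it must stay below MLX5_FLOW_MARK_MAX
 * masked by dv_mark_mask and cannot share a flow with FLAG, per the
 * checks above.
 */
static __rte_unused void
flow_dv_doc_example_mark_action(void)
{
        static const struct rte_flow_action_mark mark = { .id = 0xbef };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_MARK,
                .conf = &mark,
        };

        (void)action;
}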
2214
2215 /**
2216  * Validate SET_META action.
2217  *
2218  * @param[in] dev
2219  *   Pointer to the rte_eth_dev structure.
2220  * @param[in] action
2221  *   Pointer to the action structure.
2222  * @param[in] action_flags
2223  *   Holds the actions detected until now.
2224  * @param[in] attr
2225  *   Pointer to flow attributes
2226  * @param[out] error
2227  *   Pointer to error structure.
2228  *
2229  * @return
2230  *   0 on success, a negative errno value otherwise and rte_errno is set.
2231  */
2232 static int
2233 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2234                                  const struct rte_flow_action *action,
2235                                  uint64_t action_flags __rte_unused,
2236                                  const struct rte_flow_attr *attr,
2237                                  struct rte_flow_error *error)
2238 {
2239         const struct rte_flow_action_set_meta *conf;
2240         uint32_t nic_mask = UINT32_MAX;
2241         int reg;
2242
2243         if (!mlx5_flow_ext_mreg_supported(dev))
2244                 return rte_flow_error_set(error, ENOTSUP,
2245                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2246                                           "extended metadata register"
2247                                           " isn't supported");
2248         reg = flow_dv_get_metadata_reg(dev, attr, error);
2249         if (reg < 0)
2250                 return reg;
2251         if (reg != REG_A && reg != REG_B) {
2252                 struct mlx5_priv *priv = dev->data->dev_private;
2253
2254                 nic_mask = priv->sh->dv_meta_mask;
2255         }
2256         if (!(action->conf))
2257                 return rte_flow_error_set(error, EINVAL,
2258                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2259                                           "configuration cannot be null");
2260         conf = (const struct rte_flow_action_set_meta *)action->conf;
2261         if (!conf->mask)
2262                 return rte_flow_error_set(error, EINVAL,
2263                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2264                                           "zero mask doesn't have any effect");
2265         if (conf->mask & ~nic_mask)
2266                 return rte_flow_error_set(error, EINVAL,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "meta data must be within reg C0");
2269         return 0;
2270 }
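
/*
 * Editorial example (hypothetical, not part of the driver): a SET_META
 * action with a full mask, which is only valid when a whole register
 * (REG_A/REG_B) backs the metadata; with reg C0 the mask must fit
 * dv_meta_mask. The data value is arbitrary.
 */
static __rte_unused void
flow_dv_doc_example_set_meta(void)
{
        static const struct rte_flow_action_set_meta conf = {
                .data = 0x1234,
                .mask = UINT32_MAX,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_SET_META,
                .conf = &conf,
        };

        (void)action;
}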
2271
2272 /**
2273  * Validate SET_TAG action.
2274  *
2275  * @param[in] dev
2276  *   Pointer to the rte_eth_dev structure.
2277  * @param[in] action
2278  *   Pointer to the action structure.
2279  * @param[in] action_flags
2280  *   Holds the actions detected until now.
2281  * @param[in] attr
2282  *   Pointer to flow attributes
2283  * @param[out] error
2284  *   Pointer to error structure.
2285  *
2286  * @return
2287  *   0 on success, a negative errno value otherwise and rte_errno is set.
2288  */
2289 static int
2290 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2291                                 const struct rte_flow_action *action,
2292                                 uint64_t action_flags,
2293                                 const struct rte_flow_attr *attr,
2294                                 struct rte_flow_error *error)
2295 {
2296         const struct rte_flow_action_set_tag *conf;
2297         const uint64_t terminal_action_flags =
2298                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2299                 MLX5_FLOW_ACTION_RSS;
2300         int ret;
2301
2302         if (!mlx5_flow_ext_mreg_supported(dev))
2303                 return rte_flow_error_set(error, ENOTSUP,
2304                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2305                                           "extensive metadata register"
2306                                           " isn't supported");
2307         if (!(action->conf))
2308                 return rte_flow_error_set(error, EINVAL,
2309                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2310                                           "configuration cannot be null");
2311         conf = (const struct rte_flow_action_set_tag *)action->conf;
2312         if (!conf->mask)
2313                 return rte_flow_error_set(error, EINVAL,
2314                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2315                                           "zero mask doesn't have any effect");
2316         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2317         if (ret < 0)
2318                 return ret;
2319         if (!attr->transfer && attr->ingress &&
2320             (action_flags & terminal_action_flags))
2321                 return rte_flow_error_set(error, EINVAL,
2322                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2323                                           "set_tag has no effect"
2324                                           " with terminal actions");
2325         return 0;
2326 }
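
/*
 * Editorial example (hypothetical, not part of the driver): a SET_TAG
 * action writing an arbitrary value to tag register index 0 with a
 * non-zero mask, as required above.
 */
static __rte_unused void
flow_dv_doc_example_set_tag(void)
{
        static const struct rte_flow_action_set_tag conf = {
                .data = 7,
                .mask = UINT32_MAX,
                .index = 0,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_SET_TAG,
                .conf = &conf,
        };

        (void)action;
}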
2327
2328 /**
2329  * Validate count action.
2330  *
2331  * @param[in] dev
2332  *   Pointer to rte_eth_dev structure.
2333  * @param[out] error
2334  *   Pointer to error structure.
2335  *
2336  * @return
2337  *   0 on success, a negative errno value otherwise and rte_errno is set.
2338  */
2339 static int
2340 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2341                               struct rte_flow_error *error)
2342 {
2343         struct mlx5_priv *priv = dev->data->dev_private;
2344
2345         if (!priv->config.devx)
2346                 goto notsup_err;
2347 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2348         return 0;
2349 #endif
2350 notsup_err:
2351         return rte_flow_error_set
2352                       (error, ENOTSUP,
2353                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2354                        NULL,
2355                        "count action not supported");
2356 }
2357
2358 /**
2359  * Validate the L2 encap action.
2360  *
2361  * @param[in] dev
2362  *   Pointer to the rte_eth_dev structure.
2363  * @param[in] action_flags
2364  *   Holds the actions detected until now.
2365  * @param[in] action
2366  *   Pointer to the action structure.
2367  * @param[in] attr
2368  *   Pointer to flow attributes.
2369  * @param[out] error
2370  *   Pointer to error structure.
2371  *
2372  * @return
2373  *   0 on success, a negative errno value otherwise and rte_errno is set.
2374  */
2375 static int
2376 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2377                                  uint64_t action_flags,
2378                                  const struct rte_flow_action *action,
2379                                  const struct rte_flow_attr *attr,
2380                                  struct rte_flow_error *error)
2381 {
2382         const struct mlx5_priv *priv = dev->data->dev_private;
2383
2384         if (!(action->conf))
2385                 return rte_flow_error_set(error, EINVAL,
2386                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2387                                           "configuration cannot be null");
2388         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2389                 return rte_flow_error_set(error, EINVAL,
2390                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2391                                           "can only have a single encap action "
2392                                           "in a flow");
2393         if (!attr->transfer && priv->representor)
2394                 return rte_flow_error_set(error, ENOTSUP,
2395                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2396                                           "encap action for VF representor "
2397                                           "not supported on NIC table");
2398         return 0;
2399 }
2400
2401 /**
2402  * Validate a decap action.
2403  *
2404  * @param[in] dev
2405  *   Pointer to the rte_eth_dev structure.
2406  * @param[in] action_flags
2407  *   Holds the actions detected until now.
2408  * @param[in] attr
2409  *   Pointer to flow attributes
2410  * @param[out] error
2411  *   Pointer to error structure.
2412  *
2413  * @return
2414  *   0 on success, a negative errno value otherwise and rte_errno is set.
2415  */
2416 static int
2417 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2418                               uint64_t action_flags,
2419                               const struct rte_flow_attr *attr,
2420                               struct rte_flow_error *error)
2421 {
2422         const struct mlx5_priv *priv = dev->data->dev_private;
2423
2424         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2425             !priv->config.decap_en)
2426                 return rte_flow_error_set(error, ENOTSUP,
2427                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2428                                           "decap is not enabled");
2429         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2430                 return rte_flow_error_set(error, ENOTSUP,
2431                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2432                                           action_flags &
2433                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2434                                           "have a single decap action" : "decap "
2435                                           "after encap is not supported");
2436         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2437                 return rte_flow_error_set(error, EINVAL,
2438                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2439                                           "can't have decap action after"
2440                                           " modify action");
2441         if (attr->egress)
2442                 return rte_flow_error_set(error, ENOTSUP,
2443                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2444                                           NULL,
2445                                           "decap action not supported for "
2446                                           "egress");
2447         if (!attr->transfer && priv->representor)
2448                 return rte_flow_error_set(error, ENOTSUP,
2449                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2450                                           "decap action for VF representor "
2451                                           "not supported on NIC table");
2452         return 0;
2453 }
2454
2455 static const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2456
2457 /**
2458  * Validate the raw encap and decap actions.
2459  *
2460  * @param[in] dev
2461  *   Pointer to the rte_eth_dev structure.
2462  * @param[in] decap
2463  *   Pointer to the decap action.
2464  * @param[in] encap
2465  *   Pointer to the encap action.
2466  * @param[in] attr
2467  *   Pointer to flow attributes
2468  * @param[in, out] action_flags
2469  *   Holds the actions detected until now.
2470  * @param[out] actions_n
2471  *   Pointer to the number of actions counter.
2472  * @param[out] error
2473  *   Pointer to error structure.
2474  *
2475  * @return
2476  *   0 on success, a negative errno value otherwise and rte_errno is set.
2477  */
2478 static int
2479 flow_dv_validate_action_raw_encap_decap
2480         (struct rte_eth_dev *dev,
2481          const struct rte_flow_action_raw_decap *decap,
2482          const struct rte_flow_action_raw_encap *encap,
2483          const struct rte_flow_attr *attr, uint64_t *action_flags,
2484          int *actions_n, struct rte_flow_error *error)
2485 {
2486         const struct mlx5_priv *priv = dev->data->dev_private;
2487         int ret;
2488
2489         if (encap && (!encap->size || !encap->data))
2490                 return rte_flow_error_set(error, EINVAL,
2491                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2492                                           "raw encap data cannot be empty");
2493         if (decap && encap) {
2494                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2495                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2496                         /* L3 encap. */
2497                         decap = NULL;
2498                 else if (encap->size <=
2499                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2500                            decap->size >
2501                            MLX5_ENCAPSULATION_DECISION_SIZE)
2502                         /* L3 decap. */
2503                         encap = NULL;
2504                 else if (encap->size >
2505                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2506                            decap->size >
2507                            MLX5_ENCAPSULATION_DECISION_SIZE)
2508                         /* 2 L2 actions: encap and decap. */
2509                         ;
2510                 else
2511                         return rte_flow_error_set(error,
2512                                 ENOTSUP,
2513                                 RTE_FLOW_ERROR_TYPE_ACTION,
2514                                 NULL, "unsupported too small "
2515                                 "raw decap and too small raw "
2516                                 "encap combination");
2517         }
2518         if (decap) {
2519                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2520                                                     error);
2521                 if (ret < 0)
2522                         return ret;
2523                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2524                 ++(*actions_n);
2525         }
2526         if (encap) {
2527                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2528                         return rte_flow_error_set(error, ENOTSUP,
2529                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2530                                                   NULL,
2531                                                   "small raw encap size");
2532                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2533                         return rte_flow_error_set(error, EINVAL,
2534                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2535                                                   NULL,
2536                                                   "more than one encap action");
2537                 if (!attr->transfer && priv->representor)
2538                         return rte_flow_error_set
2539                                         (error, ENOTSUP,
2540                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2541                                          "encap action for VF representor "
2542                                          "not supported on NIC table");
2543                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2544                 ++(*actions_n);
2545         }
2546         return 0;
2547 }
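
/*
 * Editorial example (hypothetical, not part of the driver): a raw encap
 * action. The buffer here is a zeroed placeholder; a real caller
 * provides a fully formed tunnel header whose size exceeds
 * MLX5_ENCAPSULATION_DECISION_SIZE, otherwise it is treated as the
 * encap half of an L3 decap/encap pair, as classified above.
 */
static __rte_unused void
flow_dv_doc_example_raw_encap(void)
{
        static uint8_t tunnel_hdr[64];
        static const struct rte_flow_action_raw_encap conf = {
                .data = tunnel_hdr,
                .size = sizeof(tunnel_hdr),
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                .conf = &conf,
        };

        (void)action;
}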
2548
2549 /**
2550  * Find existing encap/decap resource or create and register a new one.
2551  *
2552  * @param[in, out] dev
2553  *   Pointer to rte_eth_dev structure.
2554  * @param[in, out] resource
2555  *   Pointer to encap/decap resource.
2556  * @param[in, out] dev_flow
2557  *   Pointer to the dev_flow.
2558  * @param[out] error
2559  *   Pointer to error structure.
2560  *
2561  * @return
2562  *   0 on success, a negative errno value otherwise and rte_errno is set.
2563  */
2564 static int
2565 flow_dv_encap_decap_resource_register
2566                         (struct rte_eth_dev *dev,
2567                          struct mlx5_flow_dv_encap_decap_resource *resource,
2568                          struct mlx5_flow *dev_flow,
2569                          struct rte_flow_error *error)
2570 {
2571         struct mlx5_priv *priv = dev->data->dev_private;
2572         struct mlx5_dev_ctx_shared *sh = priv->sh;
2573         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2574         struct mlx5dv_dr_domain *domain;
2575         uint32_t idx = 0;
2576         int ret;
2577
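        /*
         * Group 0 is the root table, its reformat actions must be
         * created with MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL (value 1).
         */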
2578         resource->flags = dev_flow->dv.group ? 0 : 1;
2579         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2580                 domain = sh->fdb_domain;
2581         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2582                 domain = sh->rx_domain;
2583         else
2584                 domain = sh->tx_domain;
2585         /* Lookup a matching resource from cache. */
2586         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
2587                       cache_resource, next) {
2588                 if (resource->reformat_type == cache_resource->reformat_type &&
2589                     resource->ft_type == cache_resource->ft_type &&
2590                     resource->flags == cache_resource->flags &&
2591                     resource->size == cache_resource->size &&
2592                     !memcmp((const void *)resource->buf,
2593                             (const void *)cache_resource->buf,
2594                             resource->size)) {
2595                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2596                                 (void *)cache_resource,
2597                                 rte_atomic32_read(&cache_resource->refcnt));
2598                         rte_atomic32_inc(&cache_resource->refcnt);
2599                         dev_flow->handle->dvh.rix_encap_decap = idx;
2600                         dev_flow->dv.encap_decap = cache_resource;
2601                         return 0;
2602                 }
2603         }
2604         /* Register new encap/decap resource. */
2605         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2606                                        &dev_flow->handle->dvh.rix_encap_decap);
2607         if (!cache_resource)
2608                 return rte_flow_error_set(error, ENOMEM,
2609                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2610                                           "cannot allocate resource memory");
2611         *cache_resource = *resource;
2612         ret = mlx5_flow_os_create_flow_action_packet_reformat
2613                                         (sh->ctx, domain, cache_resource,
2614                                          &cache_resource->action);
2615         if (ret) {
2616                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                     dev_flow->handle->dvh.rix_encap_decap);
2617                 return rte_flow_error_set(error, ENOMEM,
2618                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2619                                           NULL, "cannot create action");
2620         }
2621         rte_atomic32_init(&cache_resource->refcnt);
2622         rte_atomic32_inc(&cache_resource->refcnt);
2623         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
2624                      dev_flow->handle->dvh.rix_encap_decap, cache_resource,
2625                      next);
2626         dev_flow->dv.encap_decap = cache_resource;
2627         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2628                 (void *)cache_resource,
2629                 rte_atomic32_read(&cache_resource->refcnt));
2630         return 0;
2631 }
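
/*
 * Usage sketch (illustrative only; "encap_hdr" and "encap_len" are
 * hypothetical caller data): register an L2-to-L2 tunnel reformat on
 * the NIC TX domain. On success the DR action is cached and shared by
 * reference count with any later identical registration.
 *
 *	struct mlx5_flow_dv_encap_decap_resource res = {
 *		.reformat_type =
 *		   MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
 *		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *		.size = encap_len,
 *	};
 *
 *	memcpy(res.buf, encap_hdr, encap_len);
 *	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow,
 *						   error))
 *		return -rte_errno;
 */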
2632
2633 /**
2634  * Find existing table jump resource or create and register a new one.
2635  *
2636  * @param[in, out] dev
2637  *   Pointer to rte_eth_dev structure.
2638  * @param[in, out] tbl
2639  *   Pointer to flow table resource.
2640  * @param[in, out] dev_flow
2641  *   Pointer to the dev_flow.
2642  * @param[out] error
2643  *   Pointer to error structure.
2644  *
2645  * @return
2646  *   0 on success, a negative errno value otherwise and rte_errno is set.
2647  */
2648 static int
2649 flow_dv_jump_tbl_resource_register
2650                         (struct rte_eth_dev *dev __rte_unused,
2651                          struct mlx5_flow_tbl_resource *tbl,
2652                          struct mlx5_flow *dev_flow,
2653                          struct rte_flow_error *error)
2654 {
2655         struct mlx5_flow_tbl_data_entry *tbl_data =
2656                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2657         int cnt, ret;
2658
2659         MLX5_ASSERT(tbl);
2660         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2661         if (!cnt) {
2662                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
2663                                 (tbl->obj, &tbl_data->jump.action);
2664                 if (ret)
2665                         return rte_flow_error_set(error, ENOMEM,
2666                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2667                                         NULL, "cannot create jump action");
2668                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2669                         (void *)&tbl_data->jump, cnt);
2670         } else {
2671                 /* Reuse: drop the extra table reference taken by the lookup. */
2672                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2673                 MLX5_ASSERT(tbl_data->jump.action);
2674                 DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
2675                         (void *)&tbl_data->jump, cnt);
2676         }
2677         rte_atomic32_inc(&tbl_data->jump.refcnt);
2678         dev_flow->handle->rix_jump = tbl_data->idx;
2679         dev_flow->dv.jump = &tbl_data->jump;
2680         return 0;
2681 }
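
/*
 * Note: the jump action is cached per destination table. Only the first
 * registration creates the DR action and keeps the caller's table
 * reference; any later registration releases the extra table reference
 * taken by the table lookup, so a cached jump action always holds
 * exactly one reference on its target table.
 */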
2682
2683 /**
2684  * Find existing default miss resource or create and register a new one.
2685  *
2686  * @param[in, out] dev
2687  *   Pointer to rte_eth_dev structure.
2688  * @param[out] error
2689  *   Pointer to error structure.
2690  *
2691  * @return
2692  *   0 on success, a negative errno value otherwise and rte_errno is set.
2693  */
2694 static int
2695 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
2696                 struct rte_flow_error *error)
2697 {
2698         struct mlx5_priv *priv = dev->data->dev_private;
2699         struct mlx5_dev_ctx_shared *sh = priv->sh;
2700         struct mlx5_flow_default_miss_resource *cache_resource =
2701                         &sh->default_miss;
2702         int cnt = rte_atomic32_read(&cache_resource->refcnt);
2703
2704         if (!cnt) {
2705                 MLX5_ASSERT(!cache_resource->action);
2706                 cache_resource->action =
2707                         mlx5_glue->dr_create_flow_action_default_miss();
2708                 if (!cache_resource->action)
2709                         return rte_flow_error_set(error, ENOMEM,
2710                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2711                                         "cannot create default miss action");
2712                 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
2713                                 (void *)cache_resource->action, cnt);
2714         }
2715         rte_atomic32_inc(&cache_resource->refcnt);
2716         return 0;
2717 }
2718
2719 /**
2720  * Find existing table port ID resource or create and register a new one.
2721  *
2722  * @param[in, out] dev
2723  *   Pointer to rte_eth_dev structure.
2724  * @param[in, out] resource
2725  *   Pointer to port ID action resource.
2726  * @param[in, out] dev_flow
2727  *   Pointer to the dev_flow.
2728  * @param[out] error
2729  *   Pointer to error structure.
2730  *
2731  * @return
2732  *   0 on success, a negative errno value otherwise and rte_errno is set.
2733  */
2734 static int
2735 flow_dv_port_id_action_resource_register
2736                         (struct rte_eth_dev *dev,
2737                          struct mlx5_flow_dv_port_id_action_resource *resource,
2738                          struct mlx5_flow *dev_flow,
2739                          struct rte_flow_error *error)
2740 {
2741         struct mlx5_priv *priv = dev->data->dev_private;
2742         struct mlx5_dev_ctx_shared *sh = priv->sh;
2743         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2744         uint32_t idx = 0;
2745         int ret;
2746
2747         /* Lookup a matching resource from cache. */
2748         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
2749                       idx, cache_resource, next) {
2750                 if (resource->port_id == cache_resource->port_id) {
2751                         DRV_LOG(DEBUG, "port id action resource %p: "
2752                                 "refcnt %d++",
2753                                 (void *)cache_resource,
2754                                 rte_atomic32_read(&cache_resource->refcnt));
2755                         rte_atomic32_inc(&cache_resource->refcnt);
2756                         dev_flow->handle->rix_port_id_action = idx;
2757                         dev_flow->dv.port_id_action = cache_resource;
2758                         return 0;
2759                 }
2760         }
2761         /* Register new port id action resource. */
2762         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
2763                                        &dev_flow->handle->rix_port_id_action);
2764         if (!cache_resource)
2765                 return rte_flow_error_set(error, ENOMEM,
2766                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2767                                           "cannot allocate resource memory");
2768         *cache_resource = *resource;
2769         ret = mlx5_flow_os_create_flow_action_dest_port
2770                                 (priv->sh->fdb_domain, resource->port_id,
2771                                  &cache_resource->action);
2772         if (ret) {
2773                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID],
                                     dev_flow->handle->rix_port_id_action);
2774                 return rte_flow_error_set(error, ENOMEM,
2775                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2776                                           NULL, "cannot create action");
2777         }
2778         rte_atomic32_init(&cache_resource->refcnt);
2779         rte_atomic32_inc(&cache_resource->refcnt);
2780         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
2781                      dev_flow->handle->rix_port_id_action, cache_resource,
2782                      next);
2783         dev_flow->dv.port_id_action = cache_resource;
2784         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2785                 (void *)cache_resource,
2786                 rte_atomic32_read(&cache_resource->refcnt));
2787         return 0;
2788 }
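
/*
 * Usage sketch (illustrative only; "vport_id" is a hypothetical
 * E-Switch port): resources are matched on the port id alone, so all
 * flows forwarding to the same port share one DR destination action.
 *
 *	struct mlx5_flow_dv_port_id_action_resource res = {
 *		.port_id = vport_id,
 *	};
 *
 *	if (flow_dv_port_id_action_resource_register(dev, &res, dev_flow,
 *						      error))
 *		return -rte_errno;
 */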
2789
2790 /**
2791  * Find existing push vlan resource or create and register a new one.
2792  *
2793  * @param[in, out] dev
2794  *   Pointer to rte_eth_dev structure.
2795  * @param[in, out] resource
2796  *   Pointer to push VLAN action resource.
2797  * @param[in, out] dev_flow
2798  *   Pointer to the dev_flow.
2799  * @param[out] error
2800  *   Pointer to error structure.
2801  *
2802  * @return
2803  *   0 on success, a negative errno value otherwise and rte_errno is set.
2804  */
2805 static int
2806 flow_dv_push_vlan_action_resource_register
2807                        (struct rte_eth_dev *dev,
2808                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2809                         struct mlx5_flow *dev_flow,
2810                         struct rte_flow_error *error)
2811 {
2812         struct mlx5_priv *priv = dev->data->dev_private;
2813         struct mlx5_dev_ctx_shared *sh = priv->sh;
2814         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2815         struct mlx5dv_dr_domain *domain;
2816         uint32_t idx = 0;
2817         int ret;
2818
2819         /* Lookup a matching resource from cache. */
2820         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2821                       sh->push_vlan_action_list, idx, cache_resource, next) {
2822                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2823                     resource->ft_type == cache_resource->ft_type) {
2824                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2825                                 "refcnt %d++",
2826                                 (void *)cache_resource,
2827                                 rte_atomic32_read(&cache_resource->refcnt));
2828                         rte_atomic32_inc(&cache_resource->refcnt);
2829                         dev_flow->handle->dvh.rix_push_vlan = idx;
2830                         dev_flow->dv.push_vlan_res = cache_resource;
2831                         return 0;
2832                 }
2833         }
2834         /* Register new push_vlan action resource. */
2835         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2836                                        &dev_flow->handle->dvh.rix_push_vlan);
2837         if (!cache_resource)
2838                 return rte_flow_error_set(error, ENOMEM,
2839                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2840                                           "cannot allocate resource memory");
2841         *cache_resource = *resource;
2842         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2843                 domain = sh->fdb_domain;
2844         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2845                 domain = sh->rx_domain;
2846         else
2847                 domain = sh->tx_domain;
2848         ret = mlx5_flow_os_create_flow_action_push_vlan
2849                                         (domain, resource->vlan_tag,
2850                                          &cache_resource->action);
2851         if (ret) {
2852                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
                                     dev_flow->handle->dvh.rix_push_vlan);
2853                 return rte_flow_error_set(error, ENOMEM,
2854                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2855                                           NULL, "cannot create action");
2856         }
2857         rte_atomic32_init(&cache_resource->refcnt);
2858         rte_atomic32_inc(&cache_resource->refcnt);
2859         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2860                      &sh->push_vlan_action_list,
2861                      dev_flow->handle->dvh.rix_push_vlan,
2862                      cache_resource, next);
2863         dev_flow->dv.push_vlan_res = cache_resource;
2864         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2865                 (void *)cache_resource,
2866                 rte_atomic32_read(&cache_resource->refcnt));
2867         return 0;
2868 }
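
/*
 * Usage sketch (illustrative only): push TPID 0x8100 with PCP 3 and
 * VID 100 on the NIC TX domain. The 32-bit vlan_tag carries the TPID
 * in the upper half and the TCI in the lower half, in network order,
 * exactly as flow_dv_create_action_push_vlan() composes it later in
 * this file.
 *
 *	struct mlx5_flow_dv_push_vlan_action_resource res = {
 *		.vlan_tag = rte_cpu_to_be_32
 *			((uint32_t)RTE_ETHER_TYPE_VLAN << 16 |
 *			 (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100),
 *		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *	};
 *
 *	if (flow_dv_push_vlan_action_resource_register(dev, &res, dev_flow,
 *						       error))
 *		return -rte_errno;
 */
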
2869 /**
2870  * Get the size of a specific rte_flow_item_type.
2871  *
2872  * @param[in] item_type
2873  *   Tested rte_flow_item_type.
2874  *
2875  * @return
2876  *   Size of the matching rte_flow_item_* structure, 0 if void or irrelevant.
2877  */
2878 static size_t
2879 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2880 {
2881         size_t retval;
2882
2883         switch (item_type) {
2884         case RTE_FLOW_ITEM_TYPE_ETH:
2885                 retval = sizeof(struct rte_flow_item_eth);
2886                 break;
2887         case RTE_FLOW_ITEM_TYPE_VLAN:
2888                 retval = sizeof(struct rte_flow_item_vlan);
2889                 break;
2890         case RTE_FLOW_ITEM_TYPE_IPV4:
2891                 retval = sizeof(struct rte_flow_item_ipv4);
2892                 break;
2893         case RTE_FLOW_ITEM_TYPE_IPV6:
2894                 retval = sizeof(struct rte_flow_item_ipv6);
2895                 break;
2896         case RTE_FLOW_ITEM_TYPE_UDP:
2897                 retval = sizeof(struct rte_flow_item_udp);
2898                 break;
2899         case RTE_FLOW_ITEM_TYPE_TCP:
2900                 retval = sizeof(struct rte_flow_item_tcp);
2901                 break;
2902         case RTE_FLOW_ITEM_TYPE_VXLAN:
2903                 retval = sizeof(struct rte_flow_item_vxlan);
2904                 break;
2905         case RTE_FLOW_ITEM_TYPE_GRE:
2906                 retval = sizeof(struct rte_flow_item_gre);
2907                 break;
2908         case RTE_FLOW_ITEM_TYPE_NVGRE:
2909                 retval = sizeof(struct rte_flow_item_nvgre);
2910                 break;
2911         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2912                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2913                 break;
2914         case RTE_FLOW_ITEM_TYPE_MPLS:
2915                 retval = sizeof(struct rte_flow_item_mpls);
2916                 break;
2917         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2918         default:
2919                 retval = 0;
2920                 break;
2921         }
2922         return retval;
2923 }
2924
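/*
 * Default values written into encap headers by
 * flow_dv_convert_encap_data() when the application leaves the
 * corresponding fields zeroed: IPv4 version 4 / minimal IHL / TTL 64,
 * IPv6 version 6 / hop limit 255, and the default VXLAN and VXLAN-GPE
 * flag bits.
 */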
2925 #define MLX5_ENCAP_IPV4_VERSION         0x40
2926 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2927 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2928 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2929 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2930 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2931 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
2932
2933 /**
2934  * Convert the encap action data from list of rte_flow_item to raw buffer
2935  *
2936  * @param[in] items
2937  *   Pointer to rte_flow_item objects list.
2938  * @param[out] buf
2939  *   Pointer to the output buffer.
2940  * @param[out] size
2941  *   Pointer to the output buffer size.
2942  * @param[out] error
2943  *   Pointer to the error structure.
2944  *
2945  * @return
2946  *   0 on success, a negative errno value otherwise and rte_errno is set.
2947  */
2948 static int
2949 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2950                            size_t *size, struct rte_flow_error *error)
2951 {
2952         struct rte_ether_hdr *eth = NULL;
2953         struct rte_vlan_hdr *vlan = NULL;
2954         struct rte_ipv4_hdr *ipv4 = NULL;
2955         struct rte_ipv6_hdr *ipv6 = NULL;
2956         struct rte_udp_hdr *udp = NULL;
2957         struct rte_vxlan_hdr *vxlan = NULL;
2958         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2959         struct rte_gre_hdr *gre = NULL;
2960         size_t len;
2961         size_t temp_size = 0;
2962
2963         if (!items)
2964                 return rte_flow_error_set(error, EINVAL,
2965                                           RTE_FLOW_ERROR_TYPE_ACTION,
2966                                           NULL, "invalid empty data");
2967         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2968                 len = flow_dv_get_item_len(items->type);
2969                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2970                         return rte_flow_error_set(error, EINVAL,
2971                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2972                                                   (void *)items->type,
2973                                                   "items total size is too big"
2974                                                   " for encap action");
2975                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2976                 switch (items->type) {
2977                 case RTE_FLOW_ITEM_TYPE_ETH:
2978                         eth = (struct rte_ether_hdr *)&buf[temp_size];
2979                         break;
2980                 case RTE_FLOW_ITEM_TYPE_VLAN:
2981                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2982                         if (!eth)
2983                                 return rte_flow_error_set(error, EINVAL,
2984                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2985                                                 (void *)items->type,
2986                                                 "eth header not found");
2987                         if (!eth->ether_type)
2988                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2989                         break;
2990                 case RTE_FLOW_ITEM_TYPE_IPV4:
2991                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2992                         if (!vlan && !eth)
2993                                 return rte_flow_error_set(error, EINVAL,
2994                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2995                                                 (void *)items->type,
2996                                                 "neither eth nor vlan"
2997                                                 " header found");
2998                         if (vlan && !vlan->eth_proto)
2999                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3000                         else if (eth && !eth->ether_type)
3001                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3002                         if (!ipv4->version_ihl)
3003                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3004                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3005                         if (!ipv4->time_to_live)
3006                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3007                         break;
3008                 case RTE_FLOW_ITEM_TYPE_IPV6:
3009                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3010                         if (!vlan && !eth)
3011                                 return rte_flow_error_set(error, EINVAL,
3012                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3013                                                 (void *)items->type,
3014                                                 "neither eth nor vlan"
3015                                                 " header found");
3016                         if (vlan && !vlan->eth_proto)
3017                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3018                         else if (eth && !eth->ether_type)
3019                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3020                         if (!ipv6->vtc_flow)
3021                                 ipv6->vtc_flow =
3022                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3023                         if (!ipv6->hop_limits)
3024                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3025                         break;
3026                 case RTE_FLOW_ITEM_TYPE_UDP:
3027                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3028                         if (!ipv4 && !ipv6)
3029                                 return rte_flow_error_set(error, EINVAL,
3030                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3031                                                 (void *)items->type,
3032                                                 "ip header not found");
3033                         if (ipv4 && !ipv4->next_proto_id)
3034                                 ipv4->next_proto_id = IPPROTO_UDP;
3035                         else if (ipv6 && !ipv6->proto)
3036                                 ipv6->proto = IPPROTO_UDP;
3037                         break;
3038                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3039                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3040                         if (!udp)
3041                                 return rte_flow_error_set(error, EINVAL,
3042                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3043                                                 (void *)items->type,
3044                                                 "udp header not found");
3045                         if (!udp->dst_port)
3046                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3047                         if (!vxlan->vx_flags)
3048                                 vxlan->vx_flags =
3049                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3050                         break;
3051                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3052                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3053                         if (!udp)
3054                                 return rte_flow_error_set(error, EINVAL,
3055                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3056                                                 (void *)items->type,
3057                                                 "udp header not found");
3058                         if (!vxlan_gpe->proto)
3059                                 return rte_flow_error_set(error, EINVAL,
3060                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3061                                                 (void *)items->type,
3062                                                 "next protocol not found");
3063                         if (!udp->dst_port)
3064                                 udp->dst_port =
3065                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3066                         if (!vxlan_gpe->vx_flags)
3067                                 vxlan_gpe->vx_flags =
3068                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3069                         break;
3070                 case RTE_FLOW_ITEM_TYPE_GRE:
3071                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3072                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3073                         if (!gre->proto)
3074                                 return rte_flow_error_set(error, EINVAL,
3075                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3076                                                 (void *)items->type,
3077                                                 "next protocol not found");
3078                         if (!ipv4 && !ipv6)
3079                                 return rte_flow_error_set(error, EINVAL,
3080                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3081                                                 (void *)items->type,
3082                                                 "ip header not found");
3083                         if (ipv4 && !ipv4->next_proto_id)
3084                                 ipv4->next_proto_id = IPPROTO_GRE;
3085                         else if (ipv6 && !ipv6->proto)
3086                                 ipv6->proto = IPPROTO_GRE;
3087                         break;
3088                 case RTE_FLOW_ITEM_TYPE_VOID:
3089                         break;
3090                 default:
3091                         return rte_flow_error_set(error, EINVAL,
3092                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3093                                                   (void *)items->type,
3094                                                   "unsupported item type");
3095                         break;
3096                 }
3097                 temp_size += len;
3098         }
3099         *size = temp_size;
3100         return 0;
3101 }
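
/*
 * Example (illustrative only; "eth_spec", "ipv4_spec", "udp_spec" and
 * "vxlan_spec" are hypothetical caller-provided specs): a VXLAN
 * encapsulation list relying on the defaults above. Only addresses,
 * the VNI and other non-zero fields must be supplied; ether_type, IP
 * version/TTL, the VXLAN UDP destination port and VXLAN flags are
 * filled in by flow_dv_convert_encap_data().
 *
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *	size_t size;
 *
 *	if (flow_dv_convert_encap_data(items, buf, &size, error))
 *		return -rte_errno;
 */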
3102
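/**
 * Zero the UDP checksum of an IPv6 header in raw encapsulation data.
 *
 * A zero UDP checksum is permitted for tunnels over IPv6 (RFC 6935),
 * so the field is cleared rather than left with a value the hardware
 * would not recompute. IPv4 needs no fixup since HW calculates the
 * IPv4 checksum.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting with Ethernet.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */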
3103 static int
3104 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3105 {
3106         struct rte_ether_hdr *eth = NULL;
3107         struct rte_vlan_hdr *vlan = NULL;
3108         struct rte_ipv6_hdr *ipv6 = NULL;
3109         struct rte_udp_hdr *udp = NULL;
3110         char *next_hdr;
3111         uint16_t proto;
3112
3113         eth = (struct rte_ether_hdr *)data;
3114         next_hdr = (char *)(eth + 1);
3115         proto = rte_be_to_cpu_16(eth->ether_type);
3116
3117         /* VLAN skipping */
3118         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3119                 vlan = (struct rte_vlan_hdr *)next_hdr;
3120                 proto = rte_be_to_cpu_16(vlan->eth_proto);
3121                 next_hdr += sizeof(struct rte_vlan_hdr);
3122         }
3123
3124         /* HW calculates the IPv4 checksum, no need to proceed. */
3125         if (proto == RTE_ETHER_TYPE_IPV4)
3126                 return 0;
3127
3128         /* Neither an IPv4 nor an IPv6 header, not supported. */
3129         if (proto != RTE_ETHER_TYPE_IPV6) {
3130                 return rte_flow_error_set(error, ENOTSUP,
3131                                           RTE_FLOW_ERROR_TYPE_ACTION,
3132                                           NULL, "Cannot offload non IPv4/IPv6");
3133         }
3134
3135         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3136
3137         /* Ignore non-UDP payload. */
3138         if (ipv6->proto != IPPROTO_UDP)
3139                 return 0;
3140
3141         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3142         udp->dgram_cksum = 0;
3143
3144         return 0;
3145 }
3146
3147 /**
3148  * Convert L2 encap action to DV specification.
3149  *
3150  * @param[in] dev
3151  *   Pointer to rte_eth_dev structure.
3152  * @param[in] action
3153  *   Pointer to action structure.
3154  * @param[in, out] dev_flow
3155  *   Pointer to the mlx5_flow.
3156  * @param[in] transfer
3157  *   Mark if the flow is E-Switch flow.
3158  * @param[out] error
3159  *   Pointer to the error structure.
3160  *
3161  * @return
3162  *   0 on success, a negative errno value otherwise and rte_errno is set.
3163  */
3164 static int
3165 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3166                                const struct rte_flow_action *action,
3167                                struct mlx5_flow *dev_flow,
3168                                uint8_t transfer,
3169                                struct rte_flow_error *error)
3170 {
3171         const struct rte_flow_item *encap_data;
3172         const struct rte_flow_action_raw_encap *raw_encap_data;
3173         struct mlx5_flow_dv_encap_decap_resource res = {
3174                 .reformat_type =
3175                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3176                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3177                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3178         };
3179
3180         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3181                 raw_encap_data =
3182                         (const struct rte_flow_action_raw_encap *)action->conf;
3183                 res.size = raw_encap_data->size;
3184                 memcpy(res.buf, raw_encap_data->data, res.size);
3185         } else {
3186                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3187                         encap_data =
3188                                 ((const struct rte_flow_action_vxlan_encap *)
3189                                                 action->conf)->definition;
3190                 else
3191                         encap_data =
3192                                 ((const struct rte_flow_action_nvgre_encap *)
3193                                                 action->conf)->definition;
3194                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3195                                                &res.size, error))
3196                         return -rte_errno;
3197         }
3198         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3199                 return -rte_errno;
3200         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3201                 return rte_flow_error_set(error, EINVAL,
3202                                           RTE_FLOW_ERROR_TYPE_ACTION,
3203                                           NULL, "can't create L2 encap action");
3204         return 0;
3205 }
3206
3207 /**
3208  * Convert L2 decap action to DV specification.
3209  *
3210  * @param[in] dev
3211  *   Pointer to rte_eth_dev structure.
3212  * @param[in, out] dev_flow
3213  *   Pointer to the mlx5_flow.
3214  * @param[in] transfer
3215  *   Mark if the flow is E-Switch flow.
3216  * @param[out] error
3217  *   Pointer to the error structure.
3218  *
3219  * @return
3220  *   0 on success, a negative errno value otherwise and rte_errno is set.
3221  */
3222 static int
3223 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3224                                struct mlx5_flow *dev_flow,
3225                                uint8_t transfer,
3226                                struct rte_flow_error *error)
3227 {
3228         struct mlx5_flow_dv_encap_decap_resource res = {
3229                 .size = 0,
3230                 .reformat_type =
3231                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3232                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3233                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3234         };
3235
3236         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3237                 return rte_flow_error_set(error, EINVAL,
3238                                           RTE_FLOW_ERROR_TYPE_ACTION,
3239                                           NULL, "can't create L2 decap action");
3240         return 0;
3241 }
3242
3243 /**
3244  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3245  *
3246  * @param[in] dev
3247  *   Pointer to rte_eth_dev structure.
3248  * @param[in] action
3249  *   Pointer to action structure.
3250  * @param[in, out] dev_flow
3251  *   Pointer to the mlx5_flow.
3252  * @param[in] attr
3253  *   Pointer to the flow attributes.
3254  * @param[out] error
3255  *   Pointer to the error structure.
3256  *
3257  * @return
3258  *   0 on success, a negative errno value otherwise and rte_errno is set.
3259  */
3260 static int
3261 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3262                                 const struct rte_flow_action *action,
3263                                 struct mlx5_flow *dev_flow,
3264                                 const struct rte_flow_attr *attr,
3265                                 struct rte_flow_error *error)
3266 {
3267         const struct rte_flow_action_raw_encap *encap_data;
3268         struct mlx5_flow_dv_encap_decap_resource res;
3269
3270         memset(&res, 0, sizeof(res));
3271         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3272         res.size = encap_data->size;
3273         memcpy(res.buf, encap_data->data, res.size);
3274         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3275                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3276                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3277         if (attr->transfer)
3278                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3279         else
3280                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3281                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3282         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3283                 return rte_flow_error_set(error, EINVAL,
3284                                           RTE_FLOW_ERROR_TYPE_ACTION,
3285                                           NULL, "can't create encap action");
3286         return 0;
3287 }
3288
3289 /**
3290  * Create action push VLAN.
3291  *
3292  * @param[in] dev
3293  *   Pointer to rte_eth_dev structure.
3294  * @param[in] attr
3295  *   Pointer to the flow attributes.
3296  * @param[in] vlan
3297  *   Pointer to the vlan to push to the Ethernet header.
3298  * @param[in, out] dev_flow
3299  *   Pointer to the mlx5_flow.
3300  * @param[out] error
3301  *   Pointer to the error structure.
3302  *
3303  * @return
3304  *   0 on success, a negative errno value otherwise and rte_errno is set.
3305  */
3306 static int
3307 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3308                                 const struct rte_flow_attr *attr,
3309                                 const struct rte_vlan_hdr *vlan,
3310                                 struct mlx5_flow *dev_flow,
3311                                 struct rte_flow_error *error)
3312 {
3313         struct mlx5_flow_dv_push_vlan_action_resource res;
3314
3315         memset(&res, 0, sizeof(res));
3316         res.vlan_tag =
3317                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3318                                  vlan->vlan_tci);
3319         if (attr->transfer)
3320                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3321         else
3322                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3323                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3324         return flow_dv_push_vlan_action_resource_register
3325                                             (dev, &res, dev_flow, error);
3326 }
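
/*
 * For reference: the vlan_tag composed above is TPID in bits 31..16
 * and TCI (PCP/DEI/VID) in bits 15..0, converted to big-endian. E.g.
 * TPID 0x8100 with PCP 3 and VID 100 yields
 * rte_cpu_to_be_32(0x8100 << 16 | 0x6064).
 */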
3327
3328 /**
3329  * Validate the modify-header actions.
3330  *
3331  * @param[in] action_flags
3332  *   Holds the actions detected until now.
3333  * @param[in] action
3334  *   Pointer to the modify action.
3335  * @param[out] error
3336  *   Pointer to error structure.
3337  *
3338  * @return
3339  *   0 on success, a negative errno value otherwise and rte_errno is set.
3340  */
3341 static int
3342 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3343                                    const struct rte_flow_action *action,
3344                                    struct rte_flow_error *error)
3345 {
3346         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3347                 return rte_flow_error_set(error, EINVAL,
3348                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3349                                           NULL, "action configuration not set");
3350         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3351                 return rte_flow_error_set(error, EINVAL,
3352                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3353                                           "can't have encap action before"
3354                                           " modify action");
3355         return 0;
3356 }
3357
3358 /**
3359  * Validate the modify-header MAC address actions.
3360  *
3361  * @param[in] action_flags
3362  *   Holds the actions detected until now.
3363  * @param[in] action
3364  *   Pointer to the modify action.
3365  * @param[in] item_flags
3366  *   Holds the items detected.
3367  * @param[out] error
3368  *   Pointer to error structure.
3369  *
3370  * @return
3371  *   0 on success, a negative errno value otherwise and rte_errno is set.
3372  */
3373 static int
3374 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3375                                    const struct rte_flow_action *action,
3376                                    const uint64_t item_flags,
3377                                    struct rte_flow_error *error)
3378 {
3379         int ret = 0;
3380
3381         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3382         if (!ret) {
3383                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3384                         return rte_flow_error_set(error, EINVAL,
3385                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3386                                                   NULL,
3387                                                   "no L2 item in pattern");
3388         }
3389         return ret;
3390 }
3391
3392 /**
3393  * Validate the modify-header IPv4 address actions.
3394  *
3395  * @param[in] action_flags
3396  *   Holds the actions detected until now.
3397  * @param[in] action
3398  *   Pointer to the modify action.
3399  * @param[in] item_flags
3400  *   Holds the items detected.
3401  * @param[out] error
3402  *   Pointer to error structure.
3403  *
3404  * @return
3405  *   0 on success, a negative errno value otherwise and rte_errno is set.
3406  */
3407 static int
3408 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3409                                     const struct rte_flow_action *action,
3410                                     const uint64_t item_flags,
3411                                     struct rte_flow_error *error)
3412 {
3413         int ret = 0;
3414         uint64_t layer;
3415
3416         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3417         if (!ret) {
3418                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3419                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3420                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3421                 if (!(item_flags & layer))
3422                         return rte_flow_error_set(error, EINVAL,
3423                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3424                                                   NULL,
3425                                                   "no ipv4 item in pattern");
3426         }
3427         return ret;
3428 }
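
/*
 * Note: when a decap action precedes the modify action, the inner
 * headers become the outermost ones after decapsulation, hence the
 * INNER layer flags are checked above. The same pattern repeats in the
 * IPv6, transport, TCP and TTL validators below.
 */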
3429
3430 /**
3431  * Validate the modify-header IPv6 address actions.
3432  *
3433  * @param[in] action_flags
3434  *   Holds the actions detected until now.
3435  * @param[in] action
3436  *   Pointer to the modify action.
3437  * @param[in] item_flags
3438  *   Holds the items detected.
3439  * @param[out] error
3440  *   Pointer to error structure.
3441  *
3442  * @return
3443  *   0 on success, a negative errno value otherwise and rte_errno is set.
3444  */
3445 static int
3446 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3447                                     const struct rte_flow_action *action,
3448                                     const uint64_t item_flags,
3449                                     struct rte_flow_error *error)
3450 {
3451         int ret = 0;
3452         uint64_t layer;
3453
3454         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3455         if (!ret) {
3456                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3457                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3458                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3459                 if (!(item_flags & layer))
3460                         return rte_flow_error_set(error, EINVAL,
3461                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3462                                                   NULL,
3463                                                   "no ipv6 item in pattern");
3464         }
3465         return ret;
3466 }
3467
3468 /**
3469  * Validate the modify-header TP actions.
3470  *
3471  * @param[in] action_flags
3472  *   Holds the actions detected until now.
3473  * @param[in] action
3474  *   Pointer to the modify action.
3475  * @param[in] item_flags
3476  *   Holds the items detected.
3477  * @param[out] error
3478  *   Pointer to error structure.
3479  *
3480  * @return
3481  *   0 on success, a negative errno value otherwise and rte_errno is set.
3482  */
3483 static int
3484 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3485                                   const struct rte_flow_action *action,
3486                                   const uint64_t item_flags,
3487                                   struct rte_flow_error *error)
3488 {
3489         int ret = 0;
3490         uint64_t layer;
3491
3492         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3493         if (!ret) {
3494                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3495                                  MLX5_FLOW_LAYER_INNER_L4 :
3496                                  MLX5_FLOW_LAYER_OUTER_L4;
3497                 if (!(item_flags & layer))
3498                         return rte_flow_error_set(error, EINVAL,
3499                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3500                                                   NULL, "no transport layer "
3501                                                   "in pattern");
3502         }
3503         return ret;
3504 }
3505
3506 /**
3507  * Validate the modify-header actions of increment/decrement
3508  * TCP Sequence-number.
3509  *
3510  * @param[in] action_flags
3511  *   Holds the actions detected until now.
3512  * @param[in] action
3513  *   Pointer to the modify action.
3514  * @param[in] item_flags
3515  *   Holds the items detected.
3516  * @param[out] error
3517  *   Pointer to error structure.
3518  *
3519  * @return
3520  *   0 on success, a negative errno value otherwise and rte_errno is set.
3521  */
3522 static int
3523 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3524                                        const struct rte_flow_action *action,
3525                                        const uint64_t item_flags,
3526                                        struct rte_flow_error *error)
3527 {
3528         int ret = 0;
3529         uint64_t layer;
3530
3531         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3532         if (!ret) {
3533                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3534                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3535                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3536                 if (!(item_flags & layer))
3537                         return rte_flow_error_set(error, EINVAL,
3538                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3539                                                   NULL, "no TCP item in"
3540                                                   " pattern");
3541                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3542                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3543                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3544                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3545                         return rte_flow_error_set(error, EINVAL,
3546                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3547                                                   NULL,
3548                                                   "cannot decrease and increase"
3549                                                   " TCP sequence number"
3550                                                   " at the same time");
3551         }
3552         return ret;
3553 }
3554
3555 /**
3556  * Validate the modify-header actions of increment/decrement
3557  * TCP Acknowledgment number.
3558  *
3559  * @param[in] action_flags
3560  *   Holds the actions detected until now.
3561  * @param[in] action
3562  *   Pointer to the modify action.
3563  * @param[in] item_flags
3564  *   Holds the items detected.
3565  * @param[out] error
3566  *   Pointer to error structure.
3567  *
3568  * @return
3569  *   0 on success, a negative errno value otherwise and rte_errno is set.
3570  */
3571 static int
3572 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3573                                        const struct rte_flow_action *action,
3574                                        const uint64_t item_flags,
3575                                        struct rte_flow_error *error)
3576 {
3577         int ret = 0;
3578         uint64_t layer;
3579
3580         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3581         if (!ret) {
3582                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3583                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3584                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3585                 if (!(item_flags & layer))
3586                         return rte_flow_error_set(error, EINVAL,
3587                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3588                                                   NULL, "no TCP item in"
3589                                                   " pattern");
3590                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3591                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3592                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3593                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3594                         return rte_flow_error_set(error, EINVAL,
3595                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3596                                                   NULL,
3597                                                   "cannot decrease and increase"
3598                                                   " TCP acknowledgment number"
3599                                                   " at the same time");
3600         }
3601         return ret;
3602 }
3603
3604 /**
3605  * Validate the modify-header TTL actions.
3606  *
3607  * @param[in] action_flags
3608  *   Holds the actions detected until now.
3609  * @param[in] action
3610  *   Pointer to the modify action.
3611  * @param[in] item_flags
3612  *   Holds the items detected.
3613  * @param[out] error
3614  *   Pointer to error structure.
3615  *
3616  * @return
3617  *   0 on success, a negative errno value otherwise and rte_errno is set.
3618  */
3619 static int
3620 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3621                                    const struct rte_flow_action *action,
3622                                    const uint64_t item_flags,
3623                                    struct rte_flow_error *error)
3624 {
3625         int ret = 0;
3626         uint64_t layer;
3627
3628         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3629         if (!ret) {
3630                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3631                                  MLX5_FLOW_LAYER_INNER_L3 :
3632                                  MLX5_FLOW_LAYER_OUTER_L3;
3633                 if (!(item_flags & layer))
3634                         return rte_flow_error_set(error, EINVAL,
3635                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3636                                                   NULL,
3637                                                   "no IP protocol in pattern");
3638         }
3639         return ret;
3640 }
3641
3642 /**
3643  * Validate jump action.
3644  *
3645  * @param[in] action
3646  *   Pointer to the jump action.
3647  * @param[in] action_flags
3648  *   Holds the actions detected until now.
3649  * @param[in] attributes
3650  *   Pointer to flow attributes
3651  * @param[in] external
3652  *   Action belongs to flow rule created by request external to PMD.
3653  * @param[out] error
3654  *   Pointer to error structure.
3655  *
3656  * @return
3657  *   0 on success, a negative errno value otherwise and rte_errno is set.
3658  */
3659 static int
3660 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3661                              uint64_t action_flags,
3662                              const struct rte_flow_attr *attributes,
3663                              bool external, struct rte_flow_error *error)
3664 {
3665         uint32_t target_group, table;
3666         int ret = 0;
3667
3668         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3669                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3670                 return rte_flow_error_set(error, EINVAL,
3671                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3672                                           "can't have 2 fate actions in"
3673                                           " same flow");
3674         if (action_flags & MLX5_FLOW_ACTION_METER)
3675                 return rte_flow_error_set(error, ENOTSUP,
3676                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3677                                           "jump with meter not supported");
3678         if (!action->conf)
3679                 return rte_flow_error_set(error, EINVAL,
3680                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3681                                           NULL, "action configuration not set");
3682         target_group =
3683                 ((const struct rte_flow_action_jump *)action->conf)->group;
3684         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3685                                        true, &table, error);
3686         if (ret)
3687                 return ret;
3688         if (attributes->group == target_group)
3689                 return rte_flow_error_set(error, EINVAL,
3690                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3691                                           "target group must be other than"
3692                                           " the current flow group");
3693         return 0;
3694 }
3695
3696 /**
3697  * Validate the port_id action.
3698  *
3699  * @param[in] dev
3700  *   Pointer to rte_eth_dev structure.
3701  * @param[in] action_flags
3702  *   Bit-fields that hold the actions detected until now.
3703  * @param[in] action
3704  *   Port_id RTE action structure.
3705  * @param[in] attr
3706  *   Attributes of flow that includes this action.
3707  * @param[out] error
3708  *   Pointer to error structure.
3709  *
3710  * @return
3711  *   0 on success, a negative errno value otherwise and rte_errno is set.
3712  */
3713 static int
3714 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3715                                 uint64_t action_flags,
3716                                 const struct rte_flow_action *action,
3717                                 const struct rte_flow_attr *attr,
3718                                 struct rte_flow_error *error)
3719 {
3720         const struct rte_flow_action_port_id *port_id;
3721         struct mlx5_priv *act_priv;
3722         struct mlx5_priv *dev_priv;
3723         uint16_t port;
3724
3725         if (!attr->transfer)
3726                 return rte_flow_error_set(error, ENOTSUP,
3727                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3728                                           NULL,
3729                                           "port id action is valid in transfer"
3730                                           " mode only");
3731         if (!action || !action->conf)
3732                 return rte_flow_error_set(error, ENOTSUP,
3733                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3734                                           NULL,
3735                                           "port id action parameters must be"
3736                                           " specified");
3737         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3738                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3739                 return rte_flow_error_set(error, EINVAL,
3740                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3741                                           "can have only one fate action in"
3742                                           " a flow");
3743         dev_priv = mlx5_dev_to_eswitch_info(dev);
3744         if (!dev_priv)
3745                 return rte_flow_error_set(error, rte_errno,
3746                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3747                                           NULL,
3748                                           "failed to obtain E-Switch info");
3749         port_id = action->conf;
3750         port = port_id->original ? dev->data->port_id : port_id->id;
3751         act_priv = mlx5_port_to_eswitch_info(port, false);
3752         if (!act_priv)
3753                 return rte_flow_error_set
3754                                 (error, rte_errno,
3755                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3756                                  "failed to obtain E-Switch port id for port");
3757         if (act_priv->domain_id != dev_priv->domain_id)
3758                 return rte_flow_error_set
3759                                 (error, EINVAL,
3760                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3761                                  "port does not belong to"
3762                                  " E-Switch being configured");
3763         return 0;
3764 }
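
/*
 * For reference, an action configuration this validator accepts
 * (illustrative only; "peer_port" is a hypothetical DPDK port id
 * belonging to the same E-Switch domain):
 *
 *	struct rte_flow_action_port_id conf = {
 *		.original = 0,
 *		.id = peer_port,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &conf,
 *	};
 */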
3765
3766 /**
3767  * Get the maximum number of modify header actions.
3768  *
3769  * @param dev
3770  *   Pointer to rte_eth_dev structure.
3771  * @param flags
3772  *   Flags bits to check if root level.
3773  *
3774  * @return
3775  *   Max number of modify header actions device can support.
3776  */
3777 static inline unsigned int
3778 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
3779                               uint64_t flags)
3780 {
3781         /*
3782          * There's no way to directly query the max capacity from FW.
3783          * The maximal value on root table should be assumed to be supported.
3784          */
3785         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3786                 return MLX5_MAX_MODIFY_NUM;
3787         else
3788                 return MLX5_ROOT_TBL_MODIFY_NUM;
3789 }
3790
3791 /**
3792  * Validate the meter action.
3793  *
3794  * @param[in] dev
3795  *   Pointer to rte_eth_dev structure.
3796  * @param[in] action_flags
3797  *   Bit-fields that holds the actions detected until now.
3798  * @param[in] action
3799  *   Pointer to the meter action.
3800  * @param[in] attr
3801  *   Attributes of flow that includes this action.
3802  * @param[out] error
3803  *   Pointer to error structure.
3804  *
3805  * @return
3806  *   0 on success, a negative errno value otherwise and rte_errno is set.
3807  */
3808 static int
3809 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3810                                 uint64_t action_flags,
3811                                 const struct rte_flow_action *action,
3812                                 const struct rte_flow_attr *attr,
3813                                 struct rte_flow_error *error)
3814 {
3815         struct mlx5_priv *priv = dev->data->dev_private;
3816         const struct rte_flow_action_meter *am = action->conf;
3817         struct mlx5_flow_meter *fm;
3818
3819         if (!am)
3820                 return rte_flow_error_set(error, EINVAL,
3821                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3822                                           "meter action conf is NULL");
3823
3824         if (action_flags & MLX5_FLOW_ACTION_METER)
3825                 return rte_flow_error_set(error, ENOTSUP,
3826                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3827                                           "meter chaining not supported");
3828         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3829                 return rte_flow_error_set(error, ENOTSUP,
3830                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3831                                           "meter with jump not supported");
3832         if (!priv->mtr_en)
3833                 return rte_flow_error_set(error, ENOTSUP,
3834                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3835                                           NULL,
3836                                           "meter action not supported");
3837         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3838         if (!fm)
3839                 return rte_flow_error_set(error, EINVAL,
3840                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3841                                           "Meter not found");
3842         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
3843               (!fm->ingress && !attr->ingress && attr->egress) ||
3844               (!fm->egress && !attr->egress && attr->ingress))))
3845                 return rte_flow_error_set(error, EINVAL,
3846                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3847                                           "Flow attributes are either invalid "
3848                                           "or have a conflict with current "
3849                                           "meter attributes");
3850         return 0;
3851 }
3852
3853 /**
3854  * Validate the age action.
3855  *
3856  * @param[in] action_flags
3857  *   Holds the actions detected until now.
3858  * @param[in] action
3859  *   Pointer to the age action.
3860  * @param[in] dev
3861  *   Pointer to the Ethernet device structure.
3862  * @param[out] error
3863  *   Pointer to error structure.
3864  *
3865  * @return
3866  *   0 on success, a negative errno value otherwise and rte_errno is set.
3867  */
3868 static int
3869 flow_dv_validate_action_age(uint64_t action_flags,
3870                             const struct rte_flow_action *action,
3871                             struct rte_eth_dev *dev,
3872                             struct rte_flow_error *error)
3873 {
3874         struct mlx5_priv *priv = dev->data->dev_private;
3875         const struct rte_flow_action_age *age = action->conf;
3876
3877         if (!priv->config.devx || priv->counter_fallback)
3878                 return rte_flow_error_set(error, ENOTSUP,
3879                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3880                                           NULL,
3881                                           "age action not supported");
3882         if (!(action->conf))
3883                 return rte_flow_error_set(error, EINVAL,
3884                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3885                                           "configuration cannot be null");
3886         if (age->timeout >= UINT16_MAX / 2 / 10)
3887                 return rte_flow_error_set(error, ENOTSUP,
3888                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3889                                           "Max age time: 3275 seconds");
3890         if (action_flags & MLX5_FLOW_ACTION_AGE)
3891                 return rte_flow_error_set(error, EINVAL,
3892                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3893                                           "Duplicate age actions set");
3894         return 0;
3895 }
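
/*
 * Illustrative sketch: an age action configuration that satisfies the
 * checks above. The timeout is in seconds and must stay below
 * UINT16_MAX / 2 / 10, i.e. at most 3275 seconds. Values are examples.
 */
static int __rte_unused
example_validate_age_action(struct rte_eth_dev *dev)
{
	static const struct rte_flow_action_age age = {
		.timeout = 600,  /* Report the flow as aged after 600 idle seconds. */
		.context = NULL, /* Optional application cookie. */
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_AGE,
		.conf = &age,
	};
	struct rte_flow_error error;

	return flow_dv_validate_action_age(0, &action, dev, &error);
}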
3896
3897 /**
3898  * Validate the modify-header IPv4 DSCP actions.
3899  *
3900  * @param[in] action_flags
3901  *   Holds the actions detected until now.
3902  * @param[in] action
3903  *   Pointer to the modify action.
3904  * @param[in] item_flags
3905  *   Holds the items detected.
3906  * @param[out] error
3907  *   Pointer to error structure.
3908  *
3909  * @return
3910  *   0 on success, a negative errno value otherwise and rte_errno is set.
3911  */
3912 static int
3913 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3914                                          const struct rte_flow_action *action,
3915                                          const uint64_t item_flags,
3916                                          struct rte_flow_error *error)
3917 {
3918         int ret = 0;
3919
3920         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3921         if (!ret) {
3922                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3923                         return rte_flow_error_set(error, EINVAL,
3924                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3925                                                   NULL,
3926                                                   "no ipv4 item in pattern");
3927         }
3928         return ret;
3929 }
3930
3931 /**
3932  * Validate the modify-header IPv6 DSCP actions.
3933  *
3934  * @param[in] action_flags
3935  *   Holds the actions detected until now.
3936  * @param[in] action
3937  *   Pointer to the modify action.
3938  * @param[in] item_flags
3939  *   Holds the items detected.
3940  * @param[out] error
3941  *   Pointer to error structure.
3942  *
3943  * @return
3944  *   0 on success, a negative errno value otherwise and rte_errno is set.
3945  */
3946 static int
3947 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3948                                          const struct rte_flow_action *action,
3949                                          const uint64_t item_flags,
3950                                          struct rte_flow_error *error)
3951 {
3952         int ret = 0;
3953
3954         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3955         if (!ret) {
3956                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3957                         return rte_flow_error_set(error, EINVAL,
3958                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3959                                                   NULL,
3960                                                   "no ipv6 item in pattern");
3961         }
3962         return ret;
3963 }
3964
3965 /**
3966  * Find existing modify-header resource or create and register a new one.
3967  *
3968  * @param[in, out] dev
3969  *   Pointer to rte_eth_dev structure.
3970  * @param[in, out] resource
3971  *   Pointer to modify-header resource.
3972  * @param[in, out] dev_flow
3973  *   Pointer to the dev_flow.
3974  * @param[out] error
3975  *   Pointer to error structure.
3976  *
3977  * @return
3978  *   0 on success, otherwise a negative errno value and rte_errno is set.
3979  */
3980 static int
3981 flow_dv_modify_hdr_resource_register
3982                         (struct rte_eth_dev *dev,
3983                          struct mlx5_flow_dv_modify_hdr_resource *resource,
3984                          struct mlx5_flow *dev_flow,
3985                          struct rte_flow_error *error)
3986 {
3987         struct mlx5_priv *priv = dev->data->dev_private;
3988         struct mlx5_dev_ctx_shared *sh = priv->sh;
3989         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3990         struct mlx5dv_dr_domain *ns;
3991         uint32_t actions_len;
3992         int ret;
3993
3994         resource->flags = dev_flow->dv.group ? 0 :
3995                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3996         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
3997                                     resource->flags))
3998                 return rte_flow_error_set(error, EOVERFLOW,
3999                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4000                                           "too many modify header items");
4001         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4002                 ns = sh->fdb_domain;
4003         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4004                 ns = sh->tx_domain;
4005         else
4006                 ns = sh->rx_domain;
4007         /* Lookup a matching resource from cache. */
4008         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4009         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
4010                 if (resource->ft_type == cache_resource->ft_type &&
4011                     resource->actions_num == cache_resource->actions_num &&
4012                     resource->flags == cache_resource->flags &&
4013                     !memcmp((const void *)resource->actions,
4014                             (const void *)cache_resource->actions,
4015                             actions_len)) {
4016                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4017                                 (void *)cache_resource,
4018                                 rte_atomic32_read(&cache_resource->refcnt));
4019                         rte_atomic32_inc(&cache_resource->refcnt);
4020                         dev_flow->handle->dvh.modify_hdr = cache_resource;
4021                         return 0;
4022                 }
4023         }
4024         /* Register new modify-header resource. */
4025         cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
4026                                     sizeof(*cache_resource) + actions_len, 0,
4027                                     SOCKET_ID_ANY);
4028         if (!cache_resource)
4029                 return rte_flow_error_set(error, ENOMEM,
4030                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4031                                           "cannot allocate resource memory");
4032         *cache_resource = *resource;
4033         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
4034         ret = mlx5_flow_os_create_flow_action_modify_header
4035                                         (sh->ctx, ns, cache_resource,
4036                                          actions_len, &cache_resource->action);
4037         if (ret) {
4038                 mlx5_free(cache_resource);
4039                 return rte_flow_error_set(error, ENOMEM,
4040                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4041                                           NULL, "cannot create action");
4042         }
4043         rte_atomic32_init(&cache_resource->refcnt);
4044         rte_atomic32_inc(&cache_resource->refcnt);
4045         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
4046         dev_flow->handle->dvh.modify_hdr = cache_resource;
4047         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
4048                 (void *)cache_resource,
4049                 rte_atomic32_read(&cache_resource->refcnt));
4050         return 0;
4051 }
4052
4053 /**
4054  * Get DV flow counter by index.
4055  *
4056  * @param[in] dev
4057  *   Pointer to the Ethernet device structure.
4058  * @param[in] idx
4059  *   mlx5 flow counter index in the container.
4060  * @param[out] ppool
4061  *   mlx5 flow counter pool in the container,
4062  *   mlx5 flow counter pool in the container.
4063  * @return
4064  *   Pointer to the counter, NULL otherwise.
4065  */
4066 static struct mlx5_flow_counter *
4067 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4068                            uint32_t idx,
4069                            struct mlx5_flow_counter_pool **ppool)
4070 {
4071         struct mlx5_priv *priv = dev->data->dev_private;
4072         struct mlx5_pools_container *cont;
4073         struct mlx5_flow_counter_pool *pool;
4074         uint32_t batch = 0, age = 0;
4075
4076         idx--;
4077         age = MLX_CNT_IS_AGE(idx);
4078         idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
4079         if (idx >= MLX5_CNT_BATCH_OFFSET) {
4080                 idx -= MLX5_CNT_BATCH_OFFSET;
4081                 batch = 1;
4082         }
4083         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4084         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
4085         pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
4086         MLX5_ASSERT(pool);
4087         if (ppool)
4088                 *ppool = pool;
4089         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4090 }
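
/*
 * Worked example of the index scheme reversed above (constants assumed for
 * illustration only): counter indexes are 1-based so that 0 can mean "no
 * counter". Assuming MLX5_COUNTERS_PER_POOL == 512, a plain (non-batch,
 * non-age) counter at offset 5 of pool 2 is encoded by the allocator as
 * 2 * 512 + 5 + 1 = 1030. Batch and age counters additionally carry
 * MLX5_CNT_BATCH_OFFSET / MLX5_CNT_AGE_OFFSET, which is exactly what the
 * subtractions above strip before the pool/offset division.
 */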
4091
4092 /**
4093  * Check whether the devx counter belongs to the pool.
4094  *
4095  * @param[in] pool
4096  *   Pointer to the counter pool.
4097  * @param[in] id
4098  *   The counter devx ID.
4099  *
4100  * @return
4101  *   True if counter belongs to the pool, false otherwise.
4102  */
4103 static bool
4104 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4105 {
4106         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4107                    MLX5_COUNTERS_PER_POOL;
4108
4109         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4110                 return true;
4111         return false;
4112 }
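
/*
 * Worked example for the range check above (values assumed): with
 * MLX5_COUNTERS_PER_POOL == 512 and pool->min_dcs->id == 1000,
 * base = (1000 / 512) * 512 = 512, so devx counter IDs 512..1023
 * are treated as belonging to this pool.
 */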
4113
4114 /**
4115  * Get a pool by devx counter ID.
4116  *
4117  * @param[in] cont
4118  *   Pointer to the counter container.
4119  * @param[in] id
4120  *   The counter devx ID.
4121  *
4122  * @return
4123  *   The counter pool pointer if it exists, NULL otherwise.
4124  */
4125 static struct mlx5_flow_counter_pool *
4126 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
4127 {
4128         uint32_t i;
4129
4130         /* Check last used pool. */
4131         if (cont->last_pool_idx != POOL_IDX_INVALID &&
4132             flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
4133                 return cont->pools[cont->last_pool_idx];
4134         /* ID out of range means no suitable pool in the container. */
4135         if (id > cont->max_id || id < cont->min_id)
4136                 return NULL;
4137         /*
4138          * Find the pool from the end of the container, since counter IDs
4139          * are mostly allocated in increasing sequence, hence the last
4140          * pools are the most likely match.
4141          */
4142         i = rte_atomic16_read(&cont->n_valid);
4143         while (i--) {
4144                 struct mlx5_flow_counter_pool *pool = cont->pools[i];
4145
4146                 if (flow_dv_is_counter_in_pool(pool, id))
4147                         return pool;
4148         }
4149         return NULL;
4150 }
4151
4152 /**
4153  * Allocate new memory for the counter values, wrapped by all the needed
4154  * management.
4155  *
4156  * @param[in] dev
4157  *   Pointer to the Ethernet device structure.
4158  * @param[in] raws_n
4159  *   Number of raw memory areas, each one for MLX5_COUNTERS_PER_POOL counters.
4160  *
4161  * @return
4162  *   The new memory management pointer on success, otherwise NULL and rte_errno
4163  *   is set.
4164  */
4165 static struct mlx5_counter_stats_mem_mng *
4166 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
4167 {
4168         struct mlx5_priv *priv = dev->data->dev_private;
4169         struct mlx5_dev_ctx_shared *sh = priv->sh;
4170         struct mlx5_devx_mkey_attr mkey_attr;
4171         struct mlx5_counter_stats_mem_mng *mem_mng;
4172         volatile struct flow_counter_stats *raw_data;
4173         int size = (sizeof(struct flow_counter_stats) *
4174                         MLX5_COUNTERS_PER_POOL +
4175                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
4176                         sizeof(struct mlx5_counter_stats_mem_mng);
4177         size_t pgsize = rte_mem_page_size();
4178         if (pgsize == (size_t)-1) {
4179                 DRV_LOG(ERR, "Failed to get mem page size");
4180                 rte_errno = ENOMEM;
4181                 return NULL;
4182         }
4183         uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
4184                                   SOCKET_ID_ANY);
4185         int i;
4186
4187         if (!mem) {
4188                 rte_errno = ENOMEM;
4189                 return NULL;
4190         }
4191         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
4192         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
4193         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
4194                                                  IBV_ACCESS_LOCAL_WRITE);
4195         if (!mem_mng->umem) {
4196                 rte_errno = errno;
4197                 mlx5_free(mem);
4198                 return NULL;
4199         }
4200         mkey_attr.addr = (uintptr_t)mem;
4201         mkey_attr.size = size;
4202         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
4203         mkey_attr.pd = sh->pdn;
4204         mkey_attr.log_entity_size = 0;
4205         mkey_attr.pg_access = 0;
4206         mkey_attr.klm_array = NULL;
4207         mkey_attr.klm_num = 0;
4208         mkey_attr.relaxed_ordering =
4209                 priv->config.hca_attr.relaxed_ordering_write &&
4210                 priv->config.hca_attr.relaxed_ordering_read &&
4211                 !haswell_broadwell_cpu;
4212         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
4213         if (!mem_mng->dm) {
4214                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
4215                 rte_errno = errno;
4216                 mlx5_free(mem);
4217                 return NULL;
4218         }
4219         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
4220         raw_data = (volatile struct flow_counter_stats *)mem;
4221         for (i = 0; i < raws_n; ++i) {
4222                 mem_mng->raws[i].mem_mng = mem_mng;
4223                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
4224         }
4225         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
4226         return mem_mng;
4227 }
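
/*
 * Sketch of the single allocation laid out above (not to scale; the
 * host-only bookkeeping is carved from the same buffer as the
 * device-visible area):
 *
 *   [ raw statistics: raws_n * MLX5_COUNTERS_PER_POOL entries ] <- umem/mkey
 *   [ raws_n * struct mlx5_counter_stats_raw metadata entries ]
 *   [ struct mlx5_counter_stats_mem_mng header (at the very end) ]
 *
 * Only the first area is registered for device write access; each raws[i]
 * points at its own MLX5_COUNTERS_PER_POOL-sized slice of it.
 */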
4228
4229 /**
4230  * Resize a counter container.
4231  *
4232  * @param[in] dev
4233  *   Pointer to the Ethernet device structure.
4234  * @param[in] batch
4235  *   Whether the pool is for counters that were allocated by batch command.
4236  * @param[in] age
4237  *   Whether the pool is for aging counters.
4238  *
4239  * @return
4240  *   0 on success, otherwise negative errno value and rte_errno is set.
4241  */
4242 static int
4243 flow_dv_container_resize(struct rte_eth_dev *dev,
4244                                 uint32_t batch, uint32_t age)
4245 {
4246         struct mlx5_priv *priv = dev->data->dev_private;
4247         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4248                                                                age);
4249         struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
4250         void *old_pools = cont->pools;
4251         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
4252         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4253         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4254
4255         if (!pools) {
4256                 rte_errno = ENOMEM;
4257                 return -ENOMEM;
4258         }
4259         if (old_pools)
4260                 memcpy(pools, old_pools, cont->n *
4261                                        sizeof(struct mlx5_flow_counter_pool *));
4262         /*
4263          * Fallback mode query the counter directly, no background query
4264          * resources are needed.
4265          */
4266         if (!priv->counter_fallback) {
4267                 int i;
4268
4269                 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4270                           MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4271                 if (!mem_mng) {
4272                         mlx5_free(pools);
4273                         return -ENOMEM;
4274                 }
4275                 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4276                         LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4277                                          mem_mng->raws +
4278                                          MLX5_CNT_CONTAINER_RESIZE +
4279                                          i, next);
4280         }
4281         rte_spinlock_lock(&cont->resize_sl);
4282         cont->n = resize;
4283         cont->mem_mng = mem_mng;
4284         cont->pools = pools;
4285         rte_spinlock_unlock(&cont->resize_sl);
4286         if (old_pools)
4287                 mlx5_free(old_pools);
4288         return 0;
4289 }
4290
4291 /**
4292  * Query a devx flow counter.
4293  *
4294  * @param[in] dev
4295  *   Pointer to the Ethernet device structure.
4296  * @param[in] counter
4297  *   Index to the flow counter.
4298  * @param[out] pkts
4299  *   The statistics value of packets.
4300  * @param[out] bytes
4301  *   The statistics value of bytes.
4302  *
4303  * @return
4304  *   0 on success, otherwise a negative errno value and rte_errno is set.
4305  */
4306 static inline int
4307 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4308                      uint64_t *bytes)
4309 {
4310         struct mlx5_priv *priv = dev->data->dev_private;
4311         struct mlx5_flow_counter_pool *pool = NULL;
4312         struct mlx5_flow_counter *cnt;
4313         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4314         int offset;
4315
4316         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4317         MLX5_ASSERT(pool);
4318         if (counter < MLX5_CNT_BATCH_OFFSET) {
4319                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4320                 if (priv->counter_fallback)
4321                         return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
4322                                         0, pkts, bytes, 0, NULL, NULL, 0);
4323         }
4324
4325         rte_spinlock_lock(&pool->sl);
4326         /*
4327          * A single counter can be allocated with an ID smaller than the ones
4328          * covered by the raw data currently being read by the host thread.
4329          * In this case the new counter values must be reported as 0.
4330          */
4331         if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4332                 *pkts = 0;
4333                 *bytes = 0;
4334         } else {
4335                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4336                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4337                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4338         }
4339         rte_spinlock_unlock(&pool->sl);
4340         return 0;
4341 }
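
/*
 * Reader-side note: in fallback mode the counter is queried synchronously
 * from firmware on every call; otherwise the values come from pool->raw,
 * which the host query thread refills in the background, so a read here
 * costs only the pool spinlock and two byte swaps.
 */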
4342
4343 /**
4344  * Create and initialize a new counter pool.
4345  *
4346  * @param[in] dev
4347  *   Pointer to the Ethernet device structure.
4348  * @param[out] dcs
4349  *   The devX counter handle.
4350  * @param[in] batch
4351  *   Whether the pool is for counters that were allocated by batch command.
4352  * @param[in] age
4353  *   Whether the pool is for counters that were allocated for aging.
4356  *
4357  * @return
4358  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
4359  */
4360 static struct mlx5_flow_counter_pool *
4361 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4362                     uint32_t batch, uint32_t age)
4363 {
4364         struct mlx5_priv *priv = dev->data->dev_private;
4365         struct mlx5_flow_counter_pool *pool;
4366         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4367                                                                age);
4368         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4369         uint32_t size = sizeof(*pool);
4370
4371         if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
4372                 return NULL;
4373         size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
4374         size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
4375         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
4376         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4377         if (!pool) {
4378                 rte_errno = ENOMEM;
4379                 return NULL;
4380         }
4381         pool->min_dcs = dcs;
4382         if (!priv->counter_fallback)
4383                 pool->raw = cont->mem_mng->raws + n_valid %
4384                                                       MLX5_CNT_CONTAINER_RESIZE;
4385         pool->raw_hw = NULL;
4386         pool->type = 0;
4387         pool->type |= (batch ? 0 :  CNT_POOL_TYPE_EXT);
4388         pool->type |= (!age ? 0 :  CNT_POOL_TYPE_AGE);
4389         pool->query_gen = 0;
4390         rte_spinlock_init(&pool->sl);
4391         TAILQ_INIT(&pool->counters[0]);
4392         TAILQ_INIT(&pool->counters[1]);
4393         TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4394         pool->index = n_valid;
4395         cont->pools[n_valid] = pool;
4396         if (!batch) {
4397                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4398
4399                 if (base < cont->min_id)
4400                         cont->min_id = base;
4401                 if (base > cont->max_id)
4402                         cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4403                 cont->last_pool_idx = pool->index;
4404         }
4405         /* Pool initialization must be visible before host thread access. */
4406         rte_cio_wmb();
4407         rte_atomic16_add(&cont->n_valid, 1);
4408         return pool;
4409 }
4410
4411 /**
4412  * Update the minimum dcs-id for aged or no-aged counter pool.
4413  *
4414  * @param[in] dev
4415  *   Pointer to the Ethernet device structure.
4416  * @param[in] pool
4417  *   Current counter pool.
4418  * @param[in] batch
4419  *   Whether the pool is for counters that were allocated by batch command.
4420  * @param[in] age
4421  *   Whether the counter is for aging.
4422  */
4423 static void
4424 flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
4425                         struct mlx5_flow_counter_pool *pool,
4426                         uint32_t batch, uint32_t age)
4427 {
4428         struct mlx5_priv *priv = dev->data->dev_private;
4429         struct mlx5_flow_counter_pool *other;
4430         struct mlx5_pools_container *cont;
4431
4432         cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
4433         other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
4434         if (!other)
4435                 return;
4436         if (pool->min_dcs->id < other->min_dcs->id) {
4437                 rte_atomic64_set(&other->a64_dcs,
4438                         rte_atomic64_read(&pool->a64_dcs));
4439         } else {
4440                 rte_atomic64_set(&pool->a64_dcs,
4441                         rte_atomic64_read(&other->a64_dcs));
4442         }
4443 }
4444 /**
4445  * Prepare a new counter and/or a new counter pool.
4446  *
4447  * @param[in] dev
4448  *   Pointer to the Ethernet device structure.
4449  * @param[out] cnt_free
4450  *   Where to put the pointer of a new counter.
4451  * @param[in] batch
4452  *   Whether the pool is for counters that were allocated by batch command.
4453  * @param[in] age
4454  *   Whether the pool is for counters that were allocated for aging.
4455  *
4456  * @return
4457  *   The counter pool pointer and @p cnt_free is set on success,
4458  *   NULL otherwise and rte_errno is set.
4459  */
4460 static struct mlx5_flow_counter_pool *
4461 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4462                              struct mlx5_flow_counter **cnt_free,
4463                              uint32_t batch, uint32_t age)
4464 {
4465         struct mlx5_priv *priv = dev->data->dev_private;
4466         struct mlx5_pools_container *cont;
4467         struct mlx5_flow_counter_pool *pool;
4468         struct mlx5_counters tmp_tq;
4469         struct mlx5_devx_obj *dcs = NULL;
4470         struct mlx5_flow_counter *cnt;
4471         uint32_t i;
4472
4473         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4474         if (!batch) {
4475                 /* bulk_bitmap must be 0 for single counter allocation. */
4476                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4477                 if (!dcs)
4478                         return NULL;
4479                 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4480                 if (!pool) {
4481                         pool = flow_dv_pool_create(dev, dcs, batch, age);
4482                         if (!pool) {
4483                                 mlx5_devx_cmd_destroy(dcs);
4484                                 return NULL;
4485                         }
4486                 } else if (dcs->id < pool->min_dcs->id) {
4487                         rte_atomic64_set(&pool->a64_dcs,
4488                                          (int64_t)(uintptr_t)dcs);
4489                 }
4490                 flow_dv_counter_update_min_dcs(dev,
4491                                                 pool, batch, age);
4492                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4493                 cnt = MLX5_POOL_GET_CNT(pool, i);
4494                 cnt->pool = pool;
4495                 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
4496                 *cnt_free = cnt;
4497                 return pool;
4498         }
4499         /* bulk_bitmap is in 128 counters units. */
4500         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4501                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4502         if (!dcs) {
4503                 rte_errno = ENODATA;
4504                 return NULL;
4505         }
4506         pool = flow_dv_pool_create(dev, dcs, batch, age);
4507         if (!pool) {
4508                 mlx5_devx_cmd_destroy(dcs);
4509                 return NULL;
4510         }
4511         TAILQ_INIT(&tmp_tq);
4512         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4513                 cnt = MLX5_POOL_GET_CNT(pool, i);
4514                 cnt->pool = pool;
4515                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4516         }
4517         rte_spinlock_lock(&cont->csl);
4518         TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
4519         rte_spinlock_unlock(&cont->csl);
4520         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4521         (*cnt_free)->pool = pool;
4522         return pool;
4523 }
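
/*
 * Note on the bulk path above: the 0x4 bulk_bitmap bit requests
 * 4 * 128 = 512 counters in a single devx object, which fills exactly one
 * pool (assuming MLX5_COUNTERS_PER_POOL == 512). Counter 0 is returned to
 * the caller through cnt_free and counters 1..511 are queued on the
 * container free list.
 */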
4524
4525 /**
4526  * Search for an existing shared counter.
4527  *
4528  * @param[in] dev
4529  *   Pointer to the Ethernet device structure.
4530  * @param[in] id
4531  *   The shared counter ID to search.
4532  * @param[out] ppool
4533  *   mlx5 flow counter pool in the container.
4534  *
4535  * @return
4536  *   NULL if not existing, otherwise pointer to the shared extended counter.
4537  */
4538 static struct mlx5_flow_counter_ext *
4539 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
4540                               struct mlx5_flow_counter_pool **ppool)
4541 {
4542         struct mlx5_priv *priv = dev->data->dev_private;
4543         union mlx5_l3t_data data;
4544         uint32_t cnt_idx;
4545
4546         if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
4547                 return NULL;
4548         cnt_idx = data.dword;
4549         /*
4550          * Shared counters don't have age info. The counter extension is
4551          * placed right after the counter data structure.
4552          */
4553         return (struct mlx5_flow_counter_ext *)
4554                ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
4555 }
4556
4557 /**
4558  * Allocate a flow counter.
4559  *
4560  * @param[in] dev
4561  *   Pointer to the Ethernet device structure.
4562  * @param[in] shared
4563  *   Indicate if this counter is shared with other flows.
4564  * @param[in] id
4565  *   Counter identifier.
4566  * @param[in] group
4567  *   Counter flow group.
4568  * @param[in] age
4569  *   Whether the counter was allocated for aging.
4570  *
4571  * @return
4572  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4573  */
4574 static uint32_t
4575 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4576                       uint16_t group, uint32_t age)
4577 {
4578         struct mlx5_priv *priv = dev->data->dev_private;
4579         struct mlx5_flow_counter_pool *pool = NULL;
4580         struct mlx5_flow_counter *cnt_free = NULL;
4581         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4582         /*
4583          * Currently group 0 flow counter cannot be assigned to a flow if it is
4584          * not the first one in the batch counter allocation, so it is better
4585          * to allocate counters one by one for these flows in a separate
4586          * container.
4587          * A counter can be shared between different groups, so shared
4588          * counters are always taken from the single-counter container.
4589          */
4590         uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
4591         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4592                                                                age);
4593         uint32_t cnt_idx;
4594
4595         if (!priv->config.devx) {
4596                 rte_errno = ENOTSUP;
4597                 return 0;
4598         }
4599         if (shared) {
4600                 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
4601                 if (cnt_ext) {
4602                         if (cnt_ext->ref_cnt + 1 == 0) {
4603                                 rte_errno = E2BIG;
4604                                 return 0;
4605                         }
4606                         cnt_ext->ref_cnt++;
4607                         cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
4608                                   (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
4609                                   + 1;
4610                         return cnt_idx;
4611                 }
4612         }
4613         /* Get free counters from container. */
4614         rte_spinlock_lock(&cont->csl);
4615         cnt_free = TAILQ_FIRST(&cont->counters);
4616         if (cnt_free)
4617                 TAILQ_REMOVE(&cont->counters, cnt_free, next);
4618         rte_spinlock_unlock(&cont->csl);
4619         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
4620                                                        batch, age))
4621                 goto err;
4622         pool = cnt_free->pool;
4623         if (!batch)
4624                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
4625         /* Create a DV counter action only in the first time usage. */
4626         if (!cnt_free->action) {
4627                 uint16_t offset;
4628                 struct mlx5_devx_obj *dcs;
4629                 int ret;
4630
4631                 if (batch) {
4632                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4633                         dcs = pool->min_dcs;
4634                 } else {
4635                         offset = 0;
4636                         dcs = cnt_ext->dcs;
4637                 }
4638                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4639                                                             &cnt_free->action);
4640                 if (ret) {
4641                         rte_errno = errno;
4642                         goto err;
4643                 }
4644         }
4645         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4646                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4647         cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
4648         cnt_idx += age * MLX5_CNT_AGE_OFFSET;
4649         /* Update the counter reset values. */
4650         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4651                                  &cnt_free->bytes))
4652                 goto err;
4653         if (cnt_ext) {
4654                 cnt_ext->shared = shared;
4655                 cnt_ext->ref_cnt = 1;
4656                 cnt_ext->id = id;
4657                 if (shared) {
4658                         union mlx5_l3t_data data;
4659
4660                         data.dword = cnt_idx;
4661                         if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
4662                                 return 0;
4663                 }
4664         }
4665         if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
4666                 /* Start the asynchronous batch query by the host thread. */
4667                 mlx5_set_query_alarm(priv->sh);
4668         return cnt_idx;
4669 err:
4670         if (cnt_free) {
4671                 cnt_free->pool = pool;
4672                 rte_spinlock_lock(&cont->csl);
4673                 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
4674                 rte_spinlock_unlock(&cont->csl);
4675         }
4676         return 0;
4677 }
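
/*
 * Illustrative application-side view of the shared path above: two flows
 * created with the same shared counter id end up referencing one devx
 * counter, resolved through the L3T id table. A sketch only; the id value
 * is an example.
 */
static struct rte_flow_action __rte_unused
example_shared_count_action(void)
{
	static const struct rte_flow_action_count count = {
		.shared = 1,
		.id = 10, /* All flows passing id 10 share hits/bytes. */
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &count,
	};

	return action;
}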
4678
4679 /**
4680  * Get age param from counter index.
4681  *
4682  * @param[in] dev
4683  *   Pointer to the Ethernet device structure.
4684  * @param[in] counter
4685  *   Index to the counter handle.
4686  *
4687  * @return
4688  *   The aging parameter specified for the counter index.
4689  */
4690 static struct mlx5_age_param*
4691 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4692                                 uint32_t counter)
4693 {
4694         struct mlx5_flow_counter *cnt;
4695         struct mlx5_flow_counter_pool *pool = NULL;
4696
4697         flow_dv_counter_get_by_idx(dev, counter, &pool);
4698         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4699         cnt = MLX5_POOL_GET_CNT(pool, counter);
4700         return MLX5_CNT_TO_AGE(cnt);
4701 }
4702
4703 /**
4704  * Remove a flow counter from aged counter list.
4705  *
4706  * @param[in] dev
4707  *   Pointer to the Ethernet device structure.
4708  * @param[in] counter
4709  *   Index to the counter handle.
4710  * @param[in] cnt
4711  *   Pointer to the counter handle.
4712  */
4713 static void
4714 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
4715                                 uint32_t counter, struct mlx5_flow_counter *cnt)
4716 {
4717         struct mlx5_age_info *age_info;
4718         struct mlx5_age_param *age_param;
4719         struct mlx5_priv *priv = dev->data->dev_private;
4720
4721         age_info = GET_PORT_AGE_INFO(priv);
4722         age_param = flow_dv_counter_idx_get_age(dev, counter);
4723         /* Cmpset returns a success flag, not the old state value. */
4724         if (!rte_atomic16_cmpset((volatile uint16_t *)
4725                         &age_param->state,
4726                         AGE_CANDIDATE, AGE_FREE)) {
4727                 /*
4728                  * We need the lock even on age timeout, since the
4729                  * counter may still be in use by the aging process.
4730                  */
4731                 rte_spinlock_lock(&age_info->aged_sl);
4732                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
4733                 rte_spinlock_unlock(&age_info->aged_sl);
4734         }
4735         rte_atomic16_set(&age_param->state, AGE_FREE);
4736 }
4737 /**
4738  * Release a flow counter.
4739  *
4740  * @param[in] dev
4741  *   Pointer to the Ethernet device structure.
4742  * @param[in] counter
4743  *   Index to the counter handle.
4744  */
4745 static void
4746 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
4747 {
4748         struct mlx5_priv *priv = dev->data->dev_private;
4749         struct mlx5_flow_counter_pool *pool = NULL;
4750         struct mlx5_flow_counter *cnt;
4751         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4752
4753         if (!counter)
4754                 return;
4755         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4756         MLX5_ASSERT(pool);
4757         if (counter < MLX5_CNT_BATCH_OFFSET) {
4758                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4759                 if (cnt_ext) {
4760                         if (--cnt_ext->ref_cnt)
4761                                 return;
4762                         if (cnt_ext->shared)
4763                                 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
4764                                                      cnt_ext->id);
4765                 }
4766         }
4767         if (IS_AGE_POOL(pool))
4768                 flow_dv_counter_remove_from_age(dev, counter, cnt);
4769         cnt->pool = pool;
4770         /*
4771          * Put the counter back to a list to be updated in non-fallback mode.
4772          * Currently two lists are used alternately: while one is being
4773          * queried, freed counters are added to the other one, selected by
4774          * the pool query_gen value. After the query finishes, the counters
4775          * on that list are moved to the global container counter list. The
4776          * active list switches when a query starts, so no lock is needed as
4777          * the query callback and the release function always operate on
4778          * different lists.
4779          */
4780         if (!priv->counter_fallback)
4781                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
4782         else
4783                 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
4784                                   (priv->sh, 0, 0))->counters),
4785                                   cnt, next);
4786 }
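
/*
 * Sketch of the two-generation handoff used above (release-side view only;
 * the exact flip lives in the asynchronous query handler): counters freed
 * while generation G is being queried are queued on pool->counters[G];
 * when that query completes, the handler migrates the queued counters to
 * the global container free list and flips the generation, so the release
 * path and the query callback never walk the same list concurrently.
 */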
4787
4788 /**
4789  * Verify the @p attributes will be correctly understood by the NIC and store
4790  * them in the @p flow if everything is correct.
4791  *
4792  * @param[in] dev
4793  *   Pointer to dev struct.
4794  * @param[in] attributes
4795  *   Pointer to flow attributes.
4796  * @param[in] external
4797  *   This flow rule is created by a request external to the PMD.
4798  * @param[out] error
4799  *   Pointer to error structure.
4800  *
4801  * @return
4802  *   - 0 on success and non-root table.
4803  *   - 1 on success and root table.
4804  *   - a negative errno value otherwise and rte_errno is set.
4805  */
4806 static int
4807 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4808                             const struct rte_flow_attr *attributes,
4809                             bool external __rte_unused,
4810                             struct rte_flow_error *error)
4811 {
4812         struct mlx5_priv *priv = dev->data->dev_private;
4813         uint32_t priority_max = priv->config.flow_prio - 1;
4814         int ret = 0;
4815
4816 #ifndef HAVE_MLX5DV_DR
4817         if (attributes->group)
4818                 return rte_flow_error_set(error, ENOTSUP,
4819                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4820                                           NULL,
4821                                           "groups are not supported");
4822 #else
4823         uint32_t table = 0;
4824
4825         ret = mlx5_flow_group_to_table(attributes, external,
4826                                        attributes->group, !!priv->fdb_def_rule,
4827                                        &table, error);
4828         if (ret)
4829                 return ret;
4830         if (!table)
4831                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4832 #endif
4833         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4834             attributes->priority >= priority_max)
4835                 return rte_flow_error_set(error, ENOTSUP,
4836                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4837                                           NULL,
4838                                           "priority out of range");
4839         if (attributes->transfer) {
4840                 if (!priv->config.dv_esw_en)
4841                         return rte_flow_error_set
4842                                 (error, ENOTSUP,
4843                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4844                                  "E-Switch dr is not supported");
4845                 if (!(priv->representor || priv->master))
4846                         return rte_flow_error_set
4847                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4848                                  NULL, "E-Switch configuration can only be"
4849                                  " done by a master or a representor device");
4850                 if (attributes->egress)
4851                         return rte_flow_error_set
4852                                 (error, ENOTSUP,
4853                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4854                                  "egress is not supported");
4855         }
4856         if (!(attributes->egress ^ attributes->ingress))
4857                 return rte_flow_error_set(error, ENOTSUP,
4858                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4859                                           "must specify exactly one of "
4860                                           "ingress or egress");
4861         return ret;
4862 }
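
/*
 * Examples against the attribute rules above (illustrative only): on an
 * E-Switch-enabled master/representor port,
 *   { .group = 1, .priority = 0, .ingress = 1, .transfer = 1 }
 * validates and reports a non-root table, while
 *   { .transfer = 1, .egress = 1 } is rejected (egress is unsupported in
 * transfer mode) and { .ingress = 1, .egress = 1 } is rejected because
 * exactly one direction must be set.
 */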
4863
4864 /**
4865  * Internal validation function. For validating both actions and items.
4866  *
4867  * @param[in] dev
4868  *   Pointer to the rte_eth_dev structure.
4869  * @param[in] attr
4870  *   Pointer to the flow attributes.
4871  * @param[in] items
4872  *   Pointer to the list of items.
4873  * @param[in] actions
4874  *   Pointer to the list of actions.
4875  * @param[in] external
4876  *   This flow rule is created by a request external to the PMD.
4877  * @param[in] hairpin
4878  *   Number of hairpin TX actions, 0 means classic flow.
4879  * @param[out] error
4880  *   Pointer to the error structure.
4881  *
4882  * @return
4883  *   0 on success, a negative errno value otherwise and rte_errno is set.
4884  */
4885 static int
4886 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4887                  const struct rte_flow_item items[],
4888                  const struct rte_flow_action actions[],
4889                  bool external, int hairpin, struct rte_flow_error *error)
4890 {
4891         int ret;
4892         uint64_t action_flags = 0;
4893         uint64_t item_flags = 0;
4894         uint64_t last_item = 0;
4895         uint8_t next_protocol = 0xff;
4896         uint16_t ether_type = 0;
4897         int actions_n = 0;
4898         uint8_t item_ipv6_proto = 0;
4899         const struct rte_flow_item *gre_item = NULL;
4900         const struct rte_flow_action_raw_decap *decap;
4901         const struct rte_flow_action_raw_encap *encap;
4902         const struct rte_flow_action_rss *rss;
4903         const struct rte_flow_item_tcp nic_tcp_mask = {
4904                 .hdr = {
4905                         .tcp_flags = 0xFF,
4906                         .src_port = RTE_BE16(UINT16_MAX),
4907                         .dst_port = RTE_BE16(UINT16_MAX),
4908                 }
4909         };
4910         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
4911                 .hdr = {
4912                         .src_addr = RTE_BE32(0xffffffff),
4913                         .dst_addr = RTE_BE32(0xffffffff),
4914                         .type_of_service = 0xff,
4915                         .next_proto_id = 0xff,
4916                         .time_to_live = 0xff,
4917                 },
4918         };
4919         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
4920                 .hdr = {
4921                         .src_addr =
4922                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4923                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4924                         .dst_addr =
4925                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4926                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4927                         .vtc_flow = RTE_BE32(0xffffffff),
4928                         .proto = 0xff,
4929                         .hop_limits = 0xff,
4930                 },
4931         };
4932         const struct rte_flow_item_ecpri nic_ecpri_mask = {
4933                 .hdr = {
4934                         .common = {
4935                                 .u32 =
4936                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
4937                                         .type = 0xFF,
4938                                         }).u32),
4939                         },
4940                         .dummy[0] = 0xffffffff,
4941                 },
4942         };
4943         struct mlx5_priv *priv = dev->data->dev_private;
4944         struct mlx5_dev_config *dev_conf = &priv->config;
4945         uint16_t queue_index = 0xFFFF;
4946         const struct rte_flow_item_vlan *vlan_m = NULL;
4947         int16_t rw_act_num = 0;
4948         uint64_t is_root;
4949
4950         if (items == NULL)
4951                 return -1;
4952         ret = flow_dv_validate_attributes(dev, attr, external, error);
4953         if (ret < 0)
4954                 return ret;
4955         is_root = (uint64_t)ret;
4956         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4957                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4958                 int type = items->type;
4959
4960                 if (!mlx5_flow_os_item_supported(type))
4961                         return rte_flow_error_set(error, ENOTSUP,
4962                                                   RTE_FLOW_ERROR_TYPE_ITEM,
4963                                                   NULL, "item not supported");
4964                 switch (type) {
4965                 case RTE_FLOW_ITEM_TYPE_VOID:
4966                         break;
4967                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4968                         ret = flow_dv_validate_item_port_id
4969                                         (dev, items, attr, item_flags, error);
4970                         if (ret < 0)
4971                                 return ret;
4972                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4973                         break;
4974                 case RTE_FLOW_ITEM_TYPE_ETH:
4975                         ret = mlx5_flow_validate_item_eth(items, item_flags,
4976                                                           error);
4977                         if (ret < 0)
4978                                 return ret;
4979                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4980                                              MLX5_FLOW_LAYER_OUTER_L2;
4981                         if (items->mask != NULL && items->spec != NULL) {
4982                                 ether_type =
4983                                         ((const struct rte_flow_item_eth *)
4984                                          items->spec)->type;
4985                                 ether_type &=
4986                                         ((const struct rte_flow_item_eth *)
4987                                          items->mask)->type;
4988                                 ether_type = rte_be_to_cpu_16(ether_type);
4989                         } else {
4990                                 ether_type = 0;
4991                         }
4992                         break;
4993                 case RTE_FLOW_ITEM_TYPE_VLAN:
4994                         ret = flow_dv_validate_item_vlan(items, item_flags,
4995                                                          dev, error);
4996                         if (ret < 0)
4997                                 return ret;
4998                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4999                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5000                         if (items->mask != NULL && items->spec != NULL) {
5001                                 ether_type =
5002                                         ((const struct rte_flow_item_vlan *)
5003                                          items->spec)->inner_type;
5004                                 ether_type &=
5005                                         ((const struct rte_flow_item_vlan *)
5006                                          items->mask)->inner_type;
5007                                 ether_type = rte_be_to_cpu_16(ether_type);
5008                         } else {
5009                                 ether_type = 0;
5010                         }
5011                         /* Store outer VLAN mask for of_push_vlan action. */
5012                         if (!tunnel)
5013                                 vlan_m = items->mask;
5014                         break;
5015                 case RTE_FLOW_ITEM_TYPE_IPV4:
5016                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5017                                                   &item_flags, &tunnel);
5018                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
5019                                                            last_item,
5020                                                            ether_type,
5021                                                            &nic_ipv4_mask,
5022                                                            error);
5023                         if (ret < 0)
5024                                 return ret;
5025                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5026                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5027                         if (items->mask != NULL &&
5028                             ((const struct rte_flow_item_ipv4 *)
5029                              items->mask)->hdr.next_proto_id) {
5030                                 next_protocol =
5031                                         ((const struct rte_flow_item_ipv4 *)
5032                                          (items->spec))->hdr.next_proto_id;
5033                                 next_protocol &=
5034                                         ((const struct rte_flow_item_ipv4 *)
5035                                          (items->mask))->hdr.next_proto_id;
5036                         } else {
5037                                 /* Reset for inner layer. */
5038                                 next_protocol = 0xff;
5039                         }
5040                         break;
5041                 case RTE_FLOW_ITEM_TYPE_IPV6:
5042                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5043                                                   &item_flags, &tunnel);
5044                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5045                                                            last_item,
5046                                                            ether_type,
5047                                                            &nic_ipv6_mask,
5048                                                            error);
5049                         if (ret < 0)
5050                                 return ret;
5051                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5052                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5053                         if (items->mask != NULL &&
5054                             ((const struct rte_flow_item_ipv6 *)
5055                              items->mask)->hdr.proto) {
5056                                 item_ipv6_proto =
5057                                         ((const struct rte_flow_item_ipv6 *)
5058                                          items->spec)->hdr.proto;
5059                                 next_protocol =
5060                                         ((const struct rte_flow_item_ipv6 *)
5061                                          items->spec)->hdr.proto;
5062                                 next_protocol &=
5063                                         ((const struct rte_flow_item_ipv6 *)
5064                                          items->mask)->hdr.proto;
5065                         } else {
5066                                 /* Reset for inner layer. */
5067                                 next_protocol = 0xff;
5068                         }
5069                         break;
5070                 case RTE_FLOW_ITEM_TYPE_TCP:
5071                         ret = mlx5_flow_validate_item_tcp
5072                                                 (items, item_flags,
5073                                                  next_protocol,
5074                                                  &nic_tcp_mask,
5075                                                  error);
5076                         if (ret < 0)
5077                                 return ret;
5078                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5079                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5080                         break;
5081                 case RTE_FLOW_ITEM_TYPE_UDP:
5082                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5083                                                           next_protocol,
5084                                                           error);
5085                         if (ret < 0)
5086                                 return ret;
5087                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5088                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5089                         break;
5090                 case RTE_FLOW_ITEM_TYPE_GRE:
5091                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5092                                                           next_protocol, error);
5093                         if (ret < 0)
5094                                 return ret;
5095                         gre_item = items;
5096                         last_item = MLX5_FLOW_LAYER_GRE;
5097                         break;
5098                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5099                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5100                                                             next_protocol,
5101                                                             error);
5102                         if (ret < 0)
5103                                 return ret;
5104                         last_item = MLX5_FLOW_LAYER_NVGRE;
5105                         break;
5106                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5107                         ret = mlx5_flow_validate_item_gre_key
5108                                 (items, item_flags, gre_item, error);
5109                         if (ret < 0)
5110                                 return ret;
5111                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5112                         break;
5113                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5114                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5115                                                             error);
5116                         if (ret < 0)
5117                                 return ret;
5118                         last_item = MLX5_FLOW_LAYER_VXLAN;
5119                         break;
5120                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5121                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5122                                                                 item_flags, dev,
5123                                                                 error);
5124                         if (ret < 0)
5125                                 return ret;
5126                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5127                         break;
5128                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5129                         ret = mlx5_flow_validate_item_geneve(items,
5130                                                              item_flags, dev,
5131                                                              error);
5132                         if (ret < 0)
5133                                 return ret;
5134                         last_item = MLX5_FLOW_LAYER_GENEVE;
5135                         break;
5136                 case RTE_FLOW_ITEM_TYPE_MPLS:
5137                         ret = mlx5_flow_validate_item_mpls(dev, items,
5138                                                            item_flags,
5139                                                            last_item, error);
5140                         if (ret < 0)
5141                                 return ret;
5142                         last_item = MLX5_FLOW_LAYER_MPLS;
5143                         break;
5145                 case RTE_FLOW_ITEM_TYPE_MARK:
5146                         ret = flow_dv_validate_item_mark(dev, items, attr,
5147                                                          error);
5148                         if (ret < 0)
5149                                 return ret;
5150                         last_item = MLX5_FLOW_ITEM_MARK;
5151                         break;
5152                 case RTE_FLOW_ITEM_TYPE_META:
5153                         ret = flow_dv_validate_item_meta(dev, items, attr,
5154                                                          error);
5155                         if (ret < 0)
5156                                 return ret;
5157                         last_item = MLX5_FLOW_ITEM_METADATA;
5158                         break;
5159                 case RTE_FLOW_ITEM_TYPE_ICMP:
5160                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5161                                                            next_protocol,
5162                                                            error);
5163                         if (ret < 0)
5164                                 return ret;
5165                         last_item = MLX5_FLOW_LAYER_ICMP;
5166                         break;
5167                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5168                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5169                                                             next_protocol,
5170                                                             error);
5171                         if (ret < 0)
5172                                 return ret;
5173                         item_ipv6_proto = IPPROTO_ICMPV6;
5174                         last_item = MLX5_FLOW_LAYER_ICMP6;
5175                         break;
5176                 case RTE_FLOW_ITEM_TYPE_TAG:
5177                         ret = flow_dv_validate_item_tag(dev, items,
5178                                                         attr, error);
5179                         if (ret < 0)
5180                                 return ret;
5181                         last_item = MLX5_FLOW_ITEM_TAG;
5182                         break;
5183                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5184                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5185                         break;
5186                 case RTE_FLOW_ITEM_TYPE_GTP:
5187                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5188                                                         error);
5189                         if (ret < 0)
5190                                 return ret;
5191                         last_item = MLX5_FLOW_LAYER_GTP;
5192                         break;
5193                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5194                         /* Capacity will be checked in the translate stage. */
5195                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5196                                                             last_item,
5197                                                             ether_type,
5198                                                             &nic_ecpri_mask,
5199                                                             error);
5200                         if (ret < 0)
5201                                 return ret;
5202                         last_item = MLX5_FLOW_LAYER_ECPRI;
5203                         break;
5204                 default:
5205                         return rte_flow_error_set(error, ENOTSUP,
5206                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5207                                                   NULL, "item not supported");
5208                 }
5209                 item_flags |= last_item;
5210         }
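        /*
         * A minimal pattern that the item walk above accepts, shown as a
         * hedged sketch (values are hypothetical; rte_flow.h and
         * netinet/in.h are assumed on the application side):
         *
         * @code
         *      struct rte_flow_item_ipv4 ip4_spec = {
         *              .hdr.next_proto_id = IPPROTO_UDP,
         *      };
         *      struct rte_flow_item_ipv4 ip4_mask = {
         *              .hdr.next_proto_id = 0xff,
         *      };
         *      struct rte_flow_item pattern[] = {
         *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
         *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
         *                .spec = &ip4_spec, .mask = &ip4_mask },
         *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
         *              { .type = RTE_FLOW_ITEM_TYPE_END },
         *      };
         * @endcode
         *
         * Because next_proto_id is masked, next_protocol is narrowed to
         * IPPROTO_UDP and the subsequent UDP item validates cleanly.
         */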
5211         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5212                 int type = actions->type;
5213
5214                 if (!mlx5_flow_os_action_supported(type))
5215                         return rte_flow_error_set(error, ENOTSUP,
5216                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5217                                                   actions,
5218                                                   "action not supported");
5219                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5220                         return rte_flow_error_set(error, ENOTSUP,
5221                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5222                                                   actions, "too many actions");
5223                 switch (type) {
5224                 case RTE_FLOW_ACTION_TYPE_VOID:
5225                         break;
5226                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5227                         ret = flow_dv_validate_action_port_id(dev,
5228                                                               action_flags,
5229                                                               actions,
5230                                                               attr,
5231                                                               error);
5232                         if (ret)
5233                                 return ret;
5234                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5235                         ++actions_n;
5236                         break;
5237                 case RTE_FLOW_ACTION_TYPE_FLAG:
5238                         ret = flow_dv_validate_action_flag(dev, action_flags,
5239                                                            attr, error);
5240                         if (ret < 0)
5241                                 return ret;
5242                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5243                                 /* Count all modify-header actions as one. */
5244                                 if (!(action_flags &
5245                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5246                                         ++actions_n;
5247                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5248                                                 MLX5_FLOW_ACTION_MARK_EXT;
5249                         } else {
5250                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5251                                 ++actions_n;
5252                         }
5253                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5254                         break;
5255                 case RTE_FLOW_ACTION_TYPE_MARK:
5256                         ret = flow_dv_validate_action_mark(dev, actions,
5257                                                            action_flags,
5258                                                            attr, error);
5259                         if (ret < 0)
5260                                 return ret;
5261                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5262                                 /* Count all modify-header actions as one. */
5263                                 if (!(action_flags &
5264                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5265                                         ++actions_n;
5266                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5267                                                 MLX5_FLOW_ACTION_MARK_EXT;
5268                         } else {
5269                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5270                                 ++actions_n;
5271                         }
5272                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5273                         break;
5274                 case RTE_FLOW_ACTION_TYPE_SET_META:
5275                         ret = flow_dv_validate_action_set_meta(dev, actions,
5276                                                                action_flags,
5277                                                                attr, error);
5278                         if (ret < 0)
5279                                 return ret;
5280                         /* Count all modify-header actions as one action. */
5281                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5282                                 ++actions_n;
5283                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5284                         rw_act_num += MLX5_ACT_NUM_SET_META;
5285                         break;
5286                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5287                         ret = flow_dv_validate_action_set_tag(dev, actions,
5288                                                               action_flags,
5289                                                               attr, error);
5290                         if (ret < 0)
5291                                 return ret;
5292                         /* Count all modify-header actions as one action. */
5293                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5294                                 ++actions_n;
5295                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5296                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5297                         break;
5298                 case RTE_FLOW_ACTION_TYPE_DROP:
5299                         ret = mlx5_flow_validate_action_drop(action_flags,
5300                                                              attr, error);
5301                         if (ret < 0)
5302                                 return ret;
5303                         action_flags |= MLX5_FLOW_ACTION_DROP;
5304                         ++actions_n;
5305                         break;
5306                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5307                         ret = mlx5_flow_validate_action_queue(actions,
5308                                                               action_flags, dev,
5309                                                               attr, error);
5310                         if (ret < 0)
5311                                 return ret;
5312                         queue_index = ((const struct rte_flow_action_queue *)
5313                                                         (actions->conf))->index;
5314                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5315                         ++actions_n;
5316                         break;
5317                 case RTE_FLOW_ACTION_TYPE_RSS:
5318                         rss = actions->conf;
5319                         ret = mlx5_flow_validate_action_rss(actions,
5320                                                             action_flags, dev,
5321                                                             attr, item_flags,
5322                                                             error);
5323                         if (ret < 0)
5324                                 return ret;
5325                         if (rss != NULL && rss->queue_num)
5326                                 queue_index = rss->queue[0];
5327                         action_flags |= MLX5_FLOW_ACTION_RSS;
5328                         ++actions_n;
5329                         break;
5330                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5331                         ret = mlx5_flow_validate_action_default_miss
5332                                                 (action_flags, attr, error);
5334                         if (ret < 0)
5335                                 return ret;
5336                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5337                         ++actions_n;
5338                         break;
5339                 case RTE_FLOW_ACTION_TYPE_COUNT:
5340                         ret = flow_dv_validate_action_count(dev, error);
5341                         if (ret < 0)
5342                                 return ret;
5343                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5344                         ++actions_n;
5345                         break;
5346                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5347                         if (flow_dv_validate_action_pop_vlan(dev,
5348                                                              action_flags,
5349                                                              actions,
5350                                                              item_flags, attr,
5351                                                              error))
5352                                 return -rte_errno;
5353                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5354                         ++actions_n;
5355                         break;
5356                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5357                         ret = flow_dv_validate_action_push_vlan(dev,
5358                                                                 action_flags,
5359                                                                 vlan_m,
5360                                                                 actions, attr,
5361                                                                 error);
5362                         if (ret < 0)
5363                                 return ret;
5364                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5365                         ++actions_n;
5366                         break;
5367                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5368                         ret = flow_dv_validate_action_set_vlan_pcp
5369                                                 (action_flags, actions, error);
5370                         if (ret < 0)
5371                                 return ret;
5372                         /* Count PCP with push_vlan command. */
5373                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5374                         break;
5375                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5376                         ret = flow_dv_validate_action_set_vlan_vid
5377                                                 (item_flags, action_flags,
5378                                                  actions, error);
5379                         if (ret < 0)
5380                                 return ret;
5381                         /* Count VID with push_vlan command. */
5382                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5383                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5384                         break;
5385                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5386                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5387                         ret = flow_dv_validate_action_l2_encap(dev,
5388                                                                action_flags,
5389                                                                actions, attr,
5390                                                                error);
5391                         if (ret < 0)
5392                                 return ret;
5393                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5394                         ++actions_n;
5395                         break;
5396                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5397                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5398                         ret = flow_dv_validate_action_decap(dev, action_flags,
5399                                                             attr, error);
5400                         if (ret < 0)
5401                                 return ret;
5402                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5403                         ++actions_n;
5404                         break;
5405                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5406                         ret = flow_dv_validate_action_raw_encap_decap
5407                                 (dev, NULL, actions->conf, attr, &action_flags,
5408                                  &actions_n, error);
5409                         if (ret < 0)
5410                                 return ret;
5411                         break;
5412                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5413                         decap = actions->conf;
5414                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5415                                 ;
5416                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5417                                 encap = NULL;
5418                                 actions--;
5419                         } else {
5420                                 encap = actions->conf;
5421                         }
5422                         ret = flow_dv_validate_action_raw_encap_decap
5423                                            (dev,
5424                                             decap ? decap : &empty_decap, encap,
5425                                             attr, &action_flags, &actions_n,
5426                                             error);
5427                         if (ret < 0)
5428                                 return ret;
5429                         break;
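                        /*
                         * The VOID skip above lets a raw decap pair with an
                         * immediately following raw encap, e.g. rewriting a
                         * tunnel header. A hedged sketch (buffers and sizes
                         * are hypothetical):
                         *
                         * @code
                         *      struct rte_flow_action_raw_decap d = {
                         *              .data = old_hdr, .size = old_len,
                         *      };
                         *      struct rte_flow_action_raw_encap e = {
                         *              .data = new_hdr, .size = new_len,
                         *      };
                         *      struct rte_flow_action acts[] = {
                         *              { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP,
                         *                .conf = &d },
                         *              { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
                         *                .conf = &e },
                         *              { .type = RTE_FLOW_ACTION_TYPE_END },
                         *      };
                         * @endcode
                         */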
5430                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5431                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5432                         ret = flow_dv_validate_action_modify_mac(action_flags,
5433                                                                  actions,
5434                                                                  item_flags,
5435                                                                  error);
5436                         if (ret < 0)
5437                                 return ret;
5438                         /* Count all modify-header actions as one action. */
5439                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5440                                 ++actions_n;
5441                         action_flags |= actions->type ==
5442                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5443                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5444                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5445                         /*
5446                          * Even if the source and destination MAC addresses
5447                          * overlap in the header with 4B alignment, the
5448                          * convert function handles them separately and
5449                          * creates 4 SW actions. And 2 actions are added
5450                          * each time, no matter how many bytes of the
5451                          * address are set.
5452                          */
5452                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5453                         break;
5454                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5455                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5456                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5457                                                                   actions,
5458                                                                   item_flags,
5459                                                                   error);
5460                         if (ret < 0)
5461                                 return ret;
5462                         /* Count all modify-header actions as one action. */
5463                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5464                                 ++actions_n;
5465                         action_flags |= actions->type ==
5466                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5467                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5468                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5469                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5470                         break;
5471                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5472                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5473                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5474                                                                   actions,
5475                                                                   item_flags,
5476                                                                   error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5480                                 return rte_flow_error_set(error, ENOTSUP,
5481                                         RTE_FLOW_ERROR_TYPE_ACTION,
5482                                         actions,
5483                                         "Can't change header "
5484                                         "with ICMPv6 proto");
5485                         /* Count all modify-header actions as one action. */
5486                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5487                                 ++actions_n;
5488                         action_flags |= actions->type ==
5489                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5490                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5491                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5492                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5493                         break;
5494                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5495                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5496                         ret = flow_dv_validate_action_modify_tp(action_flags,
5497                                                                 actions,
5498                                                                 item_flags,
5499                                                                 error);
5500                         if (ret < 0)
5501                                 return ret;
5502                         /* Count all modify-header actions as one action. */
5503                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5504                                 ++actions_n;
5505                         action_flags |= actions->type ==
5506                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5507                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5508                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5509                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5510                         break;
5511                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5512                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5513                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5514                                                                  actions,
5515                                                                  item_flags,
5516                                                                  error);
5517                         if (ret < 0)
5518                                 return ret;
5519                         /* Count all modify-header actions as one action. */
5520                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5521                                 ++actions_n;
5522                         action_flags |= actions->type ==
5523                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5524                                                 MLX5_FLOW_ACTION_SET_TTL :
5525                                                 MLX5_FLOW_ACTION_DEC_TTL;
5526                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5527                         break;
5528                 case RTE_FLOW_ACTION_TYPE_JUMP:
5529                         ret = flow_dv_validate_action_jump(actions,
5530                                                            action_flags,
5531                                                            attr, external,
5532                                                            error);
5533                         if (ret)
5534                                 return ret;
5535                         ++actions_n;
5536                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5537                         break;
5538                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5539                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5540                         ret = flow_dv_validate_action_modify_tcp_seq
5541                                                                 (action_flags,
5542                                                                  actions,
5543                                                                  item_flags,
5544                                                                  error);
5545                         if (ret < 0)
5546                                 return ret;
5547                         /* Count all modify-header actions as one action. */
5548                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5549                                 ++actions_n;
5550                         action_flags |= actions->type ==
5551                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5552                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5553                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5554                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5555                         break;
5556                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5557                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5558                         ret = flow_dv_validate_action_modify_tcp_ack
5559                                                                 (action_flags,
5560                                                                  actions,
5561                                                                  item_flags,
5562                                                                  error);
5563                         if (ret < 0)
5564                                 return ret;
5565                         /* Count all modify-header actions as one action. */
5566                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5567                                 ++actions_n;
5568                         action_flags |= actions->type ==
5569                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5570                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5571                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5572                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5573                         break;
5574                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5575                         break;
5576                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5577                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5578                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5579                         break;
5580                 case RTE_FLOW_ACTION_TYPE_METER:
5581                         ret = mlx5_flow_validate_action_meter(dev,
5582                                                               action_flags,
5583                                                               actions, attr,
5584                                                               error);
5585                         if (ret < 0)
5586                                 return ret;
5587                         action_flags |= MLX5_FLOW_ACTION_METER;
5588                         ++actions_n;
5589                         /* Meter action will add one more TAG action. */
5590                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5591                         break;
5592                 case RTE_FLOW_ACTION_TYPE_AGE:
5593                         ret = flow_dv_validate_action_age(action_flags,
5594                                                           actions, dev,
5595                                                           error);
5596                         if (ret < 0)
5597                                 return ret;
5598                         action_flags |= MLX5_FLOW_ACTION_AGE;
5599                         ++actions_n;
5600                         break;
5601                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5602                         ret = flow_dv_validate_action_modify_ipv4_dscp
5603                                                          (action_flags,
5604                                                           actions,
5605                                                           item_flags,
5606                                                           error);
5607                         if (ret < 0)
5608                                 return ret;
5609                         /* Count all modify-header actions as one action. */
5610                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5611                                 ++actions_n;
5612                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5613                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5614                         break;
5615                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5616                         ret = flow_dv_validate_action_modify_ipv6_dscp
5617                                                                 (action_flags,
5618                                                                  actions,
5619                                                                  item_flags,
5620                                                                  error);
5621                         if (ret < 0)
5622                                 return ret;
5623                         /* Count all modify-header actions as one action. */
5624                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5625                                 ++actions_n;
5626                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5627                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5628                         break;
5629                 default:
5630                         return rte_flow_error_set(error, ENOTSUP,
5631                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5632                                                   actions,
5633                                                   "action not supported");
5634                 }
5635         }
5636         /*
5637          * Validate the drop action mutual exclusion with other actions.
5638          * Drop action is mutually exclusive with any other action, except for
5639          * Count action.
5640          */
5641         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5642             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5643                 return rte_flow_error_set(error, EINVAL,
5644                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5645                                           "Drop action is mutually-exclusive "
5646                                           "with any other action, except for "
5647                                           "Count action");
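        /*
         * For example, counting dropped packets is the one combination the
         * check above still allows (a hedged sketch; the count conf is
         * hypothetical):
         *
         * @code
         *      struct rte_flow_action_count cnt = { .id = 0 };
         *      struct rte_flow_action acts[] = {
         *              { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
         *              { .type = RTE_FLOW_ACTION_TYPE_DROP },
         *              { .type = RTE_FLOW_ACTION_TYPE_END },
         *      };
         * @endcode
         */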
5648         /* E-Switch has a few restrictions on items and actions. */
5649         if (attr->transfer) {
5650                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5651                     action_flags & MLX5_FLOW_ACTION_FLAG)
5652                         return rte_flow_error_set(error, ENOTSUP,
5653                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5654                                                   NULL,
5655                                                   "unsupported action FLAG");
5656                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5657                     action_flags & MLX5_FLOW_ACTION_MARK)
5658                         return rte_flow_error_set(error, ENOTSUP,
5659                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5660                                                   NULL,
5661                                                   "unsupported action MARK");
5662                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5663                         return rte_flow_error_set(error, ENOTSUP,
5664                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5665                                                   NULL,
5666                                                   "unsupported action QUEUE");
5667                 if (action_flags & MLX5_FLOW_ACTION_RSS)
5668                         return rte_flow_error_set(error, ENOTSUP,
5669                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5670                                                   NULL,
5671                                                   "unsupported action RSS");
5672                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5673                         return rte_flow_error_set(error, EINVAL,
5674                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5675                                                   actions,
5676                                                   "no fate action is found");
5677         } else {
5678                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5679                         return rte_flow_error_set(error, EINVAL,
5680                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5681                                                   actions,
5682                                                   "no fate action is found");
5683         }
5684         /* Continue validation for Xcap actions. */
5685         if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
5686             mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5687                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5688                     MLX5_FLOW_XCAP_ACTIONS)
5689                         return rte_flow_error_set(error, ENOTSUP,
5690                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5691                                                   NULL, "encap and decap "
5692                                                   "combination isn't supported");
5693                 if (!attr->transfer && attr->ingress && (action_flags &
5694                                                         MLX5_FLOW_ACTION_ENCAP))
5695                         return rte_flow_error_set(error, ENOTSUP,
5696                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5697                                                   NULL, "encap is not supported"
5698                                                   " for ingress traffic");
5699         }
5700         /* Hairpin flow will add one more TAG action. */
5701         if (hairpin > 0)
5702                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5703         /* Extra metadata enabled: one more TAG action will be added. */
5704         if (dev_conf->dv_flow_en &&
5705             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
5706             mlx5_flow_ext_mreg_supported(dev))
5707                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
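        /*
         * Worked example of the accounting above (hypothetical flow): a
         * hairpin flow doing set_ipv4_src + set_ttl consumes
         * MLX5_ACT_NUM_MDF_IPV4 + MLX5_ACT_NUM_MDF_TTL entries, plus one
         * MLX5_ACT_NUM_SET_TAG for hairpin and, with extended metadata
         * enabled, one more; the total must not exceed
         * flow_dv_modify_hdr_action_max(dev, is_root), checked below.
         */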
5708         if ((uint32_t)rw_act_num >
5709                         flow_dv_modify_hdr_action_max(dev, is_root)) {
5710                 return rte_flow_error_set(error, ENOTSUP,
5711                                           RTE_FLOW_ERROR_TYPE_ACTION,
5712                                           NULL, "too many header modify"
5713                                           " actions to support");
5714         }
5715         return 0;
5716 }
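
/*
 * A hedged sketch of how an application reaches the validation above
 * through the generic rte_flow layer (port_id, pattern and actions are
 * hypothetical and defined elsewhere):
 *
 * @code
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_error err;
 *      int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *
 *      if (rc)
 *              printf("flow rejected: %s\n",
 *                     err.message ? err.message : "(none)");
 * @endcode
 */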
5717
5718 /**
5719  * Internal preparation function. Allocates the DV flow structure;
5720  * its size is constant.
5721  *
5722  * @param[in] dev
5723  *   Pointer to the rte_eth_dev structure.
5724  * @param[in] attr
5725  *   Pointer to the flow attributes.
5726  * @param[in] items
5727  *   Pointer to the list of items.
5728  * @param[in] actions
5729  *   Pointer to the list of actions.
5730  * @param[out] error
5731  *   Pointer to the error structure.
5732  *
5733  * @return
5734  *   Pointer to mlx5_flow object on success,
5735  *   otherwise NULL and rte_errno is set.
5736  */
5737 static struct mlx5_flow *
5738 flow_dv_prepare(struct rte_eth_dev *dev,
5739                 const struct rte_flow_attr *attr __rte_unused,
5740                 const struct rte_flow_item items[] __rte_unused,
5741                 const struct rte_flow_action actions[] __rte_unused,
5742                 struct rte_flow_error *error)
5743 {
5744         uint32_t handle_idx = 0;
5745         struct mlx5_flow *dev_flow;
5746         struct mlx5_flow_handle *dev_handle;
5747         struct mlx5_priv *priv = dev->data->dev_private;
5748
5749         /* Check to avoid corrupting the temporary flow memory. */
5750         if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
5751                 rte_flow_error_set(error, ENOSPC,
5752                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5753                                    "no free temporary device flow");
5754                 return NULL;
5755         }
5756         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
5757                                    &handle_idx);
5758         if (!dev_handle) {
5759                 rte_flow_error_set(error, ENOMEM,
5760                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5761                                    "not enough memory to create flow handle");
5762                 return NULL;
5763         }
5764         /* Multi-thread flow creation is not supported. */
5765         dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
5766         dev_flow->handle = dev_handle;
5767         dev_flow->handle_idx = handle_idx;
5768         /*
5769          * Some old rdma-core releases check the length of the matching
5770          * parameter first, before continuing, and that check must use the
5771          * length without the misc4 param. If the flow has misc4 support,
5772          * the length is adjusted accordingly. Each param member is
5773          * naturally aligned on a 64B boundary.
5774          */
5775         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
5776                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
5777         /*
5778          * The matching value needs to be cleared to 0 before use. In the
5779          * past, it was cleared automatically by the rte_*alloc API; the
5780          * time consumption is almost the same as before.
5781          */
5782         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
5783         dev_flow->ingress = attr->ingress;
5784         dev_flow->dv.transfer = attr->transfer;
5785         return dev_flow;
5786 }
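
/*
 * A small sketch of the size bookkeeping done above (illustrative; the
 * concrete member sizes come from mlx5_prm.h and are not restated here):
 * the initial match-value length excludes the trailing misc4 member so
 * that old rdma-core length checks pass, and translation can enlarge it
 * again when misc4 is actually matched.
 *
 * @code
 *      size_t full_sz = MLX5_ST_SZ_BYTES(fte_match_param);
 *      size_t legacy_sz = full_sz - MLX5_ST_SZ_BYTES(fte_match_set_misc4);
 * @endcode
 */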
5787
5788 #ifdef RTE_LIBRTE_MLX5_DEBUG
5789 /**
5790  * Sanity check for match mask and value. Similar to check_valid_spec() in
5791  * the kernel driver. If an unmasked bit is present in the value, it fails.
5792  *
5793  * @param match_mask
5794  *   pointer to match mask buffer.
5795  * @param match_value
5796  *   pointer to match value buffer.
5797  *
5798  * @return
5799  *   0 if valid, -EINVAL otherwise.
5800  */
5801 static int
5802 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5803 {
5804         uint8_t *m = match_mask;
5805         uint8_t *v = match_value;
5806         unsigned int i;
5807
5808         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5809                 if (v[i] & ~m[i]) {
5810                         DRV_LOG(ERR,
5811                                 "match_value differs from match_criteria"
5812                                 " %p[%u] != %p[%u]",
5813                                 match_value, i, match_mask, i);
5814                         return -EINVAL;
5815                 }
5816         }
5817         return 0;
5818 }
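
/*
 * A worked example for the check above (hypothetical bytes): with mask
 * byte 0x0f and value byte 0x1f, bit 4 is set in the value but not in
 * the mask, so v[i] & ~m[i] == 0x10 and -EINVAL is returned; with the
 * value clipped to 0x0f the buffers would pass.
 *
 * @code
 *      uint8_t m[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
 *      uint8_t v[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };
 *      int rc = flow_dv_check_valid_spec(m, v);
 * @endcode
 */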
5819 #endif
5820
5821 /**
5822  * Add match of ip_version.
5823  *
5824  * @param[in] group
5825  *   Flow group.
5826  * @param[in] headers_v
5827  *   Values header pointer.
5828  * @param[in] headers_m
5829  *   Masks header pointer.
5830  * @param[in] ip_version
5831  *   The IP version to set.
5832  */
5833 static inline void
5834 flow_dv_set_match_ip_version(uint32_t group,
5835                              void *headers_v,
5836                              void *headers_m,
5837                              uint8_t ip_version)
5838 {
5839         if (group == 0)
5840                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5841         else
5842                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
5843                          ip_version);
5844         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
5845         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
5846         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
5847 }
5848
5849 /**
5850  * Add Ethernet item to matcher and to the value.
5851  *
5852  * @param[in, out] matcher
5853  *   Flow matcher.
5854  * @param[in, out] key
5855  *   Flow matcher value.
5856  * @param[in] item
5857  *   Flow pattern to translate.
5858  * @param[in] inner
5859  *   Item is inner pattern.
5860  * @param[in] group
 *   The group to insert the rule.
 */
5861 static void
5862 flow_dv_translate_item_eth(void *matcher, void *key,
5863                            const struct rte_flow_item *item, int inner,
5864                            uint32_t group)
5865 {
5866         const struct rte_flow_item_eth *eth_m = item->mask;
5867         const struct rte_flow_item_eth *eth_v = item->spec;
5868         const struct rte_flow_item_eth nic_mask = {
5869                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5870                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5871                 .type = RTE_BE16(0xffff),
5872         };
5873         void *headers_m;
5874         void *headers_v;
5875         char *l24_v;
5876         unsigned int i;
5877
5878         if (!eth_v)
5879                 return;
5880         if (!eth_m)
5881                 eth_m = &nic_mask;
5882         if (inner) {
5883                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5884                                          inner_headers);
5885                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5886         } else {
5887                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5888                                          outer_headers);
5889                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5890         }
5891         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5892                &eth_m->dst, sizeof(eth_m->dst));
5893         /* The value must be in the range of the mask. */
5894         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5895         for (i = 0; i < sizeof(eth_m->dst); ++i)
5896                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5897         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5898                &eth_m->src, sizeof(eth_m->src));
5899         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5900         /* The value must be in the range of the mask. */
5901         for (i = 0; i < sizeof(eth_m->src); ++i)
5902                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5903         if (eth_v->type) {
5904                 /* When ethertype is present, set the mask for tagged VLAN. */
5905                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5906                 /* Set value for tagged VLAN if ethertype is 802.1Q or QinQ. */
5907                 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
5908                     eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
5909                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
5910                                  1);
5911                         /* Return here to avoid setting match on ethertype. */
5912                         return;
5913                 }
5914         }
5915         /*
5916          * HW supports match on one Ethertype, the Ethertype following the last
5917          * VLAN tag of the packet (see PRM).
5918          * Set match on ethertype only if ETH header is not followed by VLAN.
5919          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
5920          * ethertype, and use ip_version field instead.
5921          * eCPRI over Ether layer will use type value 0xAEFE.
5922          */
5923         if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
5924             eth_m->type == 0xFFFF) {
5925                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
5926         } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
5927                    eth_m->type == 0xFFFF) {
5928                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
5929         } else {
5930                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5931                          rte_be_to_cpu_16(eth_m->type));
5932                 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5933                                      ethertype);
5934                 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5935         }
5936 }
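
/*
 * An illustrative ETH item for the translation above (the address is
 * hypothetical). With a fully-masked IPv4 ethertype, the helper sets
 * ip_version instead of ethertype, per the comment above:
 *
 * @code
 *      struct rte_flow_item_eth spec = {
 *              .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *              .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *      };
 *      struct rte_flow_item_eth mask = {
 *              .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *              .type = RTE_BE16(0xffff),
 *      };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_ETH,
 *              .spec = &spec, .mask = &mask,
 *      };
 * @endcode
 */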
5937
5938 /**
5939  * Add VLAN item to matcher and to the value.
5940  *
5941  * @param[in, out] dev_flow
5942  *   Flow descriptor.
5943  * @param[in, out] matcher
5944  *   Flow matcher.
5945  * @param[in, out] key
5946  *   Flow matcher value.
5947  * @param[in] item
5948  *   Flow pattern to translate.
5949  * @param[in] inner
5950  *   Item is inner pattern.
5951  * @param[in] group
 *   The group to insert the rule.
 */
5952 static void
5953 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5954                             void *matcher, void *key,
5955                             const struct rte_flow_item *item,
5956                             int inner, uint32_t group)
5957 {
5958         const struct rte_flow_item_vlan *vlan_m = item->mask;
5959         const struct rte_flow_item_vlan *vlan_v = item->spec;
5960         void *headers_m;
5961         void *headers_v;
5962         uint16_t tci_m;
5963         uint16_t tci_v;
5964
5965         if (inner) {
5966                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5967                                          inner_headers);
5968                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5969         } else {
5970                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5971                                          outer_headers);
5972                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5973                 /*
5974                  * This is a workaround: VLAN masks are not supported
5975                  * here and have been pre-validated.
5976                  */
5977                 if (vlan_v)
5978                         dev_flow->handle->vf_vlan.tag =
5979                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5980         }
5981         /*
5982          * When VLAN item exists in flow, mark packet as tagged,
5983          * even if TCI is not specified.
5984          */
5985         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5986         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5987         if (!vlan_v)
5988                 return;
5989         if (!vlan_m)
5990                 vlan_m = &rte_flow_item_vlan_mask;
5991         tci_m = rte_be_to_cpu_16(vlan_m->tci);
5992         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
5993         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5994         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5995         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5996         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5997         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5998         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
5999         /*
6000          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6001          * ethertype, and use ip_version field instead.
6002          */
6003         if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
6004             vlan_m->inner_type == 0xFFFF) {
6005                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6006         } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
6007                    vlan_m->inner_type == 0xFFFF) {
6008                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6009         } else {
6010                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
6011                          rte_be_to_cpu_16(vlan_m->inner_type));
6012                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
6013                          rte_be_to_cpu_16(vlan_m->inner_type &
6014                                           vlan_v->inner_type));
6015         }
6016 }
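
/*
 * TCI decomposition used above, on a hypothetical TCI of 0x3064
 * (PCP 1, CFI 1, VID 0x064): first_vid takes the low 12 bits, first_cfi
 * takes tci >> 12 (MLX5_SET keeps only the field's low bit) and
 * first_prio takes tci >> 13.
 *
 * @code
 *      struct rte_flow_item_vlan spec = { .tci = RTE_BE16(0x3064) };
 *      struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0xffff) };
 * @endcode
 */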
6017
6018 /**
6019  * Add IPV4 item to matcher and to the value.
6020  *
6021  * @param[in, out] matcher
6022  *   Flow matcher.
6023  * @param[in, out] key
6024  *   Flow matcher value.
6025  * @param[in] item
6026  *   Flow pattern to translate.
6027  * @param[in] item_flags
6028  *   Bit-fields that hold the items detected until now.
6029  * @param[in] inner
6030  *   Item is inner pattern.
6031  * @param[in] group
6032  *   The group to insert the rule.
6033  */
6034 static void
6035 flow_dv_translate_item_ipv4(void *matcher, void *key,
6036                             const struct rte_flow_item *item,
6037                             const uint64_t item_flags,
6038                             int inner, uint32_t group)
6039 {
6040         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6041         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6042         const struct rte_flow_item_ipv4 nic_mask = {
6043                 .hdr = {
6044                         .src_addr = RTE_BE32(0xffffffff),
6045                         .dst_addr = RTE_BE32(0xffffffff),
6046                         .type_of_service = 0xff,
6047                         .next_proto_id = 0xff,
6048                         .time_to_live = 0xff,
6049                 },
6050         };
6051         void *headers_m;
6052         void *headers_v;
6053         char *l24_m;
6054         char *l24_v;
6055         uint8_t tos;
6056
6057         if (inner) {
6058                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6059                                          inner_headers);
6060                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6061         } else {
6062                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6063                                          outer_headers);
6064                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6065         }
6066         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6067         /*
         * On outer header (which must contain L2), or inner header with L2,
6069          * set cvlan_tag mask bit to mark this packet as untagged.
6070          * This should be done even if item->spec is empty.
6071          */
6072         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6073                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6074         if (!ipv4_v)
6075                 return;
6076         if (!ipv4_m)
6077                 ipv4_m = &nic_mask;
6078         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6079                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6080         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6081                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6082         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6083         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6084         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6085                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6086         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6087                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6088         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6089         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6090         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6091         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6092                  ipv4_m->hdr.type_of_service);
6093         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6094         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6095                  ipv4_m->hdr.type_of_service >> 2);
6096         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6097         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6098                  ipv4_m->hdr.next_proto_id);
6099         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6100                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6101         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6102                  ipv4_m->hdr.time_to_live);
6103         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6104                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6105 }
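
/*
 * For reference, an rte_flow pattern that exercises this translator;
 * a hypothetical rule, not driver code. The key always receives
 * spec & mask, exactly as computed above.
 *
 *     struct rte_flow_item_ipv4 spec = {
 *             .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 0)),
 *     };
 *     struct rte_flow_item_ipv4 mask = {
 *             .hdr.dst_addr = RTE_BE32(0xffffff00), // /24 prefix
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &spec, .mask = &mask,
 *     };
 */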
6106
6107 /**
6108  * Add IPV6 item to matcher and to the value.
6109  *
6110  * @param[in, out] matcher
6111  *   Flow matcher.
6112  * @param[in, out] key
6113  *   Flow matcher value.
6114  * @param[in] item
6115  *   Flow pattern to translate.
6116  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
6118  * @param[in] inner
6119  *   Item is inner pattern.
6120  * @param[in] group
6121  *   The group to insert the rule.
6122  */
6123 static void
6124 flow_dv_translate_item_ipv6(void *matcher, void *key,
6125                             const struct rte_flow_item *item,
6126                             const uint64_t item_flags,
6127                             int inner, uint32_t group)
6128 {
6129         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6130         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6131         const struct rte_flow_item_ipv6 nic_mask = {
6132                 .hdr = {
6133                         .src_addr =
6134                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6135                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6136                         .dst_addr =
6137                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6138                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6139                         .vtc_flow = RTE_BE32(0xffffffff),
6140                         .proto = 0xff,
6141                         .hop_limits = 0xff,
6142                 },
6143         };
6144         void *headers_m;
6145         void *headers_v;
6146         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6147         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6148         char *l24_m;
6149         char *l24_v;
6150         uint32_t vtc_m;
6151         uint32_t vtc_v;
6152         int i;
6153         int size;
6154
6155         if (inner) {
6156                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6157                                          inner_headers);
6158                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6159         } else {
6160                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6161                                          outer_headers);
6162                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6163         }
6164         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6165         /*
         * On outer header (which must contain L2), or inner header with L2,
6167          * set cvlan_tag mask bit to mark this packet as untagged.
6168          * This should be done even if item->spec is empty.
6169          */
6170         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6171                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6172         if (!ipv6_v)
6173                 return;
6174         if (!ipv6_m)
6175                 ipv6_m = &nic_mask;
6176         size = sizeof(ipv6_m->hdr.dst_addr);
6177         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6178                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6179         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6180                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6181         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6182         for (i = 0; i < size; ++i)
6183                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6184         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6185                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6186         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6187                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6188         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6189         for (i = 0; i < size; ++i)
6190                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6191         /* TOS. */
6192         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6193         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6194         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6195         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6196         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6197         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6198         /* Label. */
6199         if (inner) {
6200                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6201                          vtc_m);
6202                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6203                          vtc_v);
6204         } else {
6205                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6206                          vtc_m);
6207                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6208                          vtc_v);
6209         }
6210         /* Protocol. */
6211         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6212                  ipv6_m->hdr.proto);
6213         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6214                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6215         /* Hop limit. */
6216         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6217                  ipv6_m->hdr.hop_limits);
6218         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6219                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6220 }
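
/*
 * A worked example of the vtc_flow decomposition above (illustrative
 * values): vtc_flow is version(4) | TC(8) | flow label(20); ECN is the
 * low two TC bits and DSCP the high six, and MLX5_SET() truncates to
 * the field width.
 *
 *     uint32_t vtc = (6u << 28) | (0xb8u << 20) | 0x12345;
 *     // ip_ecn     <- vtc >> 20 -> 2-bit field keeps 0xb8 & 0x3 = 0
 *     // ip_dscp    <- vtc >> 22 -> 6-bit field keeps 0xb8 >> 2 = 46 (EF)
 *     // flow label <- vtc       -> 20-bit field keeps 0x12345
 */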
6221
6222 /**
6223  * Add TCP item to matcher and to the value.
6224  *
6225  * @param[in, out] matcher
6226  *   Flow matcher.
6227  * @param[in, out] key
6228  *   Flow matcher value.
6229  * @param[in] item
6230  *   Flow pattern to translate.
6231  * @param[in] inner
6232  *   Item is inner pattern.
6233  */
6234 static void
6235 flow_dv_translate_item_tcp(void *matcher, void *key,
6236                            const struct rte_flow_item *item,
6237                            int inner)
6238 {
6239         const struct rte_flow_item_tcp *tcp_m = item->mask;
6240         const struct rte_flow_item_tcp *tcp_v = item->spec;
6241         void *headers_m;
6242         void *headers_v;
6243
6244         if (inner) {
6245                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6246                                          inner_headers);
6247                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6248         } else {
6249                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6250                                          outer_headers);
6251                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6252         }
6253         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6254         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6255         if (!tcp_v)
6256                 return;
6257         if (!tcp_m)
6258                 tcp_m = &rte_flow_item_tcp_mask;
6259         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6260                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6261         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6262                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6263         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6264                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6265         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6266                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6267         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6268                  tcp_m->hdr.tcp_flags);
6269         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6270                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6271 }
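
/*
 * Illustrative rte_flow usage (hypothetical rule): matching TCP
 * connection initiations relies only on the tcp_flags translation
 * above.
 *
 *     struct rte_flow_item_tcp spec = {
 *             .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *     };
 *     struct rte_flow_item_tcp mask = {
 *             .hdr.tcp_flags = RTE_TCP_SYN_FLAG | RTE_TCP_ACK_FLAG,
 *     };
 *     // Matches SYN set with ACK clear, i.e. the first SYN only.
 */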
6272
6273 /**
6274  * Add UDP item to matcher and to the value.
6275  *
6276  * @param[in, out] matcher
6277  *   Flow matcher.
6278  * @param[in, out] key
6279  *   Flow matcher value.
6280  * @param[in] item
6281  *   Flow pattern to translate.
6282  * @param[in] inner
6283  *   Item is inner pattern.
6284  */
6285 static void
6286 flow_dv_translate_item_udp(void *matcher, void *key,
6287                            const struct rte_flow_item *item,
6288                            int inner)
6289 {
6290         const struct rte_flow_item_udp *udp_m = item->mask;
6291         const struct rte_flow_item_udp *udp_v = item->spec;
6292         void *headers_m;
6293         void *headers_v;
6294
6295         if (inner) {
6296                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6297                                          inner_headers);
6298                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6299         } else {
6300                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6301                                          outer_headers);
6302                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6303         }
6304         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6305         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6306         if (!udp_v)
6307                 return;
6308         if (!udp_m)
6309                 udp_m = &rte_flow_item_udp_mask;
6310         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6311                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6312         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6313                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6314         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6315                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6316         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6317                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6318 }
6319
6320 /**
6321  * Add GRE optional Key item to matcher and to the value.
6322  *
6323  * @param[in, out] matcher
6324  *   Flow matcher.
6325  * @param[in, out] key
6326  *   Flow matcher value.
6327  * @param[in] item
6328  *   Flow pattern to translate.
6331  */
6332 static void
flow_dv_translate_item_gre_key(void *matcher, void *key,
                               const struct rte_flow_item *item)
6335 {
6336         const rte_be32_t *key_m = item->mask;
6337         const rte_be32_t *key_v = item->spec;
6338         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6339         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6340         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6341
6342         /* GRE K bit must be on and should already be validated */
6343         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6344         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6345         if (!key_v)
6346                 return;
6347         if (!key_m)
6348                 key_m = &gre_key_default_mask;
6349         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6350                  rte_be_to_cpu_32(*key_m) >> 8);
6351         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6352                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6353         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6354                  rte_be_to_cpu_32(*key_m) & 0xFF);
6355         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6356                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6357 }
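
/*
 * A worked example of the GRE key split above (illustrative values):
 * the PRM exposes the 32-bit key as a 24-bit high part and an 8-bit
 * low part.
 *
 *     rte_be32_t gre_key = RTE_BE32(0x00abcdef);
 *     // gre_key_h <- rte_be_to_cpu_32(gre_key) >> 8   = 0x00abcd
 *     // gre_key_l <- rte_be_to_cpu_32(gre_key) & 0xff = 0xef
 */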
6358
6359 /**
6360  * Add GRE item to matcher and to the value.
6361  *
6362  * @param[in, out] matcher
6363  *   Flow matcher.
6364  * @param[in, out] key
6365  *   Flow matcher value.
6366  * @param[in] item
6367  *   Flow pattern to translate.
6368  * @param[in] inner
6369  *   Item is inner pattern.
6370  */
6371 static void
6372 flow_dv_translate_item_gre(void *matcher, void *key,
6373                            const struct rte_flow_item *item,
6374                            int inner)
6375 {
6376         const struct rte_flow_item_gre *gre_m = item->mask;
6377         const struct rte_flow_item_gre *gre_v = item->spec;
6378         void *headers_m;
6379         void *headers_v;
6380         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6381         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6382         struct {
6383                 union {
6384                         __extension__
6385                         struct {
6386                                 uint16_t version:3;
6387                                 uint16_t rsvd0:9;
6388                                 uint16_t s_present:1;
6389                                 uint16_t k_present:1;
6390                                 uint16_t rsvd_bit1:1;
6391                                 uint16_t c_present:1;
6392                         };
6393                         uint16_t value;
6394                 };
6395         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6396
6397         if (inner) {
6398                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6399                                          inner_headers);
6400                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6401         } else {
6402                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6403                                          outer_headers);
6404                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6405         }
6406         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6407         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6408         if (!gre_v)
6409                 return;
6410         if (!gre_m)
6411                 gre_m = &rte_flow_item_gre_mask;
6412         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6413                  rte_be_to_cpu_16(gre_m->protocol));
6414         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6415                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6416         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6417         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6418         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6419                  gre_crks_rsvd0_ver_m.c_present);
6420         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6421                  gre_crks_rsvd0_ver_v.c_present &
6422                  gre_crks_rsvd0_ver_m.c_present);
6423         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6424                  gre_crks_rsvd0_ver_m.k_present);
6425         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6426                  gre_crks_rsvd0_ver_v.k_present &
6427                  gre_crks_rsvd0_ver_m.k_present);
6428         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6429                  gre_crks_rsvd0_ver_m.s_present);
6430         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6431                  gre_crks_rsvd0_ver_v.s_present &
6432                  gre_crks_rsvd0_ver_m.s_present);
6433 }
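
/*
 * Note on the anonymous bitfield above: it decodes the host-order GRE
 * first word (C at bit 15, K at bit 13, S at bit 12, version in bits
 * 2:0) and assumes LSB-first bitfield allocation, i.e. a little-endian
 * host. Worked example with the NVGRE constants used below:
 *
 *     gre_crks_rsvd0_ver_v.value = 0x2000; // -> k_present = 1
 *     gre_crks_rsvd0_ver_m.value = 0xb000; // -> c/k/s_present = 1
 */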
6434
6435 /**
6436  * Add NVGRE item to matcher and to the value.
6437  *
6438  * @param[in, out] matcher
6439  *   Flow matcher.
6440  * @param[in, out] key
6441  *   Flow matcher value.
6442  * @param[in] item
6443  *   Flow pattern to translate.
6444  * @param[in] inner
6445  *   Item is inner pattern.
6446  */
6447 static void
6448 flow_dv_translate_item_nvgre(void *matcher, void *key,
6449                              const struct rte_flow_item *item,
6450                              int inner)
6451 {
6452         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6453         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6454         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6455         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6456         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
6457         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
6458         char *gre_key_m;
6459         char *gre_key_v;
6460         int size;
6461         int i;
6462
6463         /* For NVGRE, GRE header fields must be set with defined values. */
6464         const struct rte_flow_item_gre gre_spec = {
6465                 .c_rsvd0_ver = RTE_BE16(0x2000),
6466                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6467         };
6468         const struct rte_flow_item_gre gre_mask = {
6469                 .c_rsvd0_ver = RTE_BE16(0xB000),
6470                 .protocol = RTE_BE16(UINT16_MAX),
6471         };
6472         const struct rte_flow_item gre_item = {
6473                 .spec = &gre_spec,
6474                 .mask = &gre_mask,
6475                 .last = NULL,
6476         };
6477         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6478         if (!nvgre_v)
6479                 return;
6480         if (!nvgre_m)
6481                 nvgre_m = &rte_flow_item_nvgre_mask;
6482         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
6483         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6484         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
6485         memcpy(gre_key_m, tni_flow_id_m, size);
6486         for (i = 0; i < size; ++i)
6487                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
6488 }
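
/*
 * Layout note with a worked example (illustrative values): TNI(24) and
 * flow_id(8) are adjacent both in struct rte_flow_item_nvgre and in
 * the gre_key_h/gre_key_l registers, so the single 4-byte copy above
 * lands TNI in the key's high 24 bits and flow_id in its low 8 bits.
 *
 *     uint8_t tni[3] = { 0xab, 0xcd, 0xef };
 *     uint8_t flow_id = 0x01;
 *     // resulting GRE key bytes: ab cd ef 01 -> key 0xabcdef01
 */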
6489
6490 /**
6491  * Add VXLAN item to matcher and to the value.
6492  *
6493  * @param[in, out] matcher
6494  *   Flow matcher.
6495  * @param[in, out] key
6496  *   Flow matcher value.
6497  * @param[in] item
6498  *   Flow pattern to translate.
6499  * @param[in] inner
6500  *   Item is inner pattern.
6501  */
6502 static void
6503 flow_dv_translate_item_vxlan(void *matcher, void *key,
6504                              const struct rte_flow_item *item,
6505                              int inner)
6506 {
6507         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
6508         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
6509         void *headers_m;
6510         void *headers_v;
6511         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6512         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6513         char *vni_m;
6514         char *vni_v;
6515         uint16_t dport;
6516         int size;
6517         int i;
6518
6519         if (inner) {
6520                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6521                                          inner_headers);
6522                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6523         } else {
6524                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6525                                          outer_headers);
6526                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6527         }
6528         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6529                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6530         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6531                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6532                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6533         }
6534         if (!vxlan_v)
6535                 return;
6536         if (!vxlan_m)
6537                 vxlan_m = &rte_flow_item_vxlan_mask;
6538         size = sizeof(vxlan_m->vni);
6539         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
6540         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
6541         memcpy(vni_m, vxlan_m->vni, size);
6542         for (i = 0; i < size; ++i)
6543                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6544 }
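
/*
 * Illustrative rte_flow usage (hypothetical rule): the 3-byte VNI is
 * matched byte-wise, so no endianness conversion is involved.
 *
 *     struct rte_flow_item_vxlan spec = { .vni = { 0x12, 0x34, 0x56 } };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &spec, .mask = &rte_flow_item_vxlan_mask,
 *     };
 *     // Matches VNI 0x123456; the VXLAN UDP destination port is
 *     // implied unless a preceding UDP item already matched one.
 */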
6545
6546 /**
6547  * Add VXLAN-GPE item to matcher and to the value.
6548  *
6549  * @param[in, out] matcher
6550  *   Flow matcher.
6551  * @param[in, out] key
6552  *   Flow matcher value.
6553  * @param[in] item
6554  *   Flow pattern to translate.
6555  * @param[in] inner
6556  *   Item is inner pattern.
6557  */
6559 static void
6560 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
6561                                  const struct rte_flow_item *item, int inner)
6562 {
6563         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
6564         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
6565         void *headers_m;
6566         void *headers_v;
6567         void *misc_m =
6568                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
6569         void *misc_v =
6570                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6571         char *vni_m;
6572         char *vni_v;
6573         uint16_t dport;
6574         int size;
6575         int i;
6576         uint8_t flags_m = 0xff;
6577         uint8_t flags_v = 0xc;
6578
6579         if (inner) {
6580                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6581                                          inner_headers);
6582                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6583         } else {
6584                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6585                                          outer_headers);
6586                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6587         }
        /* This translator handles VXLAN-GPE items only. */
        dport = MLX5_UDP_PORT_VXLAN_GPE;
6590         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6591                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6592                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6593         }
6594         if (!vxlan_v)
6595                 return;
6596         if (!vxlan_m)
6597                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
6598         size = sizeof(vxlan_m->vni);
6599         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
6600         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
6601         memcpy(vni_m, vxlan_m->vni, size);
6602         for (i = 0; i < size; ++i)
6603                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6604         if (vxlan_m->flags) {
6605                 flags_m = vxlan_m->flags;
6606                 flags_v = vxlan_v->flags;
6607         }
6608         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
6609         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
6610         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
6611                  vxlan_m->protocol);
6612         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
6613                  vxlan_v->protocol);
6614 }
6615
6616 /**
6617  * Add Geneve item to matcher and to the value.
6618  *
6619  * @param[in, out] matcher
6620  *   Flow matcher.
6621  * @param[in, out] key
6622  *   Flow matcher value.
6623  * @param[in] item
6624  *   Flow pattern to translate.
6625  * @param[in] inner
6626  *   Item is inner pattern.
6627  */
6629 static void
6630 flow_dv_translate_item_geneve(void *matcher, void *key,
6631                               const struct rte_flow_item *item, int inner)
6632 {
6633         const struct rte_flow_item_geneve *geneve_m = item->mask;
6634         const struct rte_flow_item_geneve *geneve_v = item->spec;
6635         void *headers_m;
6636         void *headers_v;
6637         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6638         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6639         uint16_t dport;
6640         uint16_t gbhdr_m;
6641         uint16_t gbhdr_v;
6642         char *vni_m;
6643         char *vni_v;
6644         size_t size, i;
6645
6646         if (inner) {
6647                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6648                                          inner_headers);
6649                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6650         } else {
6651                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6652                                          outer_headers);
6653                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6654         }
6655         dport = MLX5_UDP_PORT_GENEVE;
6656         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6657                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6658                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6659         }
6660         if (!geneve_v)
6661                 return;
6662         if (!geneve_m)
6663                 geneve_m = &rte_flow_item_geneve_mask;
6664         size = sizeof(geneve_m->vni);
6665         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6666         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6667         memcpy(vni_m, geneve_m->vni, size);
6668         for (i = 0; i < size; ++i)
6669                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
6670         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6671                  rte_be_to_cpu_16(geneve_m->protocol));
6672         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6673                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6674         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6675         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
6676         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6677                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6678         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6679                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6680         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6681                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6682         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6683                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6684                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6685 }
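
/*
 * Illustrative rte_flow usage (hypothetical rule) for the translator
 * above: match a Geneve tunnel by VNI, carrying Ethernet.
 *
 *     struct rte_flow_item_geneve spec = {
 *             .vni = { 0x00, 0x00, 0x2a },
 *             .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB),
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_GENEVE,
 *             .spec = &spec, .mask = &rte_flow_item_geneve_mask,
 *     };
 */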
6686
6687 /**
6688  * Add MPLS item to matcher and to the value.
6689  *
6690  * @param[in, out] matcher
6691  *   Flow matcher.
6692  * @param[in, out] key
6693  *   Flow matcher value.
6694  * @param[in] item
6695  *   Flow pattern to translate.
6696  * @param[in] prev_layer
6697  *   The protocol layer indicated in previous item.
6698  * @param[in] inner
6699  *   Item is inner pattern.
6700  */
6701 static void
6702 flow_dv_translate_item_mpls(void *matcher, void *key,
6703                             const struct rte_flow_item *item,
6704                             uint64_t prev_layer,
6705                             int inner)
6706 {
6707         const uint32_t *in_mpls_m = item->mask;
6708         const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = NULL;
        uint32_t *out_mpls_v = NULL;
6711         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6712         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6713         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6714                                      misc_parameters_2);
6715         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6716         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6717         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6718
6719         switch (prev_layer) {
6720         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6721                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6722                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6723                          MLX5_UDP_PORT_MPLS);
6724                 break;
6725         case MLX5_FLOW_LAYER_GRE:
6726                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6727                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6728                          RTE_ETHER_TYPE_MPLS);
6729                 break;
6730         default:
6731                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6732                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6733                          IPPROTO_MPLS);
6734                 break;
6735         }
6736         if (!in_mpls_v)
6737                 return;
6738         if (!in_mpls_m)
6739                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6740         switch (prev_layer) {
6741         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6742                 out_mpls_m =
6743                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6744                                                  outer_first_mpls_over_udp);
6745                 out_mpls_v =
6746                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6747                                                  outer_first_mpls_over_udp);
6748                 break;
6749         case MLX5_FLOW_LAYER_GRE:
6750                 out_mpls_m =
6751                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6752                                                  outer_first_mpls_over_gre);
6753                 out_mpls_v =
6754                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6755                                                  outer_first_mpls_over_gre);
6756                 break;
6757         default:
6758                 /* Inner MPLS not over GRE is not supported. */
6759                 if (!inner) {
6760                         out_mpls_m =
6761                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6762                                                          misc2_m,
6763                                                          outer_first_mpls);
6764                         out_mpls_v =
6765                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6766                                                          misc2_v,
6767                                                          outer_first_mpls);
6768                 }
6769                 break;
6770         }
6771         if (out_mpls_m && out_mpls_v) {
6772                 *out_mpls_m = *in_mpls_m;
6773                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
6774         }
6775 }
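
/*
 * Illustrative item encoding (hypothetical rule): prev_layer selects
 * which PRM field receives the 4-byte MPLS entry; for MPLS-over-UDP
 * the pattern would be ETH / IPV4 / UDP / MPLS, taking the first
 * branch of both switches above. The MPLS entry itself is
 * label(20) | TC(3) | S(1) | TTL(8):
 *
 *     struct rte_flow_item_mpls spec = {
 *             .label_tc_s = { 0x00, 0x01, 0x41 }, // label 20, TC 0, S 1
 *             .ttl = 64,
 *     };
 */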
6776
6777 /**
6778  * Add metadata register item to matcher
6779  *
6780  * @param[in, out] matcher
6781  *   Flow matcher.
6782  * @param[in, out] key
6783  *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register.
 * @param[in] data
 *   Register value.
 * @param[in] mask
 *   Register mask.
6790  */
6791 static void
6792 flow_dv_match_meta_reg(void *matcher, void *key,
6793                        enum modify_reg reg_type,
6794                        uint32_t data, uint32_t mask)
6795 {
6796         void *misc2_m =
6797                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6798         void *misc2_v =
6799                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6800         uint32_t temp;
6801
6802         data &= mask;
6803         switch (reg_type) {
6804         case REG_A:
6805                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
6806                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
6807                 break;
6808         case REG_B:
6809                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
6810                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6811                 break;
6812         case REG_C_0:
6813                 /*
6814                  * The metadata register C0 field might be divided into
6815                  * source vport index and META item value, we should set
6816                  * this field according to specified mask, not as whole one.
6817                  */
6818                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6819                 temp |= mask;
6820                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6821                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6822                 temp &= ~mask;
6823                 temp |= data;
6824                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6825                 break;
6826         case REG_C_1:
6827                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6828                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6829                 break;
6830         case REG_C_2:
6831                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6832                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6833                 break;
6834         case REG_C_3:
6835                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6836                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6837                 break;
6838         case REG_C_4:
6839                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6840                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6841                 break;
6842         case REG_C_5:
6843                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6844                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6845                 break;
6846         case REG_C_6:
6847                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6848                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6849                 break;
6850         case REG_C_7:
6851                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6852                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6853                 break;
6854         default:
6855                 MLX5_ASSERT(false);
6856                 break;
6857         }
6858 }
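
/*
 * A worked example of the REG_C_0 read-modify-write above
 * (illustrative values): assume the source vport match already
 * occupies the low half and a META match arrives for the high half.
 *
 *     // existing: misc2_m = 0x0000ffff, misc2_v = 0x00000abc (vport)
 *     // incoming: mask    = 0xffff0000, data    = 0x12340000 (META)
 *     // result:   misc2_m = 0xffffffff, misc2_v = 0x12340abc
 *
 * A plain MLX5_SET() of the whole register would have wiped the
 * vport bits instead.
 */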
6859
6860 /**
6861  * Add MARK item to matcher
6862  *
6863  * @param[in] dev
6864  *   The device to configure through.
6865  * @param[in, out] matcher
6866  *   Flow matcher.
6867  * @param[in, out] key
6868  *   Flow matcher value.
6869  * @param[in] item
6870  *   Flow pattern to translate.
6871  */
6872 static void
6873 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6874                             void *matcher, void *key,
6875                             const struct rte_flow_item *item)
6876 {
6877         struct mlx5_priv *priv = dev->data->dev_private;
6878         const struct rte_flow_item_mark *mark;
6879         uint32_t value;
6880         uint32_t mask;
6881
6882         mark = item->mask ? (const void *)item->mask :
6883                             &rte_flow_item_mark_mask;
6884         mask = mark->id & priv->sh->dv_mark_mask;
6885         mark = (const void *)item->spec;
6886         MLX5_ASSERT(mark);
6887         value = mark->id & priv->sh->dv_mark_mask & mask;
6888         if (mask) {
6889                 enum modify_reg reg;
6890
6891                 /* Get the metadata register index for the mark. */
6892                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6893                 MLX5_ASSERT(reg > 0);
6894                 if (reg == REG_C_0) {
6896                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6897                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6898
6899                         mask &= msk_c0;
6900                         mask <<= shl_c0;
6901                         value <<= shl_c0;
6902                 }
6903                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6904         }
6905 }
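
/*
 * Alignment note (illustrative values): rte_bsf32() returns the
 * zero-based index of the least significant set bit, so shl_c0 is the
 * offset of the application-usable part of REG_C_0.
 *
 *     // msk_c0 = 0xffff0000 -> shl_c0 = 16
 *     // msk_c0 = 0x0000ffff -> shl_c0 = 0 (no shift needed)
 */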
6906
6907 /**
6908  * Add META item to matcher
6909  *
6910  * @param[in] dev
 *   The device to configure through.
6912  * @param[in, out] matcher
6913  *   Flow matcher.
6914  * @param[in, out] key
6915  *   Flow matcher value.
6916  * @param[in] attr
6917  *   Attributes of flow that includes this item.
6918  * @param[in] item
6919  *   Flow pattern to translate.
6920  */
6921 static void
6922 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6923                             void *matcher, void *key,
6924                             const struct rte_flow_attr *attr,
6925                             const struct rte_flow_item *item)
6926 {
6927         const struct rte_flow_item_meta *meta_m;
6928         const struct rte_flow_item_meta *meta_v;
6929
6930         meta_m = (const void *)item->mask;
6931         if (!meta_m)
6932                 meta_m = &rte_flow_item_meta_mask;
6933         meta_v = (const void *)item->spec;
6934         if (meta_v) {
6935                 int reg;
6936                 uint32_t value = meta_v->data;
6937                 uint32_t mask = meta_m->data;
6938
6939                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6940                 if (reg < 0)
6941                         return;
6942                 /*
                 * In datapath code there are no endianness
                 * conversions for performance reasons; all
                 * pattern conversions are done in rte_flow.
6946                  */
6947                 value = rte_cpu_to_be_32(value);
6948                 mask = rte_cpu_to_be_32(mask);
6949                 if (reg == REG_C_0) {
6950                         struct mlx5_priv *priv = dev->data->dev_private;
6951                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6952                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6953 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6954                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
6955
6956                         value >>= shr_c0;
6957                         mask >>= shr_c0;
6958 #endif
6959                         value <<= shl_c0;
6960                         mask <<= shl_c0;
6961                         MLX5_ASSERT(msk_c0);
6962                         MLX5_ASSERT(!(~msk_c0 & mask));
6963                 }
6964                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6965         }
6966 }
6967
6968 /**
6969  * Add vport metadata Reg C0 item to matcher
6970  *
6971  * @param[in, out] matcher
6972  *   Flow matcher.
6973  * @param[in, out] key
6974  *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Register mask.
6977  */
6978 static void
6979 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6980                                   uint32_t value, uint32_t mask)
6981 {
6982         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6983 }
6984
6985 /**
6986  * Add tag item to matcher
6987  *
6988  * @param[in] dev
 *   The device to configure through.
6990  * @param[in, out] matcher
6991  *   Flow matcher.
6992  * @param[in, out] key
6993  *   Flow matcher value.
6994  * @param[in] item
6995  *   Flow pattern to translate.
6996  */
6997 static void
6998 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6999                                 void *matcher, void *key,
7000                                 const struct rte_flow_item *item)
7001 {
7002         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7003         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7004         uint32_t mask, value;
7005
7006         MLX5_ASSERT(tag_v);
7007         value = tag_v->data;
7008         mask = tag_m ? tag_m->data : UINT32_MAX;
7009         if (tag_v->id == REG_C_0) {
7010                 struct mlx5_priv *priv = dev->data->dev_private;
7011                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7012                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7013
7014                 mask &= msk_c0;
7015                 mask <<= shl_c0;
7016                 value <<= shl_c0;
7017         }
7018         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7019 }
7020
7021 /**
7022  * Add TAG item to matcher
7023  *
7024  * @param[in] dev
 *   The device to configure through.
7026  * @param[in, out] matcher
7027  *   Flow matcher.
7028  * @param[in, out] key
7029  *   Flow matcher value.
7030  * @param[in] item
7031  *   Flow pattern to translate.
7032  */
7033 static void
7034 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7035                            void *matcher, void *key,
7036                            const struct rte_flow_item *item)
7037 {
7038         const struct rte_flow_item_tag *tag_v = item->spec;
7039         const struct rte_flow_item_tag *tag_m = item->mask;
7040         enum modify_reg reg;
7041
7042         MLX5_ASSERT(tag_v);
7043         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7044         /* Get the metadata register index for the tag. */
7045         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7046         MLX5_ASSERT(reg > 0);
7047         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7048 }
7049
7050 /**
7051  * Add source vport match to the specified matcher.
7052  *
7053  * @param[in, out] matcher
7054  *   Flow matcher.
7055  * @param[in, out] key
7056  *   Flow matcher value.
7057  * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask of the source vport value.
7061  */
7062 static void
7063 flow_dv_translate_item_source_vport(void *matcher, void *key,
7064                                     int16_t port, uint16_t mask)
7065 {
7066         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7067         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7068
7069         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7070         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7071 }
7072
7073 /**
 * Translate port-id item to E-Switch match on port-id.
7075  *
7076  * @param[in] dev
 *   The device to configure through.
7078  * @param[in, out] matcher
7079  *   Flow matcher.
7080  * @param[in, out] key
7081  *   Flow matcher value.
7082  * @param[in] item
7083  *   Flow pattern to translate.
7084  *
7085  * @return
7086  *   0 on success, a negative errno value otherwise.
7087  */
7088 static int
7089 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7090                                void *key, const struct rte_flow_item *item)
7091 {
7092         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7093         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7094         struct mlx5_priv *priv;
7095         uint16_t mask, id;
7096
7097         mask = pid_m ? pid_m->id : 0xffff;
7098         id = pid_v ? pid_v->id : dev->data->port_id;
7099         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7100         if (!priv)
7101                 return -rte_errno;
7102         /* Translate to vport field or to metadata, depending on mode. */
7103         if (priv->vport_meta_mask)
7104                 flow_dv_translate_item_meta_vport(matcher, key,
7105                                                   priv->vport_meta_tag,
7106                                                   priv->vport_meta_mask);
7107         else
7108                 flow_dv_translate_item_source_vport(matcher, key,
7109                                                     priv->vport_id, mask);
7110         return 0;
7111 }
7112
7113 /**
7114  * Add ICMP6 item to matcher and to the value.
7115  *
7116  * @param[in, out] matcher
7117  *   Flow matcher.
7118  * @param[in, out] key
7119  *   Flow matcher value.
7120  * @param[in] item
7121  *   Flow pattern to translate.
7122  * @param[in] inner
7123  *   Item is inner pattern.
7124  */
7125 static void
7126 flow_dv_translate_item_icmp6(void *matcher, void *key,
7127                               const struct rte_flow_item *item,
7128                               int inner)
7129 {
7130         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7131         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7132         void *headers_m;
7133         void *headers_v;
7134         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7135                                      misc_parameters_3);
7136         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7137         if (inner) {
7138                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7139                                          inner_headers);
7140                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7141         } else {
7142                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7143                                          outer_headers);
7144                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7145         }
7146         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7147         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7148         if (!icmp6_v)
7149                 return;
7150         if (!icmp6_m)
7151                 icmp6_m = &rte_flow_item_icmp6_mask;
7152         /*
7153          * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
7154          * If only the protocol is specified, no need to match the frag.
7155          */
7156         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7157         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7158         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7159         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7160                  icmp6_v->type & icmp6_m->type);
7161         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7162         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7163                  icmp6_v->code & icmp6_m->code);
7164 }
7165
7166 /**
7167  * Add ICMP item to matcher and to the value.
7168  *
7169  * @param[in, out] matcher
7170  *   Flow matcher.
7171  * @param[in, out] key
7172  *   Flow matcher value.
7173  * @param[in] item
7174  *   Flow pattern to translate.
7175  * @param[in] inner
7176  *   Item is inner pattern.
7177  */
7178 static void
7179 flow_dv_translate_item_icmp(void *matcher, void *key,
7180                             const struct rte_flow_item *item,
7181                             int inner)
7182 {
7183         const struct rte_flow_item_icmp *icmp_m = item->mask;
7184         const struct rte_flow_item_icmp *icmp_v = item->spec;
7185         void *headers_m;
7186         void *headers_v;
7187         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7188                                      misc_parameters_3);
7189         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7190         if (inner) {
7191                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7192                                          inner_headers);
7193                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7194         } else {
7195                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7196                                          outer_headers);
7197                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7198         }
7199         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7200         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7201         if (!icmp_v)
7202                 return;
7203         if (!icmp_m)
7204                 icmp_m = &rte_flow_item_icmp_mask;
7205         /*
7206          * Force flow only to match the non-fragmented IPv4 ICMP packets.
7207          * If only the protocol is specified, no need to match the frag.
7208          */
7209         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7210         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7211         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7212                  icmp_m->hdr.icmp_type);
7213         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7214                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7215         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7216                  icmp_m->hdr.icmp_code);
7217         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7218                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7219 }
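
/*
 * Illustrative rte_flow usage (hypothetical rule): match ICMP echo
 * requests. Note the translator additionally pins frag = 0, so
 * fragmented ICMP packets do not hit such a rule.
 *
 *     struct rte_flow_item_icmp spec = {
 *             .hdr.icmp_type = RTE_IP_ICMP_ECHO_REQUEST, // type 8
 *     };
 */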
7220
7221 /**
7222  * Add GTP item to matcher and to the value.
7223  *
7224  * @param[in, out] matcher
7225  *   Flow matcher.
7226  * @param[in, out] key
7227  *   Flow matcher value.
7228  * @param[in] item
7229  *   Flow pattern to translate.
7230  * @param[in] inner
7231  *   Item is inner pattern.
7232  */
7233 static void
7234 flow_dv_translate_item_gtp(void *matcher, void *key,
7235                            const struct rte_flow_item *item, int inner)
7236 {
7237         const struct rte_flow_item_gtp *gtp_m = item->mask;
7238         const struct rte_flow_item_gtp *gtp_v = item->spec;
7239         void *headers_m;
7240         void *headers_v;
7241         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7242                                      misc_parameters_3);
7243         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7244         uint16_t dport = RTE_GTPU_UDP_PORT;
7245
7246         if (inner) {
7247                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7248                                          inner_headers);
7249                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7250         } else {
7251                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7252                                          outer_headers);
7253                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7254         }
7255         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7256                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7257                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7258         }
7259         if (!gtp_v)
7260                 return;
7261         if (!gtp_m)
7262                 gtp_m = &rte_flow_item_gtp_mask;
7263         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7264                  gtp_m->v_pt_rsv_flags);
7265         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7266                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7267         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7268         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7269                  gtp_v->msg_type & gtp_m->msg_type);
7270         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7271                  rte_be_to_cpu_32(gtp_m->teid));
7272         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7273                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7274 }
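
/*
 * Illustrative rte_flow usage (hypothetical rule): match GTP-U
 * traffic of a single tunnel endpoint identifier.
 *
 *     struct rte_flow_item_gtp spec = {
 *             .teid = RTE_BE32(0x1234),
 *     };
 *     struct rte_flow_item_gtp mask = {
 *             .teid = RTE_BE32(0xffffffff),
 *     };
 *     // UDP dport 2152 (RTE_GTPU_UDP_PORT) is implied unless a
 *     // preceding UDP item already matched a destination port.
 */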
7275
7276 /**
7277  * Add eCPRI item to matcher and to the value.
7278  *
7279  * @param[in] dev
 *   The device to configure through.
7281  * @param[in, out] matcher
7282  *   Flow matcher.
7283  * @param[in, out] key
7284  *   Flow matcher value.
7285  * @param[in] item
7286  *   Flow pattern to translate.
7289  */
7290 static void
7291 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7292                              void *key, const struct rte_flow_item *item)
7293 {
7294         struct mlx5_priv *priv = dev->data->dev_private;
7295         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7296         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7297         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7298                                      misc_parameters_4);
7299         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7300         uint32_t *samples;
7301         void *dw_m;
7302         void *dw_v;
7303
7304         if (!ecpri_v)
7305                 return;
7306         if (!ecpri_m)
7307                 ecpri_m = &rte_flow_item_ecpri_mask;
7308         /*
         * At most four DW samples are supported in a single matcher now.
         * Two are used for eCPRI matching now:
7311          * 1. Type: one byte, mask should be 0x00ff0000 in network order
7312          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7313          *    if any.
7314          */
7315         if (!ecpri_m->hdr.common.u32)
7316                 return;
7317         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7318         /* Need to take the whole DW as the mask to fill the entry. */
7319         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7320                             prog_sample_field_value_0);
7321         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7322                             prog_sample_field_value_0);
7323         /* Already big endian (network order) in the header. */
7324         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7325         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7326         /* Sample#0, used for matching type, offset 0. */
7327         MLX5_SET(fte_match_set_misc4, misc4_m,
7328                  prog_sample_field_id_0, samples[0]);
7329         /* It makes no sense to set the sample ID in the mask field. */
7330         MLX5_SET(fte_match_set_misc4, misc4_v,
7331                  prog_sample_field_id_0, samples[0]);
7332         /*
7333          * Checking if message body part needs to be matched.
7334          * Some wildcard rules only matching type field should be supported.
7335          */
7336         if (ecpri_m->hdr.dummy[0]) {
7337                 switch (ecpri_v->hdr.common.type) {
7338                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7339                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7340                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7341                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7342                                             prog_sample_field_value_1);
7343                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7344                                             prog_sample_field_value_1);
7345                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7346                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7347                         /* Sample#1, to match message body, offset 4. */
7348                         MLX5_SET(fte_match_set_misc4, misc4_m,
7349                                  prog_sample_field_id_1, samples[1]);
7350                         MLX5_SET(fte_match_set_misc4, misc4_v,
7351                                  prog_sample_field_id_1, samples[1]);
7352                         break;
7353                 default:
7354                         /* Others, do not match any sample ID. */
7355                         break;
7356                 }
7357         }
7358 }
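
/*
 * Editorial sketch (not driver code): how an application-side eCPRI item
 * maps onto the two DW samples consumed above. The PC ID value 0x1234 is
 * a made-up example; the field names follow rte_ecpri.h.
 *
 *	struct rte_flow_item_ecpri spec = { 0 };
 *	struct rte_flow_item_ecpri mask = { 0 };
 *
 *	// Sample #0: the message type byte of the common header.
 *	spec.hdr.common.u32 =
 *		rte_cpu_to_be_32(RTE_ECPRI_MSG_TYPE_IQ_DATA << 16);
 *	mask.hdr.common.u32 = rte_cpu_to_be_32(0x00ff0000);
 *	// Sample #1: first DW of the body, PC ID in the upper 16 bits.
 *	spec.hdr.dummy[0] = rte_cpu_to_be_32(0x1234 << 16);
 *	mask.hdr.dummy[0] = rte_cpu_to_be_32(0xffff0000);
 *
 * The item is then used with RTE_FLOW_ITEM_TYPE_ECPRI and reaches this
 * function through item->spec/item->mask; the header words are copied
 * verbatim because they are already in network byte order.
 */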
7359
7360 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7361
7362 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7363         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7364                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7365
7366 /**
7367  * Calculate flow matcher enable bitmap.
7368  *
7369  * @param match_criteria
7370  *   Pointer to flow matcher criteria.
7371  *
7372  * @return
7373  *   Bitmap of enabled fields.
7374  */
7375 static uint8_t
7376 flow_dv_matcher_enable(uint32_t *match_criteria)
7377 {
7378         uint8_t match_criteria_enable;
7379
7380         match_criteria_enable =
7381                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7382                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7383         match_criteria_enable |=
7384                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7385                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7386         match_criteria_enable |=
7387                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7388                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7389         match_criteria_enable |=
7390                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7391                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7392         match_criteria_enable |=
7393                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7394                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7395         match_criteria_enable |=
7396                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7397                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7398         return match_criteria_enable;
7399 }
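
/*
 * Editorial example: for a matcher mask that only touches the outer
 * 5-tuple plus one misc field (e.g. a VXLAN VNI), the computation above
 * yields
 *
 *	match_criteria_enable =
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT);
 *
 * Only the parameter blocks carrying a non-zero mask are flagged, so the
 * device can skip the all-zero blocks entirely.
 */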
7400
7402 /**
7403  * Get a flow table.
7404  *
7405  * @param[in, out] dev
7406  *   Pointer to rte_eth_dev structure.
7407  * @param[in] table_id
7408  *   Table id to use.
7409  * @param[in] egress
7410  *   Direction of the table.
7411  * @param[in] transfer
7412  *   E-Switch or NIC flow.
7413  * @param[out] error
7414  *   Pointer to error structure.
7415  *
7416  * @return
7417  *   Returns the table resource based on the key, NULL in case of failure.
7418  */
7419 static struct mlx5_flow_tbl_resource *
7420 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
7421                          uint32_t table_id, uint8_t egress,
7422                          uint8_t transfer,
7423                          struct rte_flow_error *error)
7424 {
7425         struct mlx5_priv *priv = dev->data->dev_private;
7426         struct mlx5_dev_ctx_shared *sh = priv->sh;
7427         struct mlx5_flow_tbl_resource *tbl;
7428         union mlx5_flow_tbl_key table_key = {
7429                 {
7430                         .table_id = table_id,
7431                         .reserved = 0,
7432                         .domain = !!transfer,
7433                         .direction = !!egress,
7434                 }
7435         };
7436         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
7437                                                          table_key.v64);
7438         struct mlx5_flow_tbl_data_entry *tbl_data;
7439         uint32_t idx = 0;
7440         int ret;
7441         void *domain;
7442
7443         if (pos) {
7444                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
7445                                         entry);
7446                 tbl = &tbl_data->tbl;
7447                 rte_atomic32_inc(&tbl->refcnt);
7448                 return tbl;
7449         }
7450         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7451         if (!tbl_data) {
7452                 rte_flow_error_set(error, ENOMEM,
7453                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7454                                    NULL,
7455                                    "cannot allocate flow table data entry");
7456                 return NULL;
7457         }
7458         tbl_data->idx = idx;
7459         tbl = &tbl_data->tbl;
7460         pos = &tbl_data->entry;
7461         if (transfer)
7462                 domain = sh->fdb_domain;
7463         else if (egress)
7464                 domain = sh->tx_domain;
7465         else
7466                 domain = sh->rx_domain;
7467         ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
7468         if (ret) {
7469                 rte_flow_error_set(error, ENOMEM,
7470                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7471                                    NULL, "cannot create flow table object");
7472                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7473                 return NULL;
7474         }
7475         /*
7476          * No multi-thread support now, but it is still better to initialize
7477          * the reference count before inserting the entry into the hash list.
7478          */
7479         rte_atomic32_init(&tbl->refcnt);
7480         /* Jump action reference count is initialized here. */
7481         rte_atomic32_init(&tbl_data->jump.refcnt);
7482         pos->key = table_key.v64;
7483         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
7484         if (ret < 0) {
7485                 rte_flow_error_set(error, -ret,
7486                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7487                                    "cannot insert flow table data entry");
7488                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7489                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7490                 /* The table entry was freed above, do not use it. */
7491                 return NULL;
7492         }
7491         rte_atomic32_inc(&tbl->refcnt);
7492         return tbl;
7493 }
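
/*
 * Editorial note: the union above packs (table_id, direction, domain)
 * into one 64-bit hash key, so an egress NIC table and an FDB table with
 * the same id resolve to distinct entries. A lookup for egress NIC
 * table 5 would be built as (a sketch using the same union):
 *
 *	union mlx5_flow_tbl_key key = {
 *		{ .table_id = 5, .reserved = 0, .domain = 0, .direction = 1, }
 *	};
 *	struct mlx5_hlist_entry *e =
 *		mlx5_hlist_lookup(sh->flow_tbls, key.v64);
 */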
7494
7495 /**
7496  * Release a flow table.
7497  *
7498  * @param[in] dev
7499  *   Pointer to rte_eth_dev structure.
7500  * @param[in] tbl
7501  *   Table resource to be released.
7502  *
7503  * @return
7504  *   Returns 0 if the table was released, otherwise 1.
7505  */
7506 static int
7507 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
7508                              struct mlx5_flow_tbl_resource *tbl)
7509 {
7510         struct mlx5_priv *priv = dev->data->dev_private;
7511         struct mlx5_dev_ctx_shared *sh = priv->sh;
7512         struct mlx5_flow_tbl_data_entry *tbl_data =
7513                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7514
7515         if (!tbl)
7516                 return 0;
7517         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
7518                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
7519
7520                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7521                 tbl->obj = NULL;
7522                 /* Remove the entry from the hash list and free the memory. */
7523                 mlx5_hlist_remove(sh->flow_tbls, pos);
7524                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
7525                                 tbl_data->idx);
7526                 return 0;
7527         }
7528         return 1;
7529 }
7530
7531 /**
7532  * Register the flow matcher.
7533  *
7534  * @param[in, out] dev
7535  *   Pointer to rte_eth_dev structure.
7536  * @param[in, out] matcher
7537  *   Pointer to flow matcher.
7538  * @param[in, out] key
7539  *   Pointer to flow table key.
7540  * @param[in, out] dev_flow
7541  *   Pointer to the dev_flow.
7542  * @param[out] error
7543  *   Pointer to error structure.
7544  *
7545  * @return
7546  *   0 on success, otherwise -errno and rte_errno is set.
7547  */
7548 static int
7549 flow_dv_matcher_register(struct rte_eth_dev *dev,
7550                          struct mlx5_flow_dv_matcher *matcher,
7551                          union mlx5_flow_tbl_key *key,
7552                          struct mlx5_flow *dev_flow,
7553                          struct rte_flow_error *error)
7554 {
7555         struct mlx5_priv *priv = dev->data->dev_private;
7556         struct mlx5_dev_ctx_shared *sh = priv->sh;
7557         struct mlx5_flow_dv_matcher *cache_matcher;
7558         struct mlx5dv_flow_matcher_attr dv_attr = {
7559                 .type = IBV_FLOW_ATTR_NORMAL,
7560                 .match_mask = (void *)&matcher->mask,
7561         };
7562         struct mlx5_flow_tbl_resource *tbl;
7563         struct mlx5_flow_tbl_data_entry *tbl_data;
7564         int ret;
7565
7566         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
7567                                        key->domain, error);
7568         if (!tbl)
7569                 return -rte_errno;      /* No need to refill the error info */
7570         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7571         /* Lookup from cache. */
7572         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
7573                 if (matcher->crc == cache_matcher->crc &&
7574                     matcher->priority == cache_matcher->priority &&
7575                     !memcmp((const void *)matcher->mask.buf,
7576                             (const void *)cache_matcher->mask.buf,
7577                             cache_matcher->mask.size)) {
7578                         DRV_LOG(DEBUG,
7579                                 "%s group %u priority %hd use %s "
7580                                 "matcher %p: refcnt %d++",
7581                                 key->domain ? "FDB" : "NIC", key->table_id,
7582                                 cache_matcher->priority,
7583                                 key->direction ? "tx" : "rx",
7584                                 (void *)cache_matcher,
7585                                 rte_atomic32_read(&cache_matcher->refcnt));
7586                         rte_atomic32_inc(&cache_matcher->refcnt);
7587                         dev_flow->handle->dvh.matcher = cache_matcher;
7588                         /* Reusing a cached matcher: drop the table ref taken above. */
7589                         flow_dv_tbl_resource_release(dev, tbl);
7590                         return 0;
7591                 }
7592         }
7593         /* Register new matcher. */
7594         cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
7595                                     SOCKET_ID_ANY);
7596         if (!cache_matcher) {
7597                 flow_dv_tbl_resource_release(dev, tbl);
7598                 return rte_flow_error_set(error, ENOMEM,
7599                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7600                                           "cannot allocate matcher memory");
7601         }
7602         *cache_matcher = *matcher;
7603         dv_attr.match_criteria_enable =
7604                 flow_dv_matcher_enable(cache_matcher->mask.buf);
7605         dv_attr.priority = matcher->priority;
7606         if (key->direction)
7607                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
7608         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
7609                                                &cache_matcher->matcher_object);
7610         if (ret) {
7611                 mlx5_free(cache_matcher);
7612 #ifdef HAVE_MLX5DV_DR
7613                 flow_dv_tbl_resource_release(dev, tbl);
7614 #endif
7615                 return rte_flow_error_set(error, ENOMEM,
7616                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7617                                           NULL, "cannot create matcher");
7618         }
7619         /* Save the table information */
7620         cache_matcher->tbl = tbl;
7621         rte_atomic32_init(&cache_matcher->refcnt);
7622         /* Only the matcher ref++ here; the table ref++ was done by the get API. */
7623         rte_atomic32_inc(&cache_matcher->refcnt);
7624         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
7625         dev_flow->handle->dvh.matcher = cache_matcher;
7626         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
7627                 key->domain ? "FDB" : "NIC", key->table_id,
7628                 cache_matcher->priority,
7629                 key->direction ? "tx" : "rx", (void *)cache_matcher,
7630                 rte_atomic32_read(&cache_matcher->refcnt));
7631         return 0;
7632 }
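
/*
 * Editorial note: matchers are deduplicated per table by the triple
 * (mask CRC, priority, raw mask bytes). The CRC is only a cheap
 * first-stage filter; the memcmp() in the lookup loop above is the
 * authoritative comparison, so rules with identical masks and priorities
 * share one mlx5dv matcher object and differ only in their match values.
 */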
7633
7634 /**
7635  * Find existing tag resource or create and register a new one.
7636  *
7637  * @param[in, out] dev
7638  *   Pointer to rte_eth_dev structure.
7639  * @param[in] tag_be24
7640  *   Tag value in big endian, then right-shifted by 8 bits.
7641  * @param[in, out] dev_flow
7642  *   Pointer to the dev_flow.
7643  * @param[out] error
7644  *   Pointer to error structure.
7645  *
7646  * @return
7647  *   0 on success, otherwise -errno and rte_errno is set.
7648  */
7649 static int
7650 flow_dv_tag_resource_register
7651                         (struct rte_eth_dev *dev,
7652                          uint32_t tag_be24,
7653                          struct mlx5_flow *dev_flow,
7654                          struct rte_flow_error *error)
7655 {
7656         struct mlx5_priv *priv = dev->data->dev_private;
7657         struct mlx5_dev_ctx_shared *sh = priv->sh;
7658         struct mlx5_flow_dv_tag_resource *cache_resource;
7659         struct mlx5_hlist_entry *entry;
7660         int ret;
7661
7662         /* Lookup a matching resource from cache. */
7663         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
7664         if (entry) {
7665                 cache_resource = container_of
7666                         (entry, struct mlx5_flow_dv_tag_resource, entry);
7667                 rte_atomic32_inc(&cache_resource->refcnt);
7668                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
7669                 dev_flow->dv.tag_resource = cache_resource;
7670                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
7671                         (void *)cache_resource,
7672                         rte_atomic32_read(&cache_resource->refcnt));
7673                 return 0;
7674         }
7675         /* Register new resource. */
7676         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
7677                                        &dev_flow->handle->dvh.rix_tag);
7678         if (!cache_resource)
7679                 return rte_flow_error_set(error, ENOMEM,
7680                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7681                                           "cannot allocate resource memory");
7682         cache_resource->entry.key = (uint64_t)tag_be24;
7683         ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
7684                                                   &cache_resource->action);
7685         if (ret) {
7686                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
7687                                 dev_flow->handle->dvh.rix_tag);
7687                 return rte_flow_error_set(error, ENOMEM,
7688                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7689                                           NULL, "cannot create action");
7690         }
7691         rte_atomic32_init(&cache_resource->refcnt);
7692         rte_atomic32_inc(&cache_resource->refcnt);
7693         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
7694                 mlx5_flow_os_destroy_flow_action(cache_resource->action);
7695                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
7696                                 dev_flow->handle->dvh.rix_tag);
7696                 return rte_flow_error_set(error, EEXIST,
7697                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7698                                           NULL, "cannot insert tag");
7699         }
7700         dev_flow->dv.tag_resource = cache_resource;
7701         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
7702                 (void *)cache_resource,
7703                 rte_atomic32_read(&cache_resource->refcnt));
7704         return 0;
7705 }
7706
7707 /**
7708  * Release the tag.
7709  *
7710  * @param dev
7711  *   Pointer to Ethernet device.
7712  * @param tag_idx
7713  *   Tag index.
7714  *
7715  * @return
7716  *   1 while a reference on it exists, 0 when freed.
7717  */
7718 static int
7719 flow_dv_tag_release(struct rte_eth_dev *dev,
7720                     uint32_t tag_idx)
7721 {
7722         struct mlx5_priv *priv = dev->data->dev_private;
7723         struct mlx5_dev_ctx_shared *sh = priv->sh;
7724         struct mlx5_flow_dv_tag_resource *tag;
7725
7726         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7727         if (!tag)
7728                 return 0;
7729         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
7730                 dev->data->port_id, (void *)tag,
7731                 rte_atomic32_read(&tag->refcnt));
7732         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7733                 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
7734                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
7735                 DRV_LOG(DEBUG, "port %u tag %p: removed",
7736                         dev->data->port_id, (void *)tag);
7737                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7738                 return 0;
7739         }
7740         return 1;
7741 }
7742
7743 /**
7744  * Translate port ID action to vport.
7745  *
7746  * @param[in] dev
7747  *   Pointer to rte_eth_dev structure.
7748  * @param[in] action
7749  *   Pointer to the port ID action.
7750  * @param[out] dst_port_id
7751  *   The target port ID.
7752  * @param[out] error
7753  *   Pointer to the error structure.
7754  *
7755  * @return
7756  *   0 on success, a negative errno value otherwise and rte_errno is set.
7757  */
7758 static int
7759 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7760                                  const struct rte_flow_action *action,
7761                                  uint32_t *dst_port_id,
7762                                  struct rte_flow_error *error)
7763 {
7764         uint32_t port;
7765         struct mlx5_priv *priv;
7766         const struct rte_flow_action_port_id *conf =
7767                         (const struct rte_flow_action_port_id *)action->conf;
7768
7769         port = conf->original ? dev->data->port_id : conf->id;
7770         priv = mlx5_port_to_eswitch_info(port, false);
7771         if (!priv)
7772                 return rte_flow_error_set(error, rte_errno,
7773                                           RTE_FLOW_ERROR_TYPE_ACTION,
7774                                           NULL,
7775                                           "No eswitch info was found for port");
7776 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7777         /*
7778          * This parameter is transferred to
7779          * mlx5dv_dr_action_create_dest_ib_port().
7780          */
7781         *dst_port_id = priv->dev_port;
7782 #else
7783         /*
7784          * Legacy mode, no LAG configuration is supported.
7785          * This parameter is transferred to
7786          * mlx5dv_dr_action_create_dest_vport().
7787          */
7788         *dst_port_id = priv->vport_id;
7789 #endif
7790         return 0;
7791 }
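
/*
 * Editorial sketch (application-side usage, not driver code); peer_port
 * is a hypothetical ethdev port id belonging to the same E-Switch:
 *
 *	struct rte_flow_action_port_id conf = {
 *		.original = 0,
 *		.id = peer_port,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, },
 *	};
 *
 * With .original = 1 the id field is ignored and traffic is steered back
 * to the port the flow is created on, exactly as the translation above
 * selects.
 */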
7792
7793 /**
7794  * Create a counter with aging configuration.
7795  *
7796  * @param[in] dev
7797  *   Pointer to rte_eth_dev structure.
7798  * @param[in] dev_flow
7799  *   Pointer to the mlx5_flow.
7800  * @param[in] count
7801  *   Pointer to the counter action configuration.
7800  * @param[in] age
7801  *   Pointer to the aging action configuration.
7802  *
7803  * @return
7804  *   Index to flow counter on success, 0 otherwise.
7805  */
7806 static uint32_t
7807 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
7808                                 struct mlx5_flow *dev_flow,
7809                                 const struct rte_flow_action_count *count,
7810                                 const struct rte_flow_action_age *age)
7811 {
7812         uint32_t counter;
7813         struct mlx5_age_param *age_param;
7814
7815         counter = flow_dv_counter_alloc(dev,
7816                                 count ? count->shared : 0,
7817                                 count ? count->id : 0,
7818                                 dev_flow->dv.group, !!age);
7819         if (!counter || age == NULL)
7820                 return counter;
7821         age_param = flow_dv_counter_idx_get_age(dev, counter);
7822         age_param->context = age->context ? age->context :
7823                 (void *)(uintptr_t)(dev_flow->flow_idx);
7824         /*
7825          * The counter age accuracy may have a bit of delay. Apply a 3/4
7826          * second bias on the timeout in order to let the flow age in time.
7827          */
7828         age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
7829         age_param->port_id = dev->data->port_id;
7830         /* Set the expire time in units of 0.1 sec. */
7835         age_param->expire = age_param->timeout +
7836                         rte_rdtsc() / (rte_get_tsc_hz() / 10);
7837         rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
7838         return counter;
7839 }
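
/*
 * Editorial example of the aging arithmetic above, for age->timeout of
 * 10 seconds and assuming MLX5_AGING_TIME_DELAY expresses the 3/4 second
 * bias in 0.1 sec ticks:
 *
 *	age_param->timeout = 10 * 10 - MLX5_AGING_TIME_DELAY; // ~92 ticks
 *	age_param->expire  = age_param->timeout
 *			   + rte_rdtsc() / (rte_get_tsc_hz() / 10);
 *
 * The flow is thus reported as aged slightly before the nominal timeout,
 * compensating for the counter polling latency.
 */
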
7840 /**
7841  * Add Tx queue matcher
7842  *
7843  * @param[in] dev
7844  *   Pointer to the dev struct.
7845  * @param[in, out] matcher
7846  *   Flow matcher.
7847  * @param[in, out] key
7848  *   Flow matcher value.
7849  * @param[in] item
7850  *   Flow pattern to translate.
7853  */
7854 static void
7855 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
7856                                 void *matcher, void *key,
7857                                 const struct rte_flow_item *item)
7858 {
7859         const struct mlx5_rte_flow_item_tx_queue *queue_m;
7860         const struct mlx5_rte_flow_item_tx_queue *queue_v;
7861         void *misc_m =
7862                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7863         void *misc_v =
7864                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7865         struct mlx5_txq_ctrl *txq;
7866         uint32_t queue;
7867
7869         queue_m = (const void *)item->mask;
7870         if (!queue_m)
7871                 return;
7872         queue_v = (const void *)item->spec;
7873         if (!queue_v)
7874                 return;
7875         txq = mlx5_txq_get(dev, queue_v->queue);
7876         if (!txq)
7877                 return;
7878         queue = txq->obj->sq->id;
7879         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
7880         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
7881                  queue & queue_m->queue);
7882         mlx5_txq_release(dev, queue_v->queue);
7883 }
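
/*
 * Editorial note: the application-facing item carries the ethdev Tx queue
 * index, while the hardware matches on the send queue number. The
 * mlx5_txq_get()/txq->obj->sq->id translation above bridges the two, so
 * the rule matches whatever SQ currently backs that queue index.
 */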
7884
7885 /**
7886  * Set the hash fields according to the @p flow information.
7887  *
7888  * @param[in] dev_flow
7889  *   Pointer to the mlx5_flow.
7890  * @param[in] rss_desc
7891  *   Pointer to the mlx5_flow_rss_desc.
7892  */
7893 static void
7894 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
7895                        struct mlx5_flow_rss_desc *rss_desc)
7896 {
7897         uint64_t items = dev_flow->handle->layers;
7898         int rss_inner = 0;
7899         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
7900
7901         dev_flow->hash_fields = 0;
7902 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
7903         if (rss_desc->level >= 2) {
7904                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
7905                 rss_inner = 1;
7906         }
7907 #endif
7908         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
7909             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
7910                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
7911                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7912                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
7913                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7914                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
7915                         else
7916                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
7917                 }
7918         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
7919                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
7920                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
7921                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7922                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
7923                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7924                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
7925                         else
7926                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
7927                 }
7928         }
7929         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
7930             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
7931                 if (rss_types & ETH_RSS_UDP) {
7932                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7933                                 dev_flow->hash_fields |=
7934                                                 IBV_RX_HASH_SRC_PORT_UDP;
7935                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7936                                 dev_flow->hash_fields |=
7937                                                 IBV_RX_HASH_DST_PORT_UDP;
7938                         else
7939                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
7940                 }
7941         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
7942                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
7943                 if (rss_types & ETH_RSS_TCP) {
7944                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7945                                 dev_flow->hash_fields |=
7946                                                 IBV_RX_HASH_SRC_PORT_TCP;
7947                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7948                                 dev_flow->hash_fields |=
7949                                                 IBV_RX_HASH_DST_PORT_TCP;
7950                         else
7951                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
7952                 }
7953         }
7954 }
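
/*
 * Editorial example: for an outer IPv4/UDP flow with rss_desc->level < 2
 * and rss_desc->types = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_L4_SRC_ONLY,
 * the selection above produces
 *
 *	dev_flow->hash_fields = MLX5_IPV4_IBV_RX_HASH |
 *				IBV_RX_HASH_SRC_PORT_UDP;
 *
 * both IPv4 addresses feed the hash, but only the UDP source port does,
 * honoring the L4_SRC_ONLY request.
 */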
7955
7956 /**
7957  * Fill the flow with the DV spec without taking any locks
7958  * (the required mutex must be held by the caller).
7959  *
7960  * @param[in] dev
7961  *   Pointer to rte_eth_dev structure.
7962  * @param[in, out] dev_flow
7963  *   Pointer to the sub flow.
7964  * @param[in] attr
7965  *   Pointer to the flow attributes.
7966  * @param[in] items
7967  *   Pointer to the list of items.
7968  * @param[in] actions
7969  *   Pointer to the list of actions.
7970  * @param[out] error
7971  *   Pointer to the error structure.
7972  *
7973  * @return
7974  *   0 on success, a negative errno value otherwise and rte_errno is set.
7975  */
7976 static int
7977 __flow_dv_translate(struct rte_eth_dev *dev,
7978                     struct mlx5_flow *dev_flow,
7979                     const struct rte_flow_attr *attr,
7980                     const struct rte_flow_item items[],
7981                     const struct rte_flow_action actions[],
7982                     struct rte_flow_error *error)
7983 {
7984         struct mlx5_priv *priv = dev->data->dev_private;
7985         struct mlx5_dev_config *dev_conf = &priv->config;
7986         struct rte_flow *flow = dev_flow->flow;
7987         struct mlx5_flow_handle *handle = dev_flow->handle;
7988         struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
7989                                               priv->rss_desc)
7990                                               [!!priv->flow_nested_idx];
7991         uint64_t item_flags = 0;
7992         uint64_t last_item = 0;
7993         uint64_t action_flags = 0;
7994         uint64_t priority = attr->priority;
7995         struct mlx5_flow_dv_matcher matcher = {
7996                 .mask = {
7997                         .size = sizeof(matcher.mask.buf) -
7998                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
7999                 },
8000         };
8001         int actions_n = 0;
8002         bool actions_end = false;
8003         union {
8004                 struct mlx5_flow_dv_modify_hdr_resource res;
8005                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
8006                             sizeof(struct mlx5_modification_cmd) *
8007                             (MLX5_MAX_MODIFY_NUM + 1)];
8008         } mhdr_dummy;
8009         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
8010         const struct rte_flow_action_count *count = NULL;
8011         const struct rte_flow_action_age *age = NULL;
8012         union flow_dv_attr flow_attr = { .attr = 0 };
8013         uint32_t tag_be;
8014         union mlx5_flow_tbl_key tbl_key;
8015         uint32_t modify_action_position = UINT32_MAX;
8016         void *match_mask = matcher.mask.buf;
8017         void *match_value = dev_flow->dv.value.buf;
8018         uint8_t next_protocol = 0xff;
8019         struct rte_vlan_hdr vlan = { 0 };
8020         uint32_t table;
8021         int ret = 0;
8022
8023         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
8024                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
8025         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
8026                                        !!priv->fdb_def_rule, &table, error);
8027         if (ret)
8028                 return ret;
8029         dev_flow->dv.group = table;
8030         if (attr->transfer)
8031                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
8032         if (priority == MLX5_FLOW_PRIO_RSVD)
8033                 priority = dev_conf->flow_prio - 1;
8034         /* The number of actions must be reset to 0 since the stack may be dirty. */
8035         mhdr_res->actions_num = 0;
8036         for (; !actions_end ; actions++) {
8037                 const struct rte_flow_action_queue *queue;
8038                 const struct rte_flow_action_rss *rss;
8039                 const struct rte_flow_action *action = actions;
8040                 const uint8_t *rss_key;
8041                 const struct rte_flow_action_jump *jump_data;
8042                 const struct rte_flow_action_meter *mtr;
8043                 struct mlx5_flow_tbl_resource *tbl;
8044                 uint32_t port_id = 0;
8045                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
8046                 int action_type = actions->type;
8047                 const struct rte_flow_action *found_action = NULL;
8048                 struct mlx5_flow_meter *fm = NULL;
8049
8050                 if (!mlx5_flow_os_action_supported(action_type))
8051                         return rte_flow_error_set(error, ENOTSUP,
8052                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8053                                                   actions,
8054                                                   "action not supported");
8055                 switch (action_type) {
8056                 case RTE_FLOW_ACTION_TYPE_VOID:
8057                         break;
8058                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
8059                         if (flow_dv_translate_action_port_id(dev, action,
8060                                                              &port_id, error))
8061                                 return -rte_errno;
8062                         port_id_resource.port_id = port_id;
8063                         MLX5_ASSERT(!handle->rix_port_id_action);
8064                         if (flow_dv_port_id_action_resource_register
8065                             (dev, &port_id_resource, dev_flow, error))
8066                                 return -rte_errno;
8067                         dev_flow->dv.actions[actions_n++] =
8068                                         dev_flow->dv.port_id_action->action;
8069                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
8070                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
8071                         break;
8072                 case RTE_FLOW_ACTION_TYPE_FLAG:
8073                         action_flags |= MLX5_FLOW_ACTION_FLAG;
8074                         dev_flow->handle->mark = 1;
8075                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8076                                 struct rte_flow_action_mark mark = {
8077                                         .id = MLX5_FLOW_MARK_DEFAULT,
8078                                 };
8079
8080                                 if (flow_dv_convert_action_mark(dev, &mark,
8081                                                                 mhdr_res,
8082                                                                 error))
8083                                         return -rte_errno;
8084                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8085                                 break;
8086                         }
8087                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
8088                         /*
8089                          * Only one FLAG or MARK is supported per device flow
8090                          * right now. So the pointer to the tag resource must be
8091                          * zero before the register process.
8092                          */
8093                         MLX5_ASSERT(!handle->dvh.rix_tag);
8094                         if (flow_dv_tag_resource_register(dev, tag_be,
8095                                                           dev_flow, error))
8096                                 return -rte_errno;
8097                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8098                         dev_flow->dv.actions[actions_n++] =
8099                                         dev_flow->dv.tag_resource->action;
8100                         break;
8101                 case RTE_FLOW_ACTION_TYPE_MARK:
8102                         action_flags |= MLX5_FLOW_ACTION_MARK;
8103                         dev_flow->handle->mark = 1;
8104                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8105                                 const struct rte_flow_action_mark *mark =
8106                                         (const struct rte_flow_action_mark *)
8107                                                 actions->conf;
8108
8109                                 if (flow_dv_convert_action_mark(dev, mark,
8110                                                                 mhdr_res,
8111                                                                 error))
8112                                         return -rte_errno;
8113                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8114                                 break;
8115                         }
8116                         /* Fall-through */
8117                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
8118                         /* Legacy (non-extensive) MARK action. */
8119                         tag_be = mlx5_flow_mark_set
8120                               (((const struct rte_flow_action_mark *)
8121                                (actions->conf))->id);
8122                         MLX5_ASSERT(!handle->dvh.rix_tag);
8123                         if (flow_dv_tag_resource_register(dev, tag_be,
8124                                                           dev_flow, error))
8125                                 return -rte_errno;
8126                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8127                         dev_flow->dv.actions[actions_n++] =
8128                                         dev_flow->dv.tag_resource->action;
8129                         break;
8130                 case RTE_FLOW_ACTION_TYPE_SET_META:
8131                         if (flow_dv_convert_action_set_meta
8132                                 (dev, mhdr_res, attr,
8133                                  (const struct rte_flow_action_set_meta *)
8134                                   actions->conf, error))
8135                                 return -rte_errno;
8136                         action_flags |= MLX5_FLOW_ACTION_SET_META;
8137                         break;
8138                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
8139                         if (flow_dv_convert_action_set_tag
8140                                 (dev, mhdr_res,
8141                                  (const struct rte_flow_action_set_tag *)
8142                                   actions->conf, error))
8143                                 return -rte_errno;
8144                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8145                         break;
8146                 case RTE_FLOW_ACTION_TYPE_DROP:
8147                         action_flags |= MLX5_FLOW_ACTION_DROP;
8148                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
8149                         break;
8150                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8151                         queue = actions->conf;
8152                         rss_desc->queue_num = 1;
8153                         rss_desc->queue[0] = queue->index;
8154                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8155                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8156                         break;
8157                 case RTE_FLOW_ACTION_TYPE_RSS:
8158                         rss = actions->conf;
8159                         memcpy(rss_desc->queue, rss->queue,
8160                                rss->queue_num * sizeof(uint16_t));
8161                         rss_desc->queue_num = rss->queue_num;
8162                         /* NULL RSS key indicates default RSS key. */
8163                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
8164                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
8165                         /*
8166                          * rss->level and rss->types should be set in advance
8167                          * when expanding items for RSS.
8168                          */
8169                         action_flags |= MLX5_FLOW_ACTION_RSS;
8170                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8171                         break;
8172                 case RTE_FLOW_ACTION_TYPE_AGE:
8173                 case RTE_FLOW_ACTION_TYPE_COUNT:
8174                         if (!dev_conf->devx) {
8175                                 return rte_flow_error_set
8176                                               (error, ENOTSUP,
8177                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8178                                                NULL,
8179                                                "count action not supported");
8180                         }
8181                         /* Save information first, will apply later. */
8182                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
8183                                 count = action->conf;
8184                         else
8185                                 age = action->conf;
8186                         action_flags |= MLX5_FLOW_ACTION_COUNT;
8187                         break;
8188                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
8189                         dev_flow->dv.actions[actions_n++] =
8190                                                 priv->sh->pop_vlan_action;
8191                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
8192                         break;
8193                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
8194                         if (!(action_flags &
8195                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
8196                                 flow_dev_get_vlan_info_from_items(items, &vlan);
8197                         vlan.eth_proto = rte_be_to_cpu_16
8198                              ((((const struct rte_flow_action_of_push_vlan *)
8199                                                    actions->conf)->ethertype));
8200                         found_action = mlx5_flow_find_action
8201                                         (actions + 1,
8202                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
8203                         if (found_action)
8204                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8205                         found_action = mlx5_flow_find_action
8206                                         (actions + 1,
8207                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
8208                         if (found_action)
8209                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8210                         if (flow_dv_create_action_push_vlan
8211                                             (dev, attr, &vlan, dev_flow, error))
8212                                 return -rte_errno;
8213                         dev_flow->dv.actions[actions_n++] =
8214                                         dev_flow->dv.push_vlan_res->action;
8215                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
8216                         break;
8217                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
8218                         /* Already handled by the OF_PUSH_VLAN action. */
8219                         MLX5_ASSERT(action_flags &
8220                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
8221                         break;
8222                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
8223                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8224                                 break;
8225                         flow_dev_get_vlan_info_from_items(items, &vlan);
8226                         mlx5_update_vlan_vid_pcp(actions, &vlan);
8227                         /* If no VLAN push - this is a modify header action */
8228                         if (flow_dv_convert_action_modify_vlan_vid
8229                                                 (mhdr_res, actions, error))
8230                                 return -rte_errno;
8231                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
8232                         break;
8233                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
8234                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
8235                         if (flow_dv_create_action_l2_encap(dev, actions,
8236                                                            dev_flow,
8237                                                            attr->transfer,
8238                                                            error))
8239                                 return -rte_errno;
8240                         dev_flow->dv.actions[actions_n++] =
8241                                         dev_flow->dv.encap_decap->action;
8242                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8243                         break;
8244                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
8245                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
8246                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
8247                                                            attr->transfer,
8248                                                            error))
8249                                 return -rte_errno;
8250                         dev_flow->dv.actions[actions_n++] =
8251                                         dev_flow->dv.encap_decap->action;
8252                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8253                         break;
8254                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
8255                         /* Handle encap with preceding decap. */
8256                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
8257                                 if (flow_dv_create_action_raw_encap
8258                                         (dev, actions, dev_flow, attr, error))
8259                                         return -rte_errno;
8260                                 dev_flow->dv.actions[actions_n++] =
8261                                         dev_flow->dv.encap_decap->action;
8262                         } else {
8263                                 /* Handle encap without preceding decap. */
8264                                 if (flow_dv_create_action_l2_encap
8265                                     (dev, actions, dev_flow, attr->transfer,
8266                                      error))
8267                                         return -rte_errno;
8268                                 dev_flow->dv.actions[actions_n++] =
8269                                         dev_flow->dv.encap_decap->action;
8270                         }
8271                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8272                         break;
8273                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
8274                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
8275                                 ;
8276                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
8277                                 if (flow_dv_create_action_l2_decap
8278                                     (dev, dev_flow, attr->transfer, error))
8279                                         return -rte_errno;
8280                                 dev_flow->dv.actions[actions_n++] =
8281                                         dev_flow->dv.encap_decap->action;
8282                         }
8283                         /* If decap is followed by encap, handle it at encap. */
8284                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8285                         break;
8286                 case RTE_FLOW_ACTION_TYPE_JUMP:
8287                         jump_data = action->conf;
8288                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
8289                                                        jump_data->group,
8290                                                        !!priv->fdb_def_rule,
8291                                                        &table, error);
8292                         if (ret)
8293                                 return ret;
8294                         tbl = flow_dv_tbl_resource_get(dev, table,
8295                                                        attr->egress,
8296                                                        attr->transfer, error);
8297                         if (!tbl)
8298                                 return rte_flow_error_set
8299                                                 (error, rte_errno,
8300                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8301                                                  NULL,
8302                                                  "cannot create jump action.");
8303                         if (flow_dv_jump_tbl_resource_register
8304                             (dev, tbl, dev_flow, error)) {
8305                                 flow_dv_tbl_resource_release(dev, tbl);
8306                                 return rte_flow_error_set
8307                                                 (error, rte_errno,
8308                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8309                                                  NULL,
8310                                                  "cannot create jump action.");
8311                         }
8312                         dev_flow->dv.actions[actions_n++] =
8313                                         dev_flow->dv.jump->action;
8314                         action_flags |= MLX5_FLOW_ACTION_JUMP;
8315                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
8316                         break;
8317                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
8318                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
8319                         if (flow_dv_convert_action_modify_mac
8320                                         (mhdr_res, actions, error))
8321                                 return -rte_errno;
8322                         action_flags |= actions->type ==
8323                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
8324                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
8325                                         MLX5_FLOW_ACTION_SET_MAC_DST;
8326                         break;
8327                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
8328                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
8329                         if (flow_dv_convert_action_modify_ipv4
8330                                         (mhdr_res, actions, error))
8331                                 return -rte_errno;
8332                         action_flags |= actions->type ==
8333                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
8334                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
8335                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
8336                         break;
8337                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
8338                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
8339                         if (flow_dv_convert_action_modify_ipv6
8340                                         (mhdr_res, actions, error))
8341                                 return -rte_errno;
8342                         action_flags |= actions->type ==
8343                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
8344                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
8345                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
8346                         break;
8347                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
8348                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
8349                         if (flow_dv_convert_action_modify_tp
8350                                         (mhdr_res, actions, items,
8351                                          &flow_attr, dev_flow, !!(action_flags &
8352                                          MLX5_FLOW_ACTION_DECAP), error))
8353                                 return -rte_errno;
8354                         action_flags |= actions->type ==
8355                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
8356                                         MLX5_FLOW_ACTION_SET_TP_SRC :
8357                                         MLX5_FLOW_ACTION_SET_TP_DST;
8358                         break;
8359                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
8360                         if (flow_dv_convert_action_modify_dec_ttl
8361                                         (mhdr_res, items, &flow_attr, dev_flow,
8362                                          !!(action_flags &
8363                                          MLX5_FLOW_ACTION_DECAP), error))
8364                                 return -rte_errno;
8365                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
8366                         break;
8367                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
8368                         if (flow_dv_convert_action_modify_ttl
8369                                         (mhdr_res, actions, items, &flow_attr,
8370                                          dev_flow, !!(action_flags &
8371                                          MLX5_FLOW_ACTION_DECAP), error))
8372                                 return -rte_errno;
8373                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
8374                         break;
8375                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
8376                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
8377                         if (flow_dv_convert_action_modify_tcp_seq
8378                                         (mhdr_res, actions, error))
8379                                 return -rte_errno;
8380                         action_flags |= actions->type ==
8381                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
8382                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
8383                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
8384                         break;
8385
8386                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
8387                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
8388                         if (flow_dv_convert_action_modify_tcp_ack
8389                                         (mhdr_res, actions, error))
8390                                 return -rte_errno;
8391                         action_flags |= actions->type ==
8392                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
8393                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
8394                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
8395                         break;
8396                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
8397                         if (flow_dv_convert_action_set_reg
8398                                         (mhdr_res, actions, error))
8399                                 return -rte_errno;
8400                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8401                         break;
8402                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
8403                         if (flow_dv_convert_action_copy_mreg
8404                                         (dev, mhdr_res, actions, error))
8405                                 return -rte_errno;
8406                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8407                         break;
8408                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
8409                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
8410                         dev_flow->handle->fate_action =
8411                                         MLX5_FLOW_FATE_DEFAULT_MISS;
8412                         break;
8413                 case RTE_FLOW_ACTION_TYPE_METER:
8414                         mtr = actions->conf;
8415                         if (!flow->meter) {
8416                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
8417                                                             attr, error);
8418                                 if (!fm)
8419                                         return rte_flow_error_set(error,
8420                                                 rte_errno,
8421                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8422                                                 NULL,
8423                                                 "meter not found "
8424                                                 "or invalid parameters");
8425                                 flow->meter = fm->idx;
8426                         }
8427                         /* Set the meter action. */
8428                         if (!fm) {
8429                                 fm = mlx5_ipool_get(priv->sh->ipool
8430                                                 [MLX5_IPOOL_MTR], flow->meter);
8431                                 if (!fm)
8432                                         return rte_flow_error_set(error,
8433                                                 rte_errno,
8434                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8435                                                 NULL,
8436                                                 "meter not found "
8437                                                 "or invalid parameters");
8438                         }
8439                         dev_flow->dv.actions[actions_n++] =
8440                                 fm->mfts->meter_action;
8441                         action_flags |= MLX5_FLOW_ACTION_METER;
8442                         break;
8443                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
8444                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
8445                                                               actions, error))
8446                                 return -rte_errno;
8447                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
8448                         break;
8449                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
8450                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
8451                                                               actions, error))
8452                                 return -rte_errno;
8453                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
8454                         break;
8455                 case RTE_FLOW_ACTION_TYPE_END:
8456                         actions_end = true;
8457                         if (mhdr_res->actions_num) {
8458                                 /* Create the modify action if needed. */
8459                                 if (flow_dv_modify_hdr_resource_register
8460                                         (dev, mhdr_res, dev_flow, error))
8461                                         return -rte_errno;
8462                                 dev_flow->dv.actions[modify_action_position] =
8463                                         handle->dvh.modify_hdr->action;
8464                         }
8465                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
8466                                 flow->counter =
8467                                         flow_dv_translate_create_counter(dev,
8468                                                 dev_flow, count, age);
8469
8470                                 if (!flow->counter)
8471                                         return rte_flow_error_set
8472                                                 (error, rte_errno,
8473                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8474                                                 NULL,
8475                                                 "cannot create counter"
8476                                                 " object.");
8477                                 dev_flow->dv.actions[actions_n++] =
8478                                           (flow_dv_counter_get_by_idx(dev,
8479                                           flow->counter, NULL))->action;
8480                         }
8481                         break;
8482                 default:
8483                         break;
8484                 }
8485                 if (mhdr_res->actions_num &&
8486                     modify_action_position == UINT32_MAX)
8487                         modify_action_position = actions_n++;
8488         }
8489         dev_flow->dv.actions_n = actions_n;
8490         dev_flow->act_flags = action_flags;
8491         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
8492                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
8493                 int item_type = items->type;
8494
8495                 if (!mlx5_flow_os_item_supported(item_type))
8496                         return rte_flow_error_set(error, ENOTSUP,
8497                                                   RTE_FLOW_ERROR_TYPE_ITEM,
8498                                                   NULL, "item not supported");
8499                 switch (item_type) {
8500                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
8501                         flow_dv_translate_item_port_id(dev, match_mask,
8502                                                        match_value, items);
8503                         last_item = MLX5_FLOW_ITEM_PORT_ID;
8504                         break;
8505                 case RTE_FLOW_ITEM_TYPE_ETH:
8506                         flow_dv_translate_item_eth(match_mask, match_value,
8507                                                    items, tunnel,
8508                                                    dev_flow->dv.group);
8509                         matcher.priority = action_flags &
8510                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
8511                                         !dev_flow->external ?
8512                                         MLX5_PRIORITY_MAP_L3 :
8513                                         MLX5_PRIORITY_MAP_L2;
8514                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8515                                              MLX5_FLOW_LAYER_OUTER_L2;
8516                         break;
8517                 case RTE_FLOW_ITEM_TYPE_VLAN:
8518                         flow_dv_translate_item_vlan(dev_flow,
8519                                                     match_mask, match_value,
8520                                                     items, tunnel,
8521                                                     dev_flow->dv.group);
8522                         matcher.priority = MLX5_PRIORITY_MAP_L2;
8523                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
8524                                               MLX5_FLOW_LAYER_INNER_VLAN) :
8525                                              (MLX5_FLOW_LAYER_OUTER_L2 |
8526                                               MLX5_FLOW_LAYER_OUTER_VLAN);
8527                         break;
8528                 case RTE_FLOW_ITEM_TYPE_IPV4:
8529                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8530                                                   &item_flags, &tunnel);
8531                         flow_dv_translate_item_ipv4(match_mask, match_value,
8532                                                     items, item_flags, tunnel,
8533                                                     dev_flow->dv.group);
8534                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8535                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8536                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8537                         if (items->mask != NULL &&
8538                             ((const struct rte_flow_item_ipv4 *)
8539                              items->mask)->hdr.next_proto_id) {
8540                                 next_protocol =
8541                                         ((const struct rte_flow_item_ipv4 *)
8542                                          (items->spec))->hdr.next_proto_id;
8543                                 next_protocol &=
8544                                         ((const struct rte_flow_item_ipv4 *)
8545                                          (items->mask))->hdr.next_proto_id;
8546                         } else {
8547                                 /* Reset for inner layer. */
8548                                 next_protocol = 0xff;
8549                         }
8550                         break;
8551                 case RTE_FLOW_ITEM_TYPE_IPV6:
8552                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8553                                                   &item_flags, &tunnel);
8554                         flow_dv_translate_item_ipv6(match_mask, match_value,
8555                                                     items, item_flags, tunnel,
8556                                                     dev_flow->dv.group);
8557                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8558                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8559                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8560                         if (items->mask != NULL &&
8561                             ((const struct rte_flow_item_ipv6 *)
8562                              items->mask)->hdr.proto) {
8563                                 next_protocol =
8564                                         ((const struct rte_flow_item_ipv6 *)
8565                                          items->spec)->hdr.proto;
8566                                 next_protocol &=
8567                                         ((const struct rte_flow_item_ipv6 *)
8568                                          items->mask)->hdr.proto;
8569                         } else {
8570                                 /* Reset for inner layer. */
8571                                 next_protocol = 0xff;
8572                         }
8573                         break;
8574                 case RTE_FLOW_ITEM_TYPE_TCP:
8575                         flow_dv_translate_item_tcp(match_mask, match_value,
8576                                                    items, tunnel);
8577                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8578                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8579                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
8580                         break;
8581                 case RTE_FLOW_ITEM_TYPE_UDP:
8582                         flow_dv_translate_item_udp(match_mask, match_value,
8583                                                    items, tunnel);
8584                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8585                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8586                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
8587                         break;
8588                 case RTE_FLOW_ITEM_TYPE_GRE:
8589                         flow_dv_translate_item_gre(match_mask, match_value,
8590                                                    items, tunnel);
8591                         matcher.priority = rss_desc->level >= 2 ?
8592                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8593                         last_item = MLX5_FLOW_LAYER_GRE;
8594                         break;
8595                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8596                         flow_dv_translate_item_gre_key(match_mask,
8597                                                        match_value, items);
8598                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
8599                         break;
8600                 case RTE_FLOW_ITEM_TYPE_NVGRE:
8601                         flow_dv_translate_item_nvgre(match_mask, match_value,
8602                                                      items, tunnel);
8603                         matcher.priority = rss_desc->level >= 2 ?
8604                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8605                         last_item = MLX5_FLOW_LAYER_GRE;
8606                         break;
8607                 case RTE_FLOW_ITEM_TYPE_VXLAN:
8608                         flow_dv_translate_item_vxlan(match_mask, match_value,
8609                                                      items, tunnel);
8610                         matcher.priority = rss_desc->level >= 2 ?
8611                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8612                         last_item = MLX5_FLOW_LAYER_VXLAN;
8613                         break;
8614                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8615                         flow_dv_translate_item_vxlan_gpe(match_mask,
8616                                                          match_value, items,
8617                                                          tunnel);
8618                         matcher.priority = rss_desc->level >= 2 ?
8619                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8620                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8621                         break;
8622                 case RTE_FLOW_ITEM_TYPE_GENEVE:
8623                         flow_dv_translate_item_geneve(match_mask, match_value,
8624                                                       items, tunnel);
8625                         matcher.priority = rss_desc->level >= 2 ?
8626                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8627                         last_item = MLX5_FLOW_LAYER_GENEVE;
8628                         break;
8629                 case RTE_FLOW_ITEM_TYPE_MPLS:
8630                         flow_dv_translate_item_mpls(match_mask, match_value,
8631                                                     items, last_item, tunnel);
8632                         matcher.priority = rss_desc->level >= 2 ?
8633                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8634                         last_item = MLX5_FLOW_LAYER_MPLS;
8635                         break;
8636                 case RTE_FLOW_ITEM_TYPE_MARK:
8637                         flow_dv_translate_item_mark(dev, match_mask,
8638                                                     match_value, items);
8639                         last_item = MLX5_FLOW_ITEM_MARK;
8640                         break;
8641                 case RTE_FLOW_ITEM_TYPE_META:
8642                         flow_dv_translate_item_meta(dev, match_mask,
8643                                                     match_value, attr, items);
8644                         last_item = MLX5_FLOW_ITEM_METADATA;
8645                         break;
8646                 case RTE_FLOW_ITEM_TYPE_ICMP:
8647                         flow_dv_translate_item_icmp(match_mask, match_value,
8648                                                     items, tunnel);
8649                         last_item = MLX5_FLOW_LAYER_ICMP;
8650                         break;
8651                 case RTE_FLOW_ITEM_TYPE_ICMP6:
8652                         flow_dv_translate_item_icmp6(match_mask, match_value,
8653                                                       items, tunnel);
8654                         last_item = MLX5_FLOW_LAYER_ICMP6;
8655                         break;
8656                 case RTE_FLOW_ITEM_TYPE_TAG:
8657                         flow_dv_translate_item_tag(dev, match_mask,
8658                                                    match_value, items);
8659                         last_item = MLX5_FLOW_ITEM_TAG;
8660                         break;
8661                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8662                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
8663                                                         match_value, items);
8664                         last_item = MLX5_FLOW_ITEM_TAG;
8665                         break;
8666                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8667                         flow_dv_translate_item_tx_queue(dev, match_mask,
8668                                                         match_value,
8669                                                         items);
8670                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
8671                         break;
8672                 case RTE_FLOW_ITEM_TYPE_GTP:
8673                         flow_dv_translate_item_gtp(match_mask, match_value,
8674                                                    items, tunnel);
8675                         matcher.priority = rss_desc->level >= 2 ?
8676                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8677                         last_item = MLX5_FLOW_LAYER_GTP;
8678                         break;
8679                 case RTE_FLOW_ITEM_TYPE_ECPRI:
8680                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
8681                                 /* Create it only the first time it is used. */
8682                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
8683                                 if (ret)
8684                                         return rte_flow_error_set
8685                                                 (error, -ret,
8686                                                 RTE_FLOW_ERROR_TYPE_ITEM,
8687                                                 NULL,
8688                                                 "cannot create eCPRI parser");
8689                         }
8690                         /* Adjust the matcher and device flow value lengths. */
8691                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
8692                         dev_flow->dv.value.size =
8693                                         MLX5_ST_SZ_BYTES(fte_match_param);
8694                         flow_dv_translate_item_ecpri(dev, match_mask,
8695                                                      match_value, items);
8696                         /* No other protocol should follow the eCPRI layer. */
8697                         last_item = MLX5_FLOW_LAYER_ECPRI;
8698                         break;
8699                 default:
8700                         break;
8701                 }
8702                 item_flags |= last_item;
8703         }
8704         /*
8705          * When E-Switch mode is enabled, we have two cases where we need to
8706          * set the source port manually.
8707          * The first one is the NIC steering rule, and the second is the
8708          * E-Switch rule where no port_id item was found. In both cases
8709          * the source port is set according to the current port in use.
8710          */
8711         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
8712             (priv->representor || priv->master)) {
8713                 if (flow_dv_translate_item_port_id(dev, match_mask,
8714                                                    match_value, NULL))
8715                         return -rte_errno;
8716         }
8717 #ifdef RTE_LIBRTE_MLX5_DEBUG
8718         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
8719                                               dev_flow->dv.value.buf));
8720 #endif
8721         /*
8722          * Layers may already be initialized from the prefix flow if this
8723          * dev_flow is the suffix flow.
8724          */
8725         handle->layers |= item_flags;
8726         if (action_flags & MLX5_FLOW_ACTION_RSS)
8727                 flow_dv_hashfields_set(dev_flow, rss_desc);
8728         /* Register matcher. */
8729         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
8730                                     matcher.mask.size);
8731         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
8732                                                      matcher.priority);
8733         /* The reserved field does not need to be set to 0 here. */
8734         tbl_key.domain = attr->transfer;
8735         tbl_key.direction = attr->egress;
8736         tbl_key.table_id = dev_flow->dv.group;
8737         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
8738                 return -rte_errno;
8739         return 0;
8740 }
8741
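/*
 * Note on the translation loop above: all header-modify actions are merged
 * into one modify-header resource (mhdr_res). A single slot is reserved in
 * dev_flow->dv.actions[] when the first such action is seen and is filled
 * with the registered resource only at RTE_FLOW_ACTION_TYPE_END. A minimal
 * sketch of that bookkeeping (illustrative only; "acts", "pos", "n" and
 * "registered_action" are stand-ins, not driver symbols):
 */
#if 0
        /* After converting each action: */
        if (mhdr_res->actions_num && pos == UINT32_MAX)
                pos = n++;                      /* Reserve the slot once. */
        /* At RTE_FLOW_ACTION_TYPE_END: */
        if (mhdr_res->actions_num)
                acts[pos] = registered_action;  /* Fill the reserved slot. */
#endif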
8742 /**
8743  * Apply the flow to the NIC, lock free
8744  * (mutex should be acquired by the caller).
8745  *
8746  * @param[in] dev
8747  *   Pointer to the Ethernet device structure.
8748  * @param[in, out] flow
8749  *   Pointer to flow structure.
8750  * @param[out] error
8751  *   Pointer to error structure.
8752  *
8753  * @return
8754  *   0 on success, a negative errno value otherwise and rte_errno is set.
8755  */
8756 static int
8757 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
8758                 struct rte_flow_error *error)
8759 {
8760         struct mlx5_flow_dv_workspace *dv;
8761         struct mlx5_flow_handle *dh;
8762         struct mlx5_flow_handle_dv *dv_h;
8763         struct mlx5_flow *dev_flow;
8764         struct mlx5_priv *priv = dev->data->dev_private;
8765         uint32_t handle_idx;
8766         int n;
8767         int err;
8768         int idx;
8769
8770         for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
8771                 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
8772                 dv = &dev_flow->dv;
8773                 dh = dev_flow->handle;
8774                 dv_h = &dh->dvh;
8775                 n = dv->actions_n;
8776                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8777                         if (dv->transfer) {
8778                                 dv->actions[n++] = priv->sh->esw_drop_action;
8779                         } else {
8780                                 struct mlx5_hrxq *drop_hrxq;
8781                                 drop_hrxq = mlx5_hrxq_drop_new(dev);
8782                                 if (!drop_hrxq) {
8783                                         rte_flow_error_set
8784                                                 (error, errno,
8785                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8786                                                  NULL,
8787                                                  "cannot get drop hash queue");
8788                                         goto error;
8789                                 }
8790                                 /*
8791                                  * Drop queues will be released by the dedicated
8792                                  * mlx5_hrxq_drop_release() function. Assign
8793                                  * the special index to hrxq to mark that the
8794                                  * queue has been allocated.
8795                                  */
8796                                 dh->rix_hrxq = UINT32_MAX;
8797                                 dv->actions[n++] = drop_hrxq->action;
8798                         }
8799                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8800                         struct mlx5_hrxq *hrxq;
8801                         uint32_t hrxq_idx;
8802                         struct mlx5_flow_rss_desc *rss_desc =
8803                                 &((struct mlx5_flow_rss_desc *)priv->rss_desc)
8804                                 [!!priv->flow_nested_idx];
8805
8806                         MLX5_ASSERT(rss_desc->queue_num);
8807                         hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
8808                                                  MLX5_RSS_HASH_KEY_LEN,
8809                                                  dev_flow->hash_fields,
8810                                                  rss_desc->queue,
8811                                                  rss_desc->queue_num);
8812                         if (!hrxq_idx) {
8813                                 hrxq_idx = mlx5_hrxq_new
8814                                                 (dev, rss_desc->key,
8815                                                 MLX5_RSS_HASH_KEY_LEN,
8816                                                 dev_flow->hash_fields,
8817                                                 rss_desc->queue,
8818                                                 rss_desc->queue_num,
8819                                                 !!(dh->layers &
8820                                                 MLX5_FLOW_LAYER_TUNNEL));
8821                         }
8822                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8823                                               hrxq_idx);
8824                         if (!hrxq) {
8825                                 rte_flow_error_set
8826                                         (error, rte_errno,
8827                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8828                                          "cannot get hash queue");
8829                                 goto error;
8830                         }
8831                         dh->rix_hrxq = hrxq_idx;
8832                         dv->actions[n++] = hrxq->action;
8833                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
8834                         if (flow_dv_default_miss_resource_register
8835                                         (dev, error)) {
8836                                 rte_flow_error_set
8837                                         (error, rte_errno,
8838                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8839                                          "cannot create default miss resource");
8840                                 goto error_default_miss;
8841                         }
8842                         dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
8843                         dv->actions[n++] = priv->sh->default_miss.action;
8844                 }
8845                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
8846                                                (void *)&dv->value, n,
8847                                                dv->actions, &dh->drv_flow);
8848                 if (err) {
8849                         rte_flow_error_set(error, errno,
8850                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8851                                            NULL,
8852                                            "hardware refuses to create flow");
8853                         goto error;
8854                 }
8855                 if (priv->vmwa_context &&
8856                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
8857                         /*
8858                          * The rule contains the VLAN pattern.
8859                          * For a VF we are going to create a VLAN
8860                          * interface to make the hypervisor set the
8861                          * correct E-Switch vport context.
8862                          */
8863                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
8864                 }
8865         }
8866         return 0;
8867 error:
8868         if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
8869                 flow_dv_default_miss_resource_release(dev);
8870 error_default_miss:
8871         err = rte_errno; /* Save rte_errno before cleanup. */
8872         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
8873                        handle_idx, dh, next) {
8874                 /* hrxq is a union; don't clear it if the flag is not set. */
8875                 if (dh->rix_hrxq) {
8876                         if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8877                                 mlx5_hrxq_drop_release(dev);
8878                                 dh->rix_hrxq = 0;
8879                         } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8880                                 mlx5_hrxq_release(dev, dh->rix_hrxq);
8881                                 dh->rix_hrxq = 0;
8882                         }
8883                 }
8884                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
8885                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
8886         }
8887         rte_errno = err; /* Restore rte_errno. */
8888         return -rte_errno;
8889 }
8890
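/*
 * The error path above saves rte_errno before the cleanup loop because the
 * release helpers it calls may overwrite it; a minimal sketch of the idiom
 * (illustrative only, not driver code):
 */
#if 0
        int err = rte_errno;    /* Save rte_errno before cleanup. */

        /*
         * ... release hrxq / default miss / VF VLAN resources; these calls
         * may fail internally and clobber rte_errno ...
         */
        rte_errno = err;        /* Restore the original failure code. */
        return -rte_errno;
#endif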
8891 /**
8892  * Release the flow matcher.
8893  *
8894  * @param dev
8895  *   Pointer to Ethernet device.
8896  * @param handle
8897  *   Pointer to mlx5_flow_handle.
8898  *
8899  * @return
8900  *   1 while a reference on it exists, 0 when freed.
8901  */
8902 static int
8903 flow_dv_matcher_release(struct rte_eth_dev *dev,
8904                         struct mlx5_flow_handle *handle)
8905 {
8906         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
8907
8908         MLX5_ASSERT(matcher->matcher_object);
8909         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
8910                 dev->data->port_id, (void *)matcher,
8911                 rte_atomic32_read(&matcher->refcnt));
8912         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
8913                 claim_zero(mlx5_flow_os_destroy_flow_matcher
8914                            (matcher->matcher_object));
8915                 LIST_REMOVE(matcher, next);
8916                 /* The table reference is decremented in the release interface. */
8917                 flow_dv_tbl_resource_release(dev, matcher->tbl);
8918                 mlx5_free(matcher);
8919                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
8920                         dev->data->port_id, (void *)matcher);
8921                 return 0;
8922         }
8923         return 1;
8924 }
8925
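/*
 * flow_dv_matcher_release() above and the resource release helpers that
 * follow all share one reference-counting pattern; a generic sketch
 * (illustrative only, "cache_entry" is an assumed name):
 */
#if 0
struct cache_entry {
        rte_atomic32_t refcnt;  /* Shared reference counter. */
        void *object;           /* The cached rdma-core/DR object. */
};

static int
cache_entry_release(struct cache_entry *entry)
{
        if (rte_atomic32_dec_and_test(&entry->refcnt)) {
                /* Last reference: destroy the object, unlink and free. */
                return 0;
        }
        return 1;               /* Still referenced by other flows. */
}
#endif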
8926 /**
8927  * Release an encap/decap resource.
8928  *
8929  * @param dev
8930  *   Pointer to Ethernet device.
8931  * @param handle
8932  *   Pointer to mlx5_flow_handle.
8933  *
8934  * @return
8935  *   1 while a reference on it exists, 0 when freed.
8936  */
8937 static int
8938 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
8939                                      struct mlx5_flow_handle *handle)
8940 {
8941         struct mlx5_priv *priv = dev->data->dev_private;
8942         uint32_t idx = handle->dvh.rix_encap_decap;
8943         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
8944
8945         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8946                          idx);
8947         if (!cache_resource)
8948                 return 0;
8949         MLX5_ASSERT(cache_resource->action);
8950         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
8951                 (void *)cache_resource,
8952                 rte_atomic32_read(&cache_resource->refcnt));
8953         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8954                 claim_zero(mlx5_flow_os_destroy_flow_action
8955                                                 (cache_resource->action));
8956                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8957                              &priv->sh->encaps_decaps, idx,
8958                              cache_resource, next);
8959                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
8960                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
8961                         (void *)cache_resource);
8962                 return 0;
8963         }
8964         return 1;
8965 }
8966
8967 /**
8968  * Release a jump-to-table action resource.
8969  *
8970  * @param dev
8971  *   Pointer to Ethernet device.
8972  * @param handle
8973  *   Pointer to mlx5_flow_handle.
8974  *
8975  * @return
8976  *   1 while a reference on it exists, 0 when freed.
8977  */
8978 static int
8979 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
8980                                   struct mlx5_flow_handle *handle)
8981 {
8982         struct mlx5_priv *priv = dev->data->dev_private;
8983         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
8984         struct mlx5_flow_tbl_data_entry *tbl_data;
8985
8986         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
8987                              handle->rix_jump);
8988         if (!tbl_data)
8989                 return 0;
8990         cache_resource = &tbl_data->jump;
8991         MLX5_ASSERT(cache_resource->action);
8992         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
8993                 (void *)cache_resource,
8994                 rte_atomic32_read(&cache_resource->refcnt));
8995         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8996                 claim_zero(mlx5_flow_os_destroy_flow_action
8997                                                 (cache_resource->action));
8998                 /* The jump action memory is freed inside the table release. */
8999                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
9000                 DRV_LOG(DEBUG, "jump table resource %p: removed",
9001                         (void *)cache_resource);
9002                 return 0;
9003         }
9004         return 1;
9005 }
9006
9007 /**
9008  * Release a default miss resource.
9009  *
9010  * @param dev
9011  *   Pointer to Ethernet device.
9012  * @return
9013  *   1 while a reference on it exists, 0 when freed.
9014  */
9015 static int
9016 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
9017 {
9018         struct mlx5_priv *priv = dev->data->dev_private;
9019         struct mlx5_dev_ctx_shared *sh = priv->sh;
9020         struct mlx5_flow_default_miss_resource *cache_resource =
9021                         &sh->default_miss;
9022
9023         MLX5_ASSERT(cache_resource->action);
9024         DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
9025                         (void *)cache_resource->action,
9026                         rte_atomic32_read(&cache_resource->refcnt));
9027         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9028                 claim_zero(mlx5_glue->destroy_flow_action
9029                                 (cache_resource->action));
9030                 DRV_LOG(DEBUG, "default miss resource %p: removed",
9031                                 (void *)cache_resource->action);
9032                 return 0;
9033         }
9034         return 1;
9035 }
9036
9037 /**
9038  * Release a modify-header resource.
9039  *
9040  * @param handle
9041  *   Pointer to mlx5_flow_handle.
9042  *
9043  * @return
9044  *   1 while a reference on it exists, 0 when freed.
9045  */
9046 static int
9047 flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
9048 {
9049         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
9050                                                         handle->dvh.modify_hdr;
9051
9052         MLX5_ASSERT(cache_resource->action);
9053         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
9054                 (void *)cache_resource,
9055                 rte_atomic32_read(&cache_resource->refcnt));
9056         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9057                 claim_zero(mlx5_flow_os_destroy_flow_action
9058                                                 (cache_resource->action));
9059                 LIST_REMOVE(cache_resource, next);
9060                 mlx5_free(cache_resource);
9061                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
9062                         (void *)cache_resource);
9063                 return 0;
9064         }
9065         return 1;
9066 }
9067
9068 /**
9069  * Release a port ID action resource.
9070  *
9071  * @param dev
9072  *   Pointer to Ethernet device.
9073  * @param handle
9074  *   Pointer to mlx5_flow_handle.
9075  *
9076  * @return
9077  *   1 while a reference on it exists, 0 when freed.
9078  */
9079 static int
9080 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
9081                                         struct mlx5_flow_handle *handle)
9082 {
9083         struct mlx5_priv *priv = dev->data->dev_private;
9084         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
9085         uint32_t idx = handle->rix_port_id_action;
9086
9087         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9088                                         idx);
9089         if (!cache_resource)
9090                 return 0;
9091         MLX5_ASSERT(cache_resource->action);
9092         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
9093                 (void *)cache_resource,
9094                 rte_atomic32_read(&cache_resource->refcnt));
9095         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9096                 claim_zero(mlx5_flow_os_destroy_flow_action
9097                                                 (cache_resource->action));
9098                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9099                              &priv->sh->port_id_action_list, idx,
9100                              cache_resource, next);
9101                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
9102                 DRV_LOG(DEBUG, "port id action resource %p: removed",
9103                         (void *)cache_resource);
9104                 return 0;
9105         }
9106         return 1;
9107 }
9108
9109 /**
9110  * Release a push VLAN action resource.
9111  *
9112  * @param dev
9113  *   Pointer to Ethernet device.
9114  * @param handle
9115  *   Pointer to mlx5_flow_handle.
9116  *
9117  * @return
9118  *   1 while a reference on it exists, 0 when freed.
9119  */
9120 static int
9121 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
9122                                           struct mlx5_flow_handle *handle)
9123 {
9124         struct mlx5_priv *priv = dev->data->dev_private;
9125         uint32_t idx = handle->dvh.rix_push_vlan;
9126         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
9127
9128         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9129                                         idx);
9130         if (!cache_resource)
9131                 return 0;
9132         MLX5_ASSERT(cache_resource->action);
9133         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
9134                 (void *)cache_resource,
9135                 rte_atomic32_read(&cache_resource->refcnt));
9136         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9137                 claim_zero(mlx5_flow_os_destroy_flow_action
9138                                                 (cache_resource->action));
9139                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9140                              &priv->sh->push_vlan_action_list, idx,
9141                              cache_resource, next);
9142                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
9143                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
9144                         (void *)cache_resource);
9145                 return 0;
9146         }
9147         return 1;
9148 }
9149
9150 /**
9151  * Release the fate resource.
9152  *
9153  * @param dev
9154  *   Pointer to Ethernet device.
9155  * @param handle
9156  *   Pointer to mlx5_flow_handle.
9157  */
9158 static void
9159 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
9160                                struct mlx5_flow_handle *handle)
9161 {
9162         if (!handle->rix_fate)
9163                 return;
9164         switch (handle->fate_action) {
9165         case MLX5_FLOW_FATE_DROP:
9166                 mlx5_hrxq_drop_release(dev);
9167                 break;
9168         case MLX5_FLOW_FATE_QUEUE:
9169                 mlx5_hrxq_release(dev, handle->rix_hrxq);
9170                 break;
9171         case MLX5_FLOW_FATE_JUMP:
9172                 flow_dv_jump_tbl_resource_release(dev, handle);
9173                 break;
9174         case MLX5_FLOW_FATE_PORT_ID:
9175                 flow_dv_port_id_action_resource_release(dev, handle);
9176                 break;
9177         case MLX5_FLOW_FATE_DEFAULT_MISS:
9178                 flow_dv_default_miss_resource_release(dev);
9179                 break;
9180         default:
9181                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
9182                 break;
9183         }
9184         handle->rix_fate = 0;
9185 }
9186
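/*
 * The fate indexes released above overlay each other in the flow handle;
 * fate_action selects which union member is live, and rix_fate is the
 * generic accessor. A sketch of the layout (illustrative; field names
 * assumed to mirror struct mlx5_flow_handle):
 */
#if 0
struct handle_sketch {
        uint8_t fate_action;            /* Selects the live member below. */
        union {
                uint32_t rix_hrxq;              /* Hash Rx queue index. */
                uint32_t rix_jump;              /* Jump resource index. */
                uint32_t rix_port_id_action;    /* Port ID action index. */
                uint32_t rix_fate;              /* Generic accessor. */
        };
};
#endif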
9187 /**
9188  * Remove the flow from the NIC but keep it in memory.
9189  * Lock free (mutex should be acquired by the caller).
9190  *
9191  * @param[in] dev
9192  *   Pointer to Ethernet device.
9193  * @param[in, out] flow
9194  *   Pointer to flow structure.
9195  */
9196 static void
9197 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9198 {
9199         struct mlx5_flow_handle *dh;
9200         uint32_t handle_idx;
9201         struct mlx5_priv *priv = dev->data->dev_private;
9202
9203         if (!flow)
9204                 return;
9205         handle_idx = flow->dev_handles;
9206         while (handle_idx) {
9207                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9208                                     handle_idx);
9209                 if (!dh)
9210                         return;
9211                 if (dh->drv_flow) {
9212                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
9213                         dh->drv_flow = NULL;
9214                 }
9215                 if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
9216                     dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
9217                     dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
9218                         flow_dv_fate_resource_release(dev, dh);
9219                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
9220                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
9221                 handle_idx = dh->next.next;
9222         }
9223 }
9224
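/*
 * Device flow handles live in an indexed pool and are chained by pool
 * index rather than by pointer; a sketch of the walk used above and in
 * __flow_dv_destroy() below (illustrative only):
 */
#if 0
        uint32_t idx = flow->dev_handles;

        while (idx) {
                struct mlx5_flow_handle *h = mlx5_ipool_get
                        (priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], idx);

                if (!h)
                        break;
                /* ... operate on the handle ... */
                idx = h->next.next;     /* Follow the index-based link. */
        }
#endif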
9225 /**
9226  * Remove the flow from the NIC and the memory.
9227  * Lock free (mutex should be acquired by the caller).
9228  *
9229  * @param[in] dev
9230  *   Pointer to the Ethernet device structure.
9231  * @param[in, out] flow
9232  *   Pointer to flow structure.
9233  */
9234 static void
9235 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9236 {
9237         struct mlx5_flow_handle *dev_handle;
9238         struct mlx5_priv *priv = dev->data->dev_private;
9239
9240         if (!flow)
9241                 return;
9242         __flow_dv_remove(dev, flow);
9243         if (flow->counter) {
9244                 flow_dv_counter_release(dev, flow->counter);
9245                 flow->counter = 0;
9246         }
9247         if (flow->meter) {
9248                 struct mlx5_flow_meter *fm;
9249
9250                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
9251                                     flow->meter);
9252                 if (fm)
9253                         mlx5_flow_meter_detach(fm);
9254                 flow->meter = 0;
9255         }
9256         while (flow->dev_handles) {
9257                 uint32_t tmp_idx = flow->dev_handles;
9258
9259                 dev_handle = mlx5_ipool_get(priv->sh->ipool
9260                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
9261                 if (!dev_handle)
9262                         return;
9263                 flow->dev_handles = dev_handle->next.next;
9264                 if (dev_handle->dvh.matcher)
9265                         flow_dv_matcher_release(dev, dev_handle);
9266                 if (dev_handle->dvh.rix_encap_decap)
9267                         flow_dv_encap_decap_resource_release(dev, dev_handle);
9268                 if (dev_handle->dvh.modify_hdr)
9269                         flow_dv_modify_hdr_resource_release(dev_handle);
9270                 if (dev_handle->dvh.rix_push_vlan)
9271                         flow_dv_push_vlan_action_resource_release(dev,
9272                                                                   dev_handle);
9273                 if (dev_handle->dvh.rix_tag)
9274                         flow_dv_tag_release(dev,
9275                                             dev_handle->dvh.rix_tag);
9276                 flow_dv_fate_resource_release(dev, dev_handle);
9277                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9278                            tmp_idx);
9279         }
9280 }
9281
9282 /**
9283  * Query a DV flow rule for its statistics via DevX.
9284  *
9285  * @param[in] dev
9286  *   Pointer to Ethernet device.
9287  * @param[in] flow
9288  *   Pointer to the sub flow.
9289  * @param[out] data
9290  *   Data retrieved by the query.
9291  * @param[out] error
9292  *   Perform verbose error reporting if not NULL.
9293  *
9294  * @return
9295  *   0 on success, a negative errno value otherwise and rte_errno is set.
9296  */
9297 static int
9298 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
9299                     void *data, struct rte_flow_error *error)
9300 {
9301         struct mlx5_priv *priv = dev->data->dev_private;
9302         struct rte_flow_query_count *qc = data;
9303
9304         if (!priv->config.devx)
9305                 return rte_flow_error_set(error, ENOTSUP,
9306                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9307                                           NULL,
9308                                           "counters are not supported");
9309         if (flow->counter) {
9310                 uint64_t pkts, bytes;
9311                 struct mlx5_flow_counter *cnt;
9312
9313                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
9314                                                  NULL);
9315                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
9316                                                &bytes);
9317
9318                 if (err)
9319                         return rte_flow_error_set(error, -err,
9320                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9321                                         NULL, "cannot read counters");
9322                 qc->hits_set = 1;
9323                 qc->bytes_set = 1;
9324                 qc->hits = pkts - cnt->hits;
9325                 qc->bytes = bytes - cnt->bytes;
9326                 if (qc->reset) {
9327                         cnt->hits = pkts;
9328                         cnt->bytes = bytes;
9329                 }
9330                 return 0;
9331         }
9332         return rte_flow_error_set(error, EINVAL,
9333                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9334                                   NULL,
9335                                   "counters are not available");
9336 }
9337
9338 /**
9339  * Query a flow.
9340  *
9341  * @see rte_flow_query()
9342  * @see rte_flow_ops
9343  */
9344 static int
9345 flow_dv_query(struct rte_eth_dev *dev,
9346               struct rte_flow *flow __rte_unused,
9347               const struct rte_flow_action *actions __rte_unused,
9348               void *data __rte_unused,
9349               struct rte_flow_error *error __rte_unused)
9350 {
9351         int ret = -EINVAL;
9352
9353         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
9354                 switch (actions->type) {
9355                 case RTE_FLOW_ACTION_TYPE_VOID:
9356                         break;
9357                 case RTE_FLOW_ACTION_TYPE_COUNT:
9358                         ret = flow_dv_query_count(dev, flow, data, error);
9359                         break;
9360                 default:
9361                         return rte_flow_error_set(error, ENOTSUP,
9362                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9363                                                   actions,
9364                                                   "action not supported");
9365                 }
9366         }
9367         return ret;
9368 }
9369
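/*
 * Application-side view of the query path above: a COUNT action list is
 * passed to rte_flow_query() and the result lands in rte_flow_query_count.
 * A sketch, assuming port 0 and a "flow" created with a COUNT action
 * (illustrative only; needs <stdio.h> and <inttypes.h>):
 */
#if 0
        struct rte_flow_query_count qc = { .reset = 1 };
        struct rte_flow_error qerr;
        const struct rte_flow_action count[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        if (rte_flow_query(0, flow, count, &qc, &qerr) == 0)
                printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
                       qc.hits, qc.bytes);
#endif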
9370 /**
9371  * Destroy the meter table set.
9372  * Lock free, (mutex should be acquired by caller).
9373  * Lock free (mutex should be acquired by the caller).
9374  * @param[in] dev
9375  *   Pointer to Ethernet device.
9376  * @param[in] tbl
9377  *   Pointer to the meter table set.
9378  *
9379  * @return
9380  *   Always 0.
9381  */
9382 static int
9383 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
9384                         struct mlx5_meter_domains_infos *tbl)
9385 {
9386         struct mlx5_priv *priv = dev->data->dev_private;
9387         struct mlx5_meter_domains_infos *mtd =
9388                                 (struct mlx5_meter_domains_infos *)tbl;
9389
9390         if (!mtd || !priv->config.dv_flow_en)
9391                 return 0;
9392         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
9393                 claim_zero(mlx5_flow_os_destroy_flow
9394                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
9395         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
9396                 claim_zero(mlx5_flow_os_destroy_flow
9397                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
9398         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
9399                 claim_zero(mlx5_flow_os_destroy_flow
9400                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
9401         if (mtd->egress.color_matcher)
9402                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9403                            (mtd->egress.color_matcher));
9404         if (mtd->egress.any_matcher)
9405                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9406                            (mtd->egress.any_matcher));
9407         if (mtd->egress.tbl)
9408                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
9409         if (mtd->egress.sfx_tbl)
9410                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
9411         if (mtd->ingress.color_matcher)
9412                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9413                            (mtd->ingress.color_matcher));
9414         if (mtd->ingress.any_matcher)
9415                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9416                            (mtd->ingress.any_matcher));
9417         if (mtd->ingress.tbl)
9418                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
9419         if (mtd->ingress.sfx_tbl)
9420                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
9421         if (mtd->transfer.color_matcher)
9422                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9423                            (mtd->transfer.color_matcher));
9424         if (mtd->transfer.any_matcher)
9425                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9426                            (mtd->transfer.any_matcher));
9427         if (mtd->transfer.tbl)
9428                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
9429         if (mtd->transfer.sfx_tbl)
9430                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
9431         if (mtd->drop_actn)
9432                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
9433         mlx5_free(mtd);
9434         return 0;
9435 }
9436
9437 /* Number of meter flow actions: count and jump, or count and drop. */
9438 #define METER_ACTIONS 2
9439
9440 /**
9441  * Create the specified domain meter table and suffix table.
9442  *
9443  * @param[in] dev
9444  *   Pointer to Ethernet device.
9445  * @param[in,out] mtb
9446  *   Pointer to DV meter table set.
9447  * @param[in] egress
9448  *   Egress table attribute.
9449  * @param[in] transfer
9450  *   Transfer (FDB) table attribute.
9451  * @param[in] color_reg_c_idx
9452  *   Reg C index for color match.
9453  *
9454  * @return
9455  *   0 on success, -1 otherwise and rte_errno is set.
9456  */
9457 static int
9458 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
9459                            struct mlx5_meter_domains_infos *mtb,
9460                            uint8_t egress, uint8_t transfer,
9461                            uint32_t color_reg_c_idx)
9462 {
9463         struct mlx5_priv *priv = dev->data->dev_private;
9464         struct mlx5_dev_ctx_shared *sh = priv->sh;
9465         struct mlx5_flow_dv_match_params mask = {
9466                 .size = sizeof(mask.buf),
9467         };
9468         struct mlx5_flow_dv_match_params value = {
9469                 .size = sizeof(value.buf),
9470         };
9471         struct mlx5dv_flow_matcher_attr dv_attr = {
9472                 .type = IBV_FLOW_ATTR_NORMAL,
9473                 .priority = 0,
9474                 .match_criteria_enable = 0,
9475                 .match_mask = (void *)&mask,
9476         };
9477         void *actions[METER_ACTIONS];
9478         struct mlx5_meter_domain_info *dtb;
9479         struct rte_flow_error error;
9480         int i = 0;
9481         int ret;
9482
9483         if (transfer)
9484                 dtb = &mtb->transfer;
9485         else if (egress)
9486                 dtb = &mtb->egress;
9487         else
9488                 dtb = &mtb->ingress;
9489         /* Create the meter table with METER level. */
9490         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
9491                                             egress, transfer, &error);
9492         if (!dtb->tbl) {
9493                 DRV_LOG(ERR, "Failed to create meter policer table.");
9494                 return -1;
9495         }
9496         /* Create the meter suffix table with SUFFIX level. */
9497         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
9498                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
9499                                             egress, transfer, &error);
9500         if (!dtb->sfx_tbl) {
9501                 DRV_LOG(ERR, "Failed to create meter suffix table.");
9502                 return -1;
9503         }
9504         /* Create matchers, Any and Color. */
9505         dv_attr.priority = 3;
9506         dv_attr.match_criteria_enable = 0;
9507         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9508                                                &dtb->any_matcher);
9509         if (ret) {
9510                 DRV_LOG(ERR, "Failed to create meter"
9511                              " policer default matcher.");
9512                 goto error_exit;
9513         }
9514         dv_attr.priority = 0;
9515         dv_attr.match_criteria_enable =
9516                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9517         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
9518                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
9519         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9520                                                &dtb->color_matcher);
9521         if (ret) {
9522                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
9523                 goto error_exit;
9524         }
9525         if (mtb->count_actns[RTE_MTR_DROPPED])
9526                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
9527         actions[i++] = mtb->drop_actn;
9528         /* Default rule: lowest priority, match any, actions: drop. */
9529         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
9530                                        actions,
9531                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
9532         if (ret) {
9533                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
9534                 goto error_exit;
9535         }
9536         return 0;
9537 error_exit:
9538         return -1;
9539 }
9540
9541 /**
9542  * Create the needed meter and suffix tables.
9543  * Lock free, (mutex should be acquired by caller).
9544  * Lock free (mutex should be acquired by the caller).
9545  * @param[in] dev
9546  *   Pointer to Ethernet device.
9547  * @param[in] fm
9548  *   Pointer to the flow meter.
9549  *
9550  * @return
9551  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
9552  */
9553 static struct mlx5_meter_domains_infos *
9554 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
9555                        const struct mlx5_flow_meter *fm)
9556 {
9557         struct mlx5_priv *priv = dev->data->dev_private;
9558         struct mlx5_meter_domains_infos *mtb;
9559         int ret;
9560         int i;
9561
9562         if (!priv->mtr_en) {
9563                 rte_errno = ENOTSUP;
9564                 return NULL;
9565         }
9566         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
9567         if (!mtb) {
9568                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
9569                 return NULL;
9570         }
9571         /* Create the meter count actions. */
9572         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
9573                 struct mlx5_flow_counter *cnt;
9574                 if (!fm->policer_stats.cnt[i])
9575                         continue;
9576                 cnt = flow_dv_counter_get_by_idx(dev,
9577                       fm->policer_stats.cnt[i], NULL);
9578                 mtb->count_actns[i] = cnt->action;
9579         }
9580         /* Create drop action. */
9581         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
9582         if (ret) {
9583                 DRV_LOG(ERR, "Failed to create drop action.");
9584                 goto error_exit;
9585         }
9586         /* Egress meter table. */
9587         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
9588         if (ret) {
9589                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
9590                 goto error_exit;
9591         }
9592         /* Ingress meter table. */
9593         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
9594         if (ret) {
9595                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
9596                 goto error_exit;
9597         }
9598         /* FDB meter table. */
9599         if (priv->config.dv_esw_en) {
9600                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
9601                                                  priv->mtr_color_reg);
9602                 if (ret) {
9603                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
9604                         goto error_exit;
9605                 }
9606         }
9607         return mtb;
9608 error_exit:
9609         flow_dv_destroy_mtr_tbl(dev, mtb);
9610         return NULL;
9611 }
9612
9613 /**
9614  * Destroy domain policer rule.
9615  *
9616  * @param[in] dt
9617  *   Pointer to domain table.
9618  */
9619 static void
9620 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
9621 {
9622         int i;
9623
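        /*
         * Per-color rules only: the lowest-priority default drop rule kept
         * in policer_rules[RTE_MTR_DROPPED] belongs to the table set and is
         * released together with it in flow_dv_destroy_mtr_tbl().
         */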
9624         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9625                 if (dt->policer_rules[i]) {
9626                         claim_zero(mlx5_flow_os_destroy_flow
9627                                    (dt->policer_rules[i]));
9628                         dt->policer_rules[i] = NULL;
9629                 }
9630         }
9631         if (dt->jump_actn) {
9632                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
9633                 dt->jump_actn = NULL;
9634         }
9635 }
9636
9637 /**
9638  * Destroy policer rules.
9639  *
9640  * @param[in] dev
9641  *   Pointer to Ethernet device.
9642  * @param[in] fm
9643  *   Pointer to flow meter structure.
9644  * @param[in] attr
9645  *   Pointer to flow attributes.
9646  *
9647  * @return
9648  *   Always 0.
9649  */
9650 static int
9651 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
9652                               const struct mlx5_flow_meter *fm,
9653                               const struct rte_flow_attr *attr)
9654 {
9655         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
9656
9657         if (!mtb)
9658                 return 0;
9659         if (attr->egress)
9660                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
9661         if (attr->ingress)
9662                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
9663         if (attr->transfer)
9664                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
9665         return 0;
9666 }
9667
9668 /**
9669  * Create a specific domain's meter policer rules.
9670  *
9671  * @param[in] fm
9672  *   Pointer to flow meter structure.
9673  * @param[in] dtb
9674  *   Pointer to the meter domain table info.
9675  * @param[in] mtr_reg_c
9676  *   Color match REG_C.
9677  *
9678  * @return
9679  *   0 on success, -1 otherwise.
9680  */
9681 static int
9682 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
9683                                     struct mlx5_meter_domain_info *dtb,
9684                                     uint8_t mtr_reg_c)
9685 {
9686         struct mlx5_flow_dv_match_params matcher = {
9687                 .size = sizeof(matcher.buf),
9688         };
9689         struct mlx5_flow_dv_match_params value = {
9690                 .size = sizeof(value.buf),
9691         };
9692         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9693         void *actions[METER_ACTIONS];
9694         int i;
9695         int ret = 0;
9696
9697         /* Create jump action. */
9698         if (!dtb->jump_actn)
9699                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9700                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
9701         if (ret) {
9702                 DRV_LOG(ERR, "Failed to create policer jump action.");
9703                 goto error;
9704         }
9705         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9706                 int j = 0;
9707
9708                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
9709                                        rte_col_2_mlx5_col(i), UINT8_MAX);
9710                 if (mtb->count_actns[i])
9711                         actions[j++] = mtb->count_actns[i];
9712                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
9713                         actions[j++] = mtb->drop_actn;
9714                 else
9715                         actions[j++] = dtb->jump_actn;
9716                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
9717                                                (void *)&value, j, actions,
9718                                                &dtb->policer_rules[i]);
9719                 if (ret) {
9720                         DRV_LOG(ERR, "Failed to create policer rule.");
9721                         goto error;
9722                 }
9723         }
9724         return 0;
9725 error:
9726         rte_errno = errno;
9727         return -1;
9728 }
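/*
 * Illustration (not driver code): for a meter whose policy drops RED and
 * passes GREEN/YELLOW, the loop above ends up installing one rule per
 * color in the domain's color matcher, keyed on the color value the
 * meter writes into the color REG_C:
 *
 *   reg_c == rte_col_2_mlx5_col(GREEN)  -> [count,] jump to suffix table
 *   reg_c == rte_col_2_mlx5_col(YELLOW) -> [count,] jump to suffix table
 *   reg_c == rte_col_2_mlx5_col(RED)    -> [count,] drop
 *
 * Packets matching no color rule fall through to the lowest-priority
 * catch-all drop rule created along with the table set.
 */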
9729
9730 /**
9731  * Create policer rules.
9732  *
9733  * @param[in] dev
9734  *   Pointer to Ethernet device.
9735  * @param[in] fm
9736  *   Pointer to flow meter structure.
9737  * @param[in] attr
9738  *   Pointer to flow attributes.
9739  *
9740  * @return
9741  *   0 on success, -1 otherwise.
9742  */
9743 static int
9744 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
9745                              struct mlx5_flow_meter *fm,
9746                              const struct rte_flow_attr *attr)
9747 {
9748         struct mlx5_priv *priv = dev->data->dev_private;
9749         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9750         int ret;
9751
9752         if (attr->egress) {
9753                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
9754                                                 priv->mtr_color_reg);
9755                 if (ret) {
9756                         DRV_LOG(ERR, "Failed to create egress policer.");
9757                         goto error;
9758                 }
9759         }
9760         if (attr->ingress) {
9761                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
9762                                                 priv->mtr_color_reg);
9763                 if (ret) {
9764                         DRV_LOG(ERR, "Failed to create ingress policer.");
9765                         goto error;
9766                 }
9767         }
9768         if (attr->transfer) {
9769                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
9770                                                 priv->mtr_color_reg);
9771                 if (ret) {
9772                         DRV_LOG(ERR, "Failed to create transfer policer.");
9773                         goto error;
9774                 }
9775         }
9776         return 0;
9777 error:
9778         flow_dv_destroy_policer_rules(dev, fm, attr);
9779         return -1;
9780 }
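/*
 * Lifecycle sketch (hypothetical helper, not part of the driver): how the
 * creation paths above pair up with their teardown, roughly as driven by
 * the meter management code.  Error handling is reduced to the minimum.
 */
static __rte_unused int
flow_dv_mtr_example(struct rte_eth_dev *dev, struct mlx5_flow_meter *fm,
		    const struct rte_flow_attr *attr)
{
	/* 1. Create the meter/suffix tables and the per-domain matchers. */
	fm->mfts = flow_dv_create_mtr_tbl(dev, fm);
	if (!fm->mfts)
		return -rte_errno;
	/* 2. Install the per-color policer rules in each enabled domain. */
	if (flow_dv_create_policer_rules(dev, fm, attr)) {
		flow_dv_destroy_mtr_tbl(dev, fm->mfts);
		return -1;
	}
	/* ... meter in use ... */
	/* 3. Tear down in reverse order. */
	flow_dv_destroy_policer_rules(dev, fm, attr);
	flow_dv_destroy_mtr_tbl(dev, fm->mfts);
	return 0;
}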
9781
9782 /**
9783  * Query a devx counter.
9784  *
9785  * @param[in] dev
9786  *   Pointer to the Ethernet device structure.
9787  * @param[in] counter
9788  *   Index of the flow counter.
9789  * @param[in] clear
9790  *   Set to clear the counter statistics.
9791  * @param[out] pkts
9792  *   The statistics value of packets.
9793  * @param[out] bytes
9794  *   The statistics value of bytes.
9795  *
9796  * @return
9797  *   0 on success, -1 otherwise.
9798  */
9799 static int
9800 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
9801                       uint64_t *pkts, uint64_t *bytes)
9802 {
9803         struct mlx5_priv *priv = dev->data->dev_private;
9804         struct mlx5_flow_counter *cnt;
9805         uint64_t inn_pkts, inn_bytes;
9806         int ret;
9807
9808         if (!priv->config.devx)
9809                 return -1;
9810
9811         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
9812         if (ret)
9813                 return -1;
9814         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
9815         *pkts = inn_pkts - cnt->hits;
9816         *bytes = inn_bytes - cnt->bytes;
9817         if (clear) {
9818                 cnt->hits = inn_pkts;
9819                 cnt->bytes = inn_bytes;
9820         }
9821         return 0;
9822 }
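/*
 * Usage sketch (hypothetical, not part of the driver): two successive
 * queries of the same counter.  The hardware values are monotonic; the
 * function reports deltas against the cached baseline in cnt->hits and
 * cnt->bytes, and clear == true advances that baseline.
 */
static __rte_unused void
flow_dv_counter_query_example(struct rte_eth_dev *dev, uint32_t counter)
{
	uint64_t pkts, bytes;

	/* Reports all traffic since the previous clear, then moves the
	 * baseline up to the current hardware reading. */
	if (flow_dv_counter_query(dev, counter, true, &pkts, &bytes))
		return;
	/* Reports only the traffic that arrived after the first call,
	 * because the baseline advanced; the baseline is kept as-is. */
	if (flow_dv_counter_query(dev, counter, false, &pkts, &bytes))
		return;
	(void)pkts;
	(void)bytes;
}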
9823
9824 /**
9825  * Get aged-out flows.
9826  *
9827  * @param[in] dev
9828  *   Pointer to the Ethernet device structure.
9829  * @param[in] context
9830  *   The address of an array of pointers to the aged-out flow contexts.
9831  * @param[in] nb_contexts
9832  *   The length of the context array.
9833  * @param[out] error
9834  *   Perform verbose error reporting if not NULL. Initialized in case of
9835  *   error only.
9836  *
9837  * @return
9838  *   The number of aged contexts retrieved on success, otherwise a
9839  *   negative errno value.
9840  *   If nb_contexts is 0, return the total number of aged contexts.
9841  *   If nb_contexts is not 0, return the number of aged flows
9842  *   reported in the context array.
9843  */
9844 static int
9845 flow_get_aged_flows(struct rte_eth_dev *dev,
9846                     void **context,
9847                     uint32_t nb_contexts,
9848                     struct rte_flow_error *error)
9849 {
9850         struct mlx5_priv *priv = dev->data->dev_private;
9851         struct mlx5_age_info *age_info;
9852         struct mlx5_age_param *age_param;
9853         struct mlx5_flow_counter *counter;
9854         int nb_flows = 0;
9855
9856         if (nb_contexts && !context)
9857                 return rte_flow_error_set(error, EINVAL,
9858                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9859                                           NULL,
9860                                           "context must not be NULL when"
9861                                           " nb_contexts is not 0");
9862         age_info = GET_PORT_AGE_INFO(priv);
9863         rte_spinlock_lock(&age_info->aged_sl);
9864         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
9865                 nb_flows++;
9866                 if (nb_contexts) {
9867                         age_param = MLX5_CNT_TO_AGE(counter);
9868                         context[nb_flows - 1] = age_param->context;
9869                         if (!(--nb_contexts))
9870                                 break;
9871                 }
9872         }
9873         rte_spinlock_unlock(&age_info->aged_sl);
9874         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
9875         return nb_flows;
9876 }
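/*
 * Application-side usage sketch (hypothetical): the nb_contexts == 0
 * convention above enables a two-call pattern through the public
 * rte_flow_get_aged_flows() API, which lands in this handler.
 */
static __rte_unused int
flow_dv_aged_flows_example(uint16_t port_id)
{
	struct rte_flow_error error;
	void **contexts;
	int nb_aged;

	/* First call: only ask how many flows have aged out. */
	nb_aged = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (nb_aged <= 0)
		return nb_aged;
	contexts = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * nb_aged,
			       0, SOCKET_ID_ANY);
	if (!contexts)
		return -ENOMEM;
	/* Second call: collect up to nb_aged user contexts. */
	nb_aged = rte_flow_get_aged_flows(port_id, contexts, nb_aged, &error);
	mlx5_free(contexts);
	return nb_aged;
}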
9877
9878 /*
9879  * Mutex-protected thunk to lock-free __flow_dv_translate().
9880  */
9881 static int
9882 flow_dv_translate(struct rte_eth_dev *dev,
9883                   struct mlx5_flow *dev_flow,
9884                   const struct rte_flow_attr *attr,
9885                   const struct rte_flow_item items[],
9886                   const struct rte_flow_action actions[],
9887                   struct rte_flow_error *error)
9888 {
9889         int ret;
9890
9891         flow_dv_shared_lock(dev);
9892         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
9893         flow_dv_shared_unlock(dev);
9894         return ret;
9895 }
9896
9897 /*
9898  * Mutex-protected thunk to lock-free __flow_dv_apply().
9899  */
9900 static int
9901 flow_dv_apply(struct rte_eth_dev *dev,
9902               struct rte_flow *flow,
9903               struct rte_flow_error *error)
9904 {
9905         int ret;
9906
9907         flow_dv_shared_lock(dev);
9908         ret = __flow_dv_apply(dev, flow, error);
9909         flow_dv_shared_unlock(dev);
9910         return ret;
9911 }
9912
9913 /*
9914  * Mutex-protected thunk to lock-free __flow_dv_remove().
9915  */
9916 static void
9917 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9918 {
9919         flow_dv_shared_lock(dev);
9920         __flow_dv_remove(dev, flow);
9921         flow_dv_shared_unlock(dev);
9922 }
9923
9924 /*
9925  * Mutex-protected thunk to lock-free __flow_dv_destroy().
9926  */
9927 static void
9928 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9929 {
9930         flow_dv_shared_lock(dev);
9931         __flow_dv_destroy(dev, flow);
9932         flow_dv_shared_unlock(dev);
9933 }
9934
9935 /*
9936  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
9937  */
9938 static uint32_t
9939 flow_dv_counter_allocate(struct rte_eth_dev *dev)
9940 {
9941         uint32_t cnt;
9942
9943         flow_dv_shared_lock(dev);
9944         cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
9945         flow_dv_shared_unlock(dev);
9946         return cnt;
9947 }
9948
9949 /*
9950  * Mutex-protected thunk to lock-free flow_dv_counter_release().
9951  */
9952 static void
9953 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
9954 {
9955         flow_dv_shared_lock(dev);
9956         flow_dv_counter_release(dev, cnt);
9957         flow_dv_shared_unlock(dev);
9958 }
9959
9960 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
9961         .validate = flow_dv_validate,
9962         .prepare = flow_dv_prepare,
9963         .translate = flow_dv_translate,
9964         .apply = flow_dv_apply,
9965         .remove = flow_dv_remove,
9966         .destroy = flow_dv_destroy,
9967         .query = flow_dv_query,
9968         .create_mtr_tbls = flow_dv_create_mtr_tbl,
9969         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
9970         .create_policer_rules = flow_dv_create_policer_rules,
9971         .destroy_policer_rules = flow_dv_destroy_policer_rules,
9972         .counter_alloc = flow_dv_counter_allocate,
9973         .counter_free = flow_dv_counter_free,
9974         .counter_query = flow_dv_counter_query,
9975         .get_aged_flows = flow_get_aged_flows,
9976 };
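/*
 * Dispatch sketch (illustrative, simplified): the generic mlx5 flow layer
 * never calls the DV handlers directly; it resolves this ops table by
 * driver type and goes through it, along the lines of:
 *
 *	const struct mlx5_flow_driver_ops *fops =
 *		flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *	ret = fops->translate(dev, dev_flow, attr, items, actions, error);
 *
 * making this table the only DV entry point the generic layer needs.
 */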
9977
9978 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */