/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};
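
/*
 * Example: for a plain IPv4/UDP flow, flow_dv_attr_init() below sets
 * ipv4 = udp = valid = 1, while a tunnel item clears the whole union
 * (attr = 0) when the modification applies after decapsulation.
 */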

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel mode.
 * In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, this dev_flow is the suffix
	 * flow and the layer flags were set by the prefix flow. Use the
	 * layer flags from the prefix flow, since the suffix flow may not
	 * carry the user-defined items after the flow is split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};
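
/*
 * The modify_* tables below map protocol header fields to hardware
 * modification fields. Each table ends with a zero-size sentinel entry;
 * flow_dv_convert_modify_action() iterates until it reaches it.
 */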

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size here is in bits, not bytes as in the other tables. */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

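/*
 * Mark IP-over-IP tunnel layers: depending on the inner protocol seen in
 * an outer IPv4/IPv6 item, set MLX5_FLOW_LAYER_IPIP or
 * MLX5_FLOW_LAYER_IPV6_ENCAP in the item flags and report a tunnel.
 */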
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		MLX5_ASSERT(!ret);
		(void)ret;
	}
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		MLX5_ASSERT(!ret);
		(void)ret;
	}
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
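 *
 * For example, OF_SET_VLAN_PCP with vlan_pcp = 5 clears TCI bits 15:13
 * and sets them to 5; the VID and DEI bits are left untouched.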
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;

	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}

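/*
 * For example, fetching a 3-byte field {0x12, 0x34, 0x56} reads the first
 * two bytes as a big-endian 16-bit value (0x1234), then shifts it left by
 * 8 and ORs in the third byte, returning 0x123456 in host order.
 */
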
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset means "use the same offset as the source field".
 *   The size field is ignored; the width is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * The mask must always be present; it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
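		/*
		 * For example, mask 0x00fff000 yields off_b = 12 and
		 * size_b = 12, i.e. a 12-bit field starting at bit 12.
		 * A full 32-bit mask is encoded as length 0, which the
		 * device interprets as the whole 32-bit word.
		 */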
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
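	/*
	 * Unlike the generic converter, data1 is filled in directly:
	 * the VID from the action is already big-endian, so it is only
	 * shifted into the upper 16 bits of the command word, with no
	 * extra byte swap.
	 */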
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X with increments,
		 * add UINT32_MAX to Y X times: each addition of
		 * UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
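	/*
	 * The 64-bit product is truncated to 32 bits by the cast below:
	 * X * UINT32_MAX equals -X modulo 2^32, e.g. X = 5 yields
	 * 0xFFFFFFFB, and adding that wraps the sequence number to Y - 5.
	 */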
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X with increments,
		 * add UINT32_MAX to Y X times: each addition of
		 * UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

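/*
 * Translation from the driver's internal modify_reg identifiers to
 * hardware modification fields, used by the register set/copy and
 * MARK/META converters below.
 */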
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
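	/*
	 * The designated initializer gives reg_c_x two entries: slot 0 is
	 * filled in at run time below, slot 1 stays zero as the sentinel.
	 */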
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores endianness because there is
			 * no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

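		/*
		 * rte_cpu_to_be_32() is its own inverse (a byte swap on
		 * little-endian hosts, a no-op on big-endian ones), so the
		 * double conversion below shifts the value into the
		 * reg_c[0] sub-field while keeping data and mask in
		 * big-endian layout.
		 */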
		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	int reg =
		mlx5_flow_get_reg_id(dev, attr->transfer ?
					  MLX5_METADATA_FDB :
					    attr->egress ?
					    MLX5_METADATA_TX :
					    MLX5_METADATA_RX, 0, error);
	if (reg < 0)
		return rte_flow_error_set(error,
					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "unavailable "
					  "metadata register");
	return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there are no endianness conversions for
	 * performance reasons; all pattern conversions are done in
	 * rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the IPv6 DSCP offset is not byte-aligned, rdma-core
	 * only accepts the DSCP bits byte-aligned in bits 0 to 5, to be
	 * compatible with IPv4, so there is no need to shift the bits in
	 * the IPv6 case: rdma-core requires the byte-aligned value.
	 */
1345         ipv6.hdr.vtc_flow = conf->dscp;
1346         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1347         item.spec = &ipv6;
1348         item.mask = &ipv6_mask;
1349         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1350                                              MLX5_MODIFICATION_TYPE_SET, error);
1351 }
1352
1353 /**
1354  * Validate MARK item.
1355  *
1356  * @param[in] dev
1357  *   Pointer to the rte_eth_dev structure.
1358  * @param[in] item
1359  *   Item specification.
1360  * @param[in] attr
1361  *   Attributes of flow that includes this item.
1362  * @param[out] error
1363  *   Pointer to error structure.
1364  *
1365  * @return
1366  *   0 on success, a negative errno value otherwise and rte_errno is set.
1367  */
1368 static int
1369 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1370                            const struct rte_flow_item *item,
1371                            const struct rte_flow_attr *attr __rte_unused,
1372                            struct rte_flow_error *error)
1373 {
1374         struct mlx5_priv *priv = dev->data->dev_private;
1375         struct mlx5_dev_config *config = &priv->config;
1376         const struct rte_flow_item_mark *spec = item->spec;
1377         const struct rte_flow_item_mark *mask = item->mask;
1378         const struct rte_flow_item_mark nic_mask = {
1379                 .id = priv->sh->dv_mark_mask,
1380         };
1381         int ret;
1382
1383         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1384                 return rte_flow_error_set(error, ENOTSUP,
1385                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1386                                           "extended metadata feature"
1387                                           " isn't enabled");
1388         if (!mlx5_flow_ext_mreg_supported(dev))
1389                 return rte_flow_error_set(error, ENOTSUP,
1390                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1391                                           "extended metadata register"
1392                                           " isn't supported");
1393         if (!nic_mask.id)
1394                 return rte_flow_error_set(error, ENOTSUP,
1395                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1396                                           "extended metadata register"
1397                                           " isn't available");
1398         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1399         if (ret < 0)
1400                 return ret;
1401         if (!spec)
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1404                                           item->spec,
1405                                           "data cannot be empty");
1406         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1407                 return rte_flow_error_set(error, EINVAL,
1408                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1409                                           &spec->id,
1410                                           "mark id exceeds the limit");
1411         if (!mask)
1412                 mask = &nic_mask;
1413         if (!mask->id)
1414                 return rte_flow_error_set(error, EINVAL,
1415                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1416                                         "mask cannot be zero");
1417
1418         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1419                                         (const uint8_t *)&nic_mask,
1420                                         sizeof(struct rte_flow_item_mark),
1421                                         error);
1422         if (ret < 0)
1423                 return ret;
1424         return 0;
1425 }
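
/*
 * Usage sketch (editorial, illustrative only): matching on a MARK value
 * requires extended metadata mode, e.g. devarg "dv_xmeta_en=1". A
 * pattern item could then look like:
 *
 *   struct rte_flow_item_mark mark_spec = { .id = 42 };
 *   struct rte_flow_item_mark mark_mask = { .id = 0xffffff };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_MARK,
 *           .spec = &mark_spec,
 *           .mask = &mark_mask,
 *   };
 *
 * The id must fit within MLX5_FLOW_MARK_MAX masked by the register width
 * reported in priv->sh->dv_mark_mask, as validated above.
 */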
1426
1427 /**
1428  * Validate META item.
1429  *
1430  * @param[in] dev
1431  *   Pointer to the rte_eth_dev structure.
1432  * @param[in] item
1433  *   Item specification.
1434  * @param[in] attr
1435  *   Attributes of flow that includes this item.
1436  * @param[out] error
1437  *   Pointer to error structure.
1438  *
1439  * @return
1440  *   0 on success, a negative errno value otherwise and rte_errno is set.
1441  */
1442 static int
1443 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1444                            const struct rte_flow_item *item,
1445                            const struct rte_flow_attr *attr,
1446                            struct rte_flow_error *error)
1447 {
1448         struct mlx5_priv *priv = dev->data->dev_private;
1449         struct mlx5_dev_config *config = &priv->config;
1450         const struct rte_flow_item_meta *spec = item->spec;
1451         const struct rte_flow_item_meta *mask = item->mask;
1452         struct rte_flow_item_meta nic_mask = {
1453                 .data = UINT32_MAX
1454         };
1455         int reg;
1456         int ret;
1457
1458         if (!spec)
1459                 return rte_flow_error_set(error, EINVAL,
1460                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1461                                           item->spec,
1462                                           "data cannot be empty");
1463         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1464                 if (!mlx5_flow_ext_mreg_supported(dev))
1465                         return rte_flow_error_set(error, ENOTSUP,
1466                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1467                                           "extended metadata register"
1468                                           " isn't supported");
1469                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1470                 if (reg < 0)
1471                         return reg;
1472                 if (reg == REG_B)
1473                         return rte_flow_error_set(error, ENOTSUP,
1474                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1475                                           "match on reg_b "
1476                                           "isn't supported");
1477                 if (reg != REG_A)
1478                         nic_mask.data = priv->sh->dv_meta_mask;
1479         } else if (attr->transfer) {
1480                 return rte_flow_error_set(error, ENOTSUP,
1481                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1482                                         "extended metadata feature "
1483                                         "should be enabled when "
1484                                         "meta item is requested "
1485                                         "with e-switch mode ");
1486         }
1487         if (!mask)
1488                 mask = &rte_flow_item_meta_mask;
1489         if (!mask->data)
1490                 return rte_flow_error_set(error, EINVAL,
1491                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1492                                         "mask cannot be zero");
1493
1494         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1495                                         (const uint8_t *)&nic_mask,
1496                                         sizeof(struct rte_flow_item_meta),
1497                                         error);
1498         return ret;
1499 }
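
/*
 * Usage sketch (editorial, illustrative only): matching the 32-bit
 * metadata previously attached with RTE_FLOW_ACTION_TYPE_SET_META:
 *
 *   struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
 *   struct rte_flow_item_meta meta_mask = { .data = UINT32_MAX };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_META,
 *           .spec = &meta_spec,
 *           .mask = &meta_mask,
 *   };
 *
 * In extended metadata mode the effective mask may shrink to
 * priv->sh->dv_meta_mask when the match lands in a reg_c register.
 */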
1500
1501 /**
1502  * Validate TAG item.
1503  *
1504  * @param[in] dev
1505  *   Pointer to the rte_eth_dev structure.
1506  * @param[in] item
1507  *   Item specification.
1508  * @param[in] attr
1509  *   Attributes of flow that includes this item.
1510  * @param[out] error
1511  *   Pointer to error structure.
1512  *
1513  * @return
1514  *   0 on success, a negative errno value otherwise and rte_errno is set.
1515  */
1516 static int
1517 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1518                           const struct rte_flow_item *item,
1519                           const struct rte_flow_attr *attr __rte_unused,
1520                           struct rte_flow_error *error)
1521 {
1522         const struct rte_flow_item_tag *spec = item->spec;
1523         const struct rte_flow_item_tag *mask = item->mask;
1524         const struct rte_flow_item_tag nic_mask = {
1525                 .data = RTE_BE32(UINT32_MAX),
1526                 .index = 0xff,
1527         };
1528         int ret;
1529
1530         if (!mlx5_flow_ext_mreg_supported(dev))
1531                 return rte_flow_error_set(error, ENOTSUP,
1532                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1533                                           "extensive metadata register"
1534                                           " isn't supported");
1535         if (!spec)
1536                 return rte_flow_error_set(error, EINVAL,
1537                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1538                                           item->spec,
1539                                           "data cannot be empty");
1540         if (!mask)
1541                 mask = &rte_flow_item_tag_mask;
1542         if (!mask->data)
1543                 return rte_flow_error_set(error, EINVAL,
1544                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1545                                         "mask cannot be zero");
1546
1547         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1548                                         (const uint8_t *)&nic_mask,
1549                                         sizeof(struct rte_flow_item_tag),
1550                                         error);
1551         if (ret < 0)
1552                 return ret;
1553         if (mask->index != 0xff)
1554                 return rte_flow_error_set(error, EINVAL,
1555                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1556                                           "partial mask for tag index"
1557                                           " is not supported");
1558         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1559         if (ret < 0)
1560                 return ret;
1561         MLX5_ASSERT(ret != REG_NON);
1562         return 0;
1563 }
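
/*
 * Usage sketch (editorial, illustrative only): the TAG item matches one
 * of the application-owned reg_c registers selected by "index"; only an
 * exact index mask (0xff) passes the check above:
 *
 *   struct rte_flow_item_tag tag_spec = { .data = 7, .index = 1 };
 *   struct rte_flow_item_tag tag_mask = {
 *           .data = 0xffffffff,
 *           .index = 0xff,
 *   };
 */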
1564
1565 /**
1566  * Validate vport (PORT_ID) item.
1567  *
1568  * @param[in] dev
1569  *   Pointer to the rte_eth_dev structure.
1570  * @param[in] item
1571  *   Item specification.
1572  * @param[in] attr
1573  *   Attributes of flow that includes this item.
1574  * @param[in] item_flags
1575  *   Bit-fields that holds the items detected until now.
1576  * @param[out] error
1577  *   Pointer to error structure.
1578  *
1579  * @return
1580  *   0 on success, a negative errno value otherwise and rte_errno is set.
1581  */
1582 static int
1583 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1584                               const struct rte_flow_item *item,
1585                               const struct rte_flow_attr *attr,
1586                               uint64_t item_flags,
1587                               struct rte_flow_error *error)
1588 {
1589         const struct rte_flow_item_port_id *spec = item->spec;
1590         const struct rte_flow_item_port_id *mask = item->mask;
1591         const struct rte_flow_item_port_id switch_mask = {
1592                         .id = 0xffffffff,
1593         };
1594         struct mlx5_priv *esw_priv;
1595         struct mlx5_priv *dev_priv;
1596         int ret;
1597
1598         if (!attr->transfer)
1599                 return rte_flow_error_set(error, EINVAL,
1600                                           RTE_FLOW_ERROR_TYPE_ITEM,
1601                                           NULL,
1602                                           "match on port id is valid only"
1603                                           " when transfer flag is enabled");
1604         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1605                 return rte_flow_error_set(error, ENOTSUP,
1606                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1607                                           "multiple source ports are not"
1608                                           " supported");
1609         if (!mask)
1610                 mask = &switch_mask;
1611         if (mask->id != 0xffffffff)
1612                 return rte_flow_error_set(error, ENOTSUP,
1613                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1614                                            mask,
1615                                            "no support for partial mask on"
1616                                            " \"id\" field");
1617         ret = mlx5_flow_item_acceptable
1618                                 (item, (const uint8_t *)mask,
1619                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1620                                  sizeof(struct rte_flow_item_port_id),
1621                                  error);
1622         if (ret)
1623                 return ret;
1624         if (!spec)
1625                 return 0;
1626         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1627         if (!esw_priv)
1628                 return rte_flow_error_set(error, rte_errno,
1629                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1630                                           "failed to obtain E-Switch info for"
1631                                           " port");
1632         dev_priv = mlx5_dev_to_eswitch_info(dev);
1633         if (!dev_priv)
1634                 return rte_flow_error_set(error, rte_errno,
1635                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1636                                           NULL,
1637                                           "failed to obtain E-Switch info");
1638         if (esw_priv->domain_id != dev_priv->domain_id)
1639                 return rte_flow_error_set(error, EINVAL,
1640                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1641                                           "cannot match on a port from a"
1642                                           " different E-Switch");
1643         return 0;
1644 }
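
/*
 * Usage sketch (editorial, illustrative only): matching on a source
 * port is valid only for transfer (E-Switch) rules and only with a full
 * "id" mask:
 *
 *   struct rte_flow_attr attr = { .transfer = 1, .ingress = 1 };
 *   struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *           .spec = &pid_spec,
 *           .mask = &rte_flow_item_port_id_mask,
 *   };
 *
 * Both ports must belong to the same E-Switch domain, as checked above.
 */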
1645
1646 /**
1647  * Validate VLAN item.
1648  *
1649  * @param[in] item
1650  *   Item specification.
1651  * @param[in] item_flags
1652  *   Bit-fields that holds the items detected until now.
1653  * @param[in] dev
1654  *   Ethernet device flow is being created on.
1655  * @param[out] error
1656  *   Pointer to error structure.
1657  *
1658  * @return
1659  *   0 on success, a negative errno value otherwise and rte_errno is set.
1660  */
1661 static int
1662 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1663                            uint64_t item_flags,
1664                            struct rte_eth_dev *dev,
1665                            struct rte_flow_error *error)
1666 {
1667         const struct rte_flow_item_vlan *mask = item->mask;
1668         const struct rte_flow_item_vlan nic_mask = {
1669                 .tci = RTE_BE16(UINT16_MAX),
1670                 .inner_type = RTE_BE16(UINT16_MAX),
1671         };
1672         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1673         int ret;
1674         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1675                                         MLX5_FLOW_LAYER_INNER_L4) :
1676                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1677                                         MLX5_FLOW_LAYER_OUTER_L4);
1678         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1679                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1680
1681         if (item_flags & vlanm)
1682                 return rte_flow_error_set(error, EINVAL,
1683                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1684                                           "multiple VLAN layers not supported");
1685         else if ((item_flags & l34m) != 0)
1686                 return rte_flow_error_set(error, EINVAL,
1687                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1688                                           "VLAN cannot follow L3/L4 layer");
1689         if (!mask)
1690                 mask = &rte_flow_item_vlan_mask;
1691         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1692                                         (const uint8_t *)&nic_mask,
1693                                         sizeof(struct rte_flow_item_vlan),
1694                                         error);
1695         if (ret)
1696                 return ret;
1697         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1698                 struct mlx5_priv *priv = dev->data->dev_private;
1699
1700                 if (priv->vmwa_context) {
1701                         /*
1702                          * A non-NULL context means we run in a virtual machine
1703                          * with SR-IOV enabled and must create a VLAN interface
1704                          * so the hypervisor sets up the E-Switch vport context
1705                          * correctly. We avoid creating multiple VLAN interfaces,
1706                          * so we cannot support a VLAN tag mask.
1707                          */
1708                         return rte_flow_error_set(error, EINVAL,
1709                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1710                                                   item,
1711                                                   "VLAN tag mask is not"
1712                                                   " supported in virtual"
1713                                                   " environment");
1714                 }
1715         }
1716         return 0;
1717 }
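
/*
 * Usage sketch (editorial, illustrative only): matching VID 100 with
 * the exact 12-bit VID mask, which is also the only mask accepted when
 * running inside a VM with SR-IOV (priv->vmwa_context != NULL):
 *
 *   struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
 *   struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
 */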
1718
1719 /*
1720  * GTP flags are contained in 1 byte of the format:
1721  * -------------------------------------------
1722  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1723  * |-----------------------------------------|
1724  * | value | Version | PT | Res | E | S | PN |
1725  * -------------------------------------------
1726  *
1727  * Matching is supported only for GTP flags E, S, PN.
1728  */
1729 #define MLX5_GTP_FLAGS_MASK     0x07
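
/*
 * Worked example (editorial): with the layout above, E, S and PN occupy
 * bits 2, 1 and 0 of v_pt_rsv_flags, so MLX5_GTP_FLAGS_MASK == 0x07
 * covers exactly those three bits; a spec setting Version, PT or the
 * reserved bit is rejected by the validation below.
 */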
1730
1731 /**
1732  * Validate GTP item.
1733  *
1734  * @param[in] dev
1735  *   Pointer to the rte_eth_dev structure.
1736  * @param[in] item
1737  *   Item specification.
1738  * @param[in] item_flags
1739  *   Bit-fields that holds the items detected until now.
1740  * @param[out] error
1741  *   Pointer to error structure.
1742  *
1743  * @return
1744  *   0 on success, a negative errno value otherwise and rte_errno is set.
1745  */
1746 static int
1747 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1748                           const struct rte_flow_item *item,
1749                           uint64_t item_flags,
1750                           struct rte_flow_error *error)
1751 {
1752         struct mlx5_priv *priv = dev->data->dev_private;
1753         const struct rte_flow_item_gtp *spec = item->spec;
1754         const struct rte_flow_item_gtp *mask = item->mask;
1755         const struct rte_flow_item_gtp nic_mask = {
1756                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1757                 .msg_type = 0xff,
1758                 .teid = RTE_BE32(0xffffffff),
1759         };
1760
1761         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1762                 return rte_flow_error_set(error, ENOTSUP,
1763                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1764                                           "GTP support is not enabled");
1765         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1766                 return rte_flow_error_set(error, ENOTSUP,
1767                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1768                                           "multiple tunnel layers not"
1769                                           " supported");
1770         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1771                 return rte_flow_error_set(error, EINVAL,
1772                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1773                                           "no outer UDP layer found");
1774         if (!mask)
1775                 mask = &rte_flow_item_gtp_mask;
1776         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1777                 return rte_flow_error_set(error, ENOTSUP,
1778                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1779                                           "Match is supported for GTP"
1780                                           " flags only");
1781         return mlx5_flow_item_acceptable
1782                 (item, (const uint8_t *)mask,
1783                  (const uint8_t *)&nic_mask,
1784                  sizeof(struct rte_flow_item_gtp),
1785                  error);
1786 }
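
/*
 * Usage sketch (editorial, illustrative only): matching a GTP-U tunnel
 * by TEID on top of the mandatory outer UDP layer:
 *
 *   struct rte_flow_item_gtp gtp_spec = {
 *           .teid = RTE_BE32(0x1234),
 *   };
 *   struct rte_flow_item_gtp gtp_mask = {
 *           .teid = RTE_BE32(0xffffffff),
 *   };
 *
 * The pattern must already contain RTE_FLOW_ITEM_TYPE_UDP before the
 * GTP item, and the device must report tunnel_stateless_gtp support.
 */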
1787
1788 /**
1789  * Validate the pop VLAN action.
1790  *
1791  * @param[in] dev
1792  *   Pointer to the rte_eth_dev structure.
1793  * @param[in] action_flags
1794  *   Holds the actions detected until now.
1795  * @param[in] action
1796  *   Pointer to the pop vlan action.
1797  * @param[in] item_flags
1798  *   The items found in this flow rule.
1799  * @param[in] attr
1800  *   Pointer to flow attributes.
1801  * @param[out] error
1802  *   Pointer to error structure.
1803  *
1804  * @return
1805  *   0 on success, a negative errno value otherwise and rte_errno is set.
1806  */
1807 static int
1808 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1809                                  uint64_t action_flags,
1810                                  const struct rte_flow_action *action,
1811                                  uint64_t item_flags,
1812                                  const struct rte_flow_attr *attr,
1813                                  struct rte_flow_error *error)
1814 {
1815         const struct mlx5_priv *priv = dev->data->dev_private;
1816
1819         if (!priv->sh->pop_vlan_action)
1820                 return rte_flow_error_set(error, ENOTSUP,
1821                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1822                                           NULL,
1823                                           "pop vlan action is not supported");
1824         if (attr->egress)
1825                 return rte_flow_error_set(error, ENOTSUP,
1826                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1827                                           NULL,
1828                                           "pop vlan action not supported for "
1829                                           "egress");
1830         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1831                 return rte_flow_error_set(error, ENOTSUP,
1832                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1833                                           "no support for multiple VLAN "
1834                                           "actions");
1835         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
1836         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
1837             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
1838                 return rte_flow_error_set(error, ENOTSUP,
1839                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1840                                           NULL,
1841                                           "cannot pop vlan after decap without "
1842                                           "match on inner vlan in the flow");
1843         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
1844         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
1845             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1846                 return rte_flow_error_set(error, ENOTSUP,
1847                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1848                                           NULL,
1849                                           "cannot pop vlan without a "
1850                                           "match on (outer) vlan in the flow");
1851         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1852                 return rte_flow_error_set(error, EINVAL,
1853                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1854                                           "wrong action order, port_id should "
1855                                           "be after pop VLAN action");
1856         if (!attr->transfer && priv->representor)
1857                 return rte_flow_error_set(error, ENOTSUP,
1858                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1859                                           "pop vlan action for VF representor "
1860                                           "not supported on NIC table");
1861         return 0;
1862 }
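
/*
 * Usage sketch (editorial, illustrative only): popping the outer VLAN
 * on an ingress rule; the pattern must match on the VLAN being removed,
 * and any port_id action must come after the pop, per the checks above
 * ("queue" is assumed to be a struct rte_flow_action_queue defined by
 * the application):
 *
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */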
1863
1864 /**
1865  * Get the default VLAN info from the VLAN match info.
1866  *
1867  * @param[in] items
1868  *   The list of item specifications.
1869  * @param[out] vlan
1870  *   Pointer to the VLAN info to fill.
1871  */
1875 static void
1876 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1877                                   struct rte_vlan_hdr *vlan)
1878 {
1879         const struct rte_flow_item_vlan nic_mask = {
1880                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1881                                 MLX5DV_FLOW_VLAN_VID_MASK),
1882                 .inner_type = RTE_BE16(0xffff),
1883         };
1884
1885         if (items == NULL)
1886                 return;
1887         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1888                 int type = items->type;
1889
1890                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1891                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1892                         break;
1893         }
1894         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1895                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1896                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1897
1898                 /* If VLAN item in pattern doesn't contain data, return here. */
1899                 if (!vlan_v)
1900                         return;
1901                 if (!vlan_m)
1902                         vlan_m = &nic_mask;
1903                 /* Only full match values are accepted. */
1904                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1905                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1906                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1907                         vlan->vlan_tci |=
1908                                 rte_be_to_cpu_16(vlan_v->tci &
1909                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1910                 }
1911                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1912                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1913                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1914                         vlan->vlan_tci |=
1915                                 rte_be_to_cpu_16(vlan_v->tci &
1916                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1917                 }
1918                 if (vlan_m->inner_type == nic_mask.inner_type)
1919                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1920                                                            vlan_m->inner_type);
1921         }
1922 }
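
/*
 * Worked example (editorial): for a matched TCI of 0xA064, the code
 * above decomposes the field as
 *
 *   PCP = (0xA064 & MLX5DV_FLOW_VLAN_PCP_MASK) >> 13 == 5
 *   VID =  0xA064 & MLX5DV_FLOW_VLAN_VID_MASK        == 100
 *
 * and copies a field into "vlan" only when its mask is a full match.
 */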
1923
1924 /**
1925  * Validate the push VLAN action.
1926  *
1927  * @param[in] dev
1928  *   Pointer to the rte_eth_dev structure.
1929  * @param[in] action_flags
1930  *   Holds the actions detected until now.
1931  * @param[in] vlan_m
1932  *   Pointer to the VLAN item mask from the flow pattern, or NULL.
1933  * @param[in] action
1934  *   Pointer to the action structure.
1935  * @param[in] attr
1936  *   Pointer to flow attributes
1937  * @param[out] error
1938  *   Pointer to error structure.
1939  *
1940  * @return
1941  *   0 on success, a negative errno value otherwise and rte_errno is set.
1942  */
1943 static int
1944 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
1945                                   uint64_t action_flags,
1946                                   const struct rte_flow_item_vlan *vlan_m,
1947                                   const struct rte_flow_action *action,
1948                                   const struct rte_flow_attr *attr,
1949                                   struct rte_flow_error *error)
1950 {
1951         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1952         const struct mlx5_priv *priv = dev->data->dev_private;
1953
1954         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1955             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1956                 return rte_flow_error_set(error, EINVAL,
1957                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1958                                           "invalid vlan ethertype");
1959         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1962                                           "wrong action order, port_id should "
1963                                           "be after push VLAN");
1964         if (!attr->transfer && priv->representor)
1965                 return rte_flow_error_set(error, ENOTSUP,
1966                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1967                                           "push vlan action for VF representor "
1968                                           "not supported on NIC table");
1969         if (vlan_m &&
1970             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
1971             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
1972                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
1973             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
1974             !(mlx5_flow_find_action
1975                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
1976                 return rte_flow_error_set(error, EINVAL,
1977                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1978                                           "not full match mask on VLAN PCP and "
1979                                           "there is no of_set_vlan_pcp action, "
1980                                           "push VLAN action cannot figure out "
1981                                           "PCP value");
1982         if (vlan_m &&
1983             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
1984             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
1985                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
1986             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
1987             !(mlx5_flow_find_action
1988                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
1989                 return rte_flow_error_set(error, EINVAL,
1990                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1991                                           "not full match mask on VLAN VID and "
1992                                           "there is no of_set_vlan_vid action, "
1993                                           "push VLAN action cannot figure out "
1994                                           "VID value");
1996         return 0;
1997 }
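
/*
 * Usage sketch (editorial, illustrative only): pushing an 802.1Q tag
 * and setting its VID explicitly so the PCP/VID checks above can
 * resolve the tag content:
 *
 *   struct rte_flow_action_of_push_vlan push = {
 *           .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *   };
 *   struct rte_flow_action_of_set_vlan_vid vid = {
 *           .vlan_vid = RTE_BE16(100),
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *           { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */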
1998
1999 /**
2000  * Validate the set VLAN PCP.
2001  *
2002  * @param[in] action_flags
2003  *   Holds the actions detected until now.
2004  * @param[in] actions
2005  *   Pointer to the list of actions remaining in the flow rule.
2006  * @param[out] error
2007  *   Pointer to error structure.
2008  *
2009  * @return
2010  *   0 on success, a negative errno value otherwise and rte_errno is set.
2011  */
2012 static int
2013 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2014                                      const struct rte_flow_action actions[],
2015                                      struct rte_flow_error *error)
2016 {
2017         const struct rte_flow_action *action = actions;
2018         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2019
2020         if (conf->vlan_pcp > 7)
2021                 return rte_flow_error_set(error, EINVAL,
2022                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2023                                           "VLAN PCP value is too big");
2024         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2025                 return rte_flow_error_set(error, ENOTSUP,
2026                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2027                                           "set VLAN PCP action must follow "
2028                                           "the push VLAN action");
2029         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2030                 return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2032                                           "Multiple VLAN PCP modifications are "
2033                                           "not supported");
2034         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2035                 return rte_flow_error_set(error, EINVAL,
2036                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2037                                           "wrong action order, port_id should "
2038                                           "be after set VLAN PCP");
2039         return 0;
2040 }
2041
2042 /**
2043  * Validate the set VLAN VID.
2044  *
2045  * @param[in] item_flags
2046  *   Holds the items detected in this rule.
2047  * @param[in] action_flags
2048  *   Holds the actions detected until now.
2049  * @param[in] actions
2050  *   Pointer to the list of actions remaining in the flow rule.
2051  * @param[out] error
2052  *   Pointer to error structure.
2053  *
2054  * @return
2055  *   0 on success, a negative errno value otherwise and rte_errno is set.
2056  */
2057 static int
2058 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2059                                      uint64_t action_flags,
2060                                      const struct rte_flow_action actions[],
2061                                      struct rte_flow_error *error)
2062 {
2063         const struct rte_flow_action *action = actions;
2064         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2065
2066         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2067                 return rte_flow_error_set(error, EINVAL,
2068                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2069                                           "VLAN VID value is too big");
2070         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2071             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2072                 return rte_flow_error_set(error, ENOTSUP,
2073                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2074                                           "set VLAN VID action must follow push"
2075                                           " VLAN action or match on VLAN item");
2076         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2077                 return rte_flow_error_set(error, ENOTSUP,
2078                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2079                                           "Multiple VLAN VID modifications are "
2080                                           "not supported");
2081         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2082                 return rte_flow_error_set(error, EINVAL,
2083                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2084                                           "wrong action order, port_id should "
2085                                           "be after set VLAN VID");
2086         return 0;
2087 }
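
/*
 * Note (editorial): VID values are 12 bits wide and 0xFFF is reserved
 * by IEEE 802.1Q, hence the upper bound of 0xFFE accepted above.
 */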
2088
2089 /**
2090  * Validate the FLAG action.
2091  *
2092  * @param[in] dev
2093  *   Pointer to the rte_eth_dev structure.
2094  * @param[in] action_flags
2095  *   Holds the actions detected until now.
2096  * @param[in] attr
2097  *   Pointer to flow attributes
2098  * @param[out] error
2099  *   Pointer to error structure.
2100  *
2101  * @return
2102  *   0 on success, a negative errno value otherwise and rte_errno is set.
2103  */
2104 static int
2105 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2106                              uint64_t action_flags,
2107                              const struct rte_flow_attr *attr,
2108                              struct rte_flow_error *error)
2109 {
2110         struct mlx5_priv *priv = dev->data->dev_private;
2111         struct mlx5_dev_config *config = &priv->config;
2112         int ret;
2113
2114         /* Fall back if no extended metadata register support. */
2115         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2116                 return mlx5_flow_validate_action_flag(action_flags, attr,
2117                                                       error);
2118         /* Extensive metadata mode requires registers. */
2119         if (!mlx5_flow_ext_mreg_supported(dev))
2120                 return rte_flow_error_set(error, ENOTSUP,
2121                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2122                                           "no metadata registers "
2123                                           "to support flag action");
2124         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2125                 return rte_flow_error_set(error, ENOTSUP,
2126                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2127                                           "extended metadata register"
2128                                           " isn't available");
2129         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2130         if (ret < 0)
2131                 return ret;
2132         MLX5_ASSERT(ret > 0);
2133         if (action_flags & MLX5_FLOW_ACTION_MARK)
2134                 return rte_flow_error_set(error, EINVAL,
2135                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2136                                           "can't mark and flag in same flow");
2137         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2138                 return rte_flow_error_set(error, EINVAL,
2139                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2140                                           "can't have 2 flag"
2141                                           " actions in same flow");
2142         return 0;
2143 }
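
/*
 * Usage sketch (editorial, illustrative only): FLAG consumes the same
 * metadata register as MARK, so the two are mutually exclusive within
 * one flow ("queue" is an application-defined rte_flow_action_queue):
 *
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_FLAG },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */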
2144
2145 /**
2146  * Validate MARK action.
2147  *
2148  * @param[in] dev
2149  *   Pointer to the rte_eth_dev structure.
2150  * @param[in] action
2151  *   Pointer to action.
2152  * @param[in] action_flags
2153  *   Holds the actions detected until now.
2154  * @param[in] attr
2155  *   Pointer to flow attributes
2156  * @param[out] error
2157  *   Pointer to error structure.
2158  *
2159  * @return
2160  *   0 on success, a negative errno value otherwise and rte_errno is set.
2161  */
2162 static int
2163 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2164                              const struct rte_flow_action *action,
2165                              uint64_t action_flags,
2166                              const struct rte_flow_attr *attr,
2167                              struct rte_flow_error *error)
2168 {
2169         struct mlx5_priv *priv = dev->data->dev_private;
2170         struct mlx5_dev_config *config = &priv->config;
2171         const struct rte_flow_action_mark *mark = action->conf;
2172         int ret;
2173
2174         /* Fall back if no extended metadata register support. */
2175         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2176                 return mlx5_flow_validate_action_mark(action, action_flags,
2177                                                       attr, error);
2178         /* Extensive metadata mode requires registers. */
2179         if (!mlx5_flow_ext_mreg_supported(dev))
2180                 return rte_flow_error_set(error, ENOTSUP,
2181                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2182                                           "no metadata registers "
2183                                           "to support mark action");
2184         if (!priv->sh->dv_mark_mask)
2185                 return rte_flow_error_set(error, ENOTSUP,
2186                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2187                                           "extended metadata register"
2188                                           " isn't available");
2189         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2190         if (ret < 0)
2191                 return ret;
2192         MLX5_ASSERT(ret > 0);
2193         if (!mark)
2194                 return rte_flow_error_set(error, EINVAL,
2195                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2196                                           "configuration cannot be null");
2197         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2198                 return rte_flow_error_set(error, EINVAL,
2199                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2200                                           &mark->id,
2201                                           "mark id exceeds the limit");
2202         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2203                 return rte_flow_error_set(error, EINVAL,
2204                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2205                                           "can't flag and mark in same flow");
2206         if (action_flags & MLX5_FLOW_ACTION_MARK)
2207                 return rte_flow_error_set(error, EINVAL,
2208                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2209                                           "can't have 2 mark actions in same"
2210                                           " flow");
2211         return 0;
2212 }
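
/*
 * Usage sketch (editorial, illustrative only): marking matched packets
 * with an application id, later delivered in mbuf->hash.fdir.hi:
 *
 *   struct rte_flow_action_mark mark = { .id = 0xbeef };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_MARK,
 *           .conf = &mark,
 *   };
 */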
2213
2214 /**
2215  * Validate SET_META action.
2216  *
2217  * @param[in] dev
2218  *   Pointer to the rte_eth_dev structure.
2219  * @param[in] action
2220  *   Pointer to the action structure.
2221  * @param[in] action_flags
2222  *   Holds the actions detected until now.
2223  * @param[in] attr
2224  *   Pointer to flow attributes
2225  * @param[out] error
2226  *   Pointer to error structure.
2227  *
2228  * @return
2229  *   0 on success, a negative errno value otherwise and rte_errno is set.
2230  */
2231 static int
2232 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2233                                  const struct rte_flow_action *action,
2234                                  uint64_t action_flags __rte_unused,
2235                                  const struct rte_flow_attr *attr,
2236                                  struct rte_flow_error *error)
2237 {
2238         const struct rte_flow_action_set_meta *conf;
2239         uint32_t nic_mask = UINT32_MAX;
2240         int reg;
2241
2242         if (!mlx5_flow_ext_mreg_supported(dev))
2243                 return rte_flow_error_set(error, ENOTSUP,
2244                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2245                                           "extended metadata register"
2246                                           " isn't supported");
2247         reg = flow_dv_get_metadata_reg(dev, attr, error);
2248         if (reg < 0)
2249                 return reg;
2250         if (reg != REG_A && reg != REG_B) {
2251                 struct mlx5_priv *priv = dev->data->dev_private;
2252
2253                 nic_mask = priv->sh->dv_meta_mask;
2254         }
2255         if (!(action->conf))
2256                 return rte_flow_error_set(error, EINVAL,
2257                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2258                                           "configuration cannot be null");
2259         conf = (const struct rte_flow_action_set_meta *)action->conf;
2260         if (!conf->mask)
2261                 return rte_flow_error_set(error, EINVAL,
2262                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2263                                           "zero mask doesn't have any effect");
2264         if (conf->mask & ~nic_mask)
2265                 return rte_flow_error_set(error, EINVAL,
2266                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2267                                           "meta data must be within reg C0");
2268         return 0;
2269 }
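
/*
 * Usage sketch (editorial, illustrative only): setting 16 bits of
 * metadata; the mask selects the affected bits and must stay within
 * the register mask checked above:
 *
 *   struct rte_flow_action_set_meta meta = {
 *           .data = 0x1234,
 *           .mask = 0xffff,
 *   };
 */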
2270
2271 /**
2272  * Validate SET_TAG action.
2273  *
2274  * @param[in] dev
2275  *   Pointer to the rte_eth_dev structure.
2276  * @param[in] action
2277  *   Pointer to the action structure.
2278  * @param[in] action_flags
2279  *   Holds the actions detected until now.
2280  * @param[in] attr
2281  *   Pointer to flow attributes
2282  * @param[out] error
2283  *   Pointer to error structure.
2284  *
2285  * @return
2286  *   0 on success, a negative errno value otherwise and rte_errno is set.
2287  */
2288 static int
2289 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2290                                 const struct rte_flow_action *action,
2291                                 uint64_t action_flags,
2292                                 const struct rte_flow_attr *attr,
2293                                 struct rte_flow_error *error)
2294 {
2295         const struct rte_flow_action_set_tag *conf;
2296         const uint64_t terminal_action_flags =
2297                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2298                 MLX5_FLOW_ACTION_RSS;
2299         int ret;
2300
2301         if (!mlx5_flow_ext_mreg_supported(dev))
2302                 return rte_flow_error_set(error, ENOTSUP,
2303                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2304                                           "extensive metadata register"
2305                                           " isn't supported");
2306         if (!(action->conf))
2307                 return rte_flow_error_set(error, EINVAL,
2308                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2309                                           "configuration cannot be null");
2310         conf = (const struct rte_flow_action_set_tag *)action->conf;
2311         if (!conf->mask)
2312                 return rte_flow_error_set(error, EINVAL,
2313                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2314                                           "zero mask doesn't have any effect");
2315         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2316         if (ret < 0)
2317                 return ret;
2318         if (!attr->transfer && attr->ingress &&
2319             (action_flags & terminal_action_flags))
2320                 return rte_flow_error_set(error, EINVAL,
2321                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2322                                           "set_tag has no effect"
2323                                           " with terminal actions");
2324         return 0;
2325 }
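
/*
 * Usage sketch (editorial, illustrative only): writing to application
 * tag register index 0; on non-transfer ingress rules the combination
 * with a terminal action (drop/queue/rss) is rejected above because
 * the written value could never be consumed:
 *
 *   struct rte_flow_action_set_tag tag = {
 *           .data = 0xa5,
 *           .mask = 0xff,
 *           .index = 0,
 *   };
 */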
2326
2327 /**
2328  * Validate count action.
2329  *
2330  * @param[in] dev
2331  *   Pointer to rte_eth_dev structure.
2332  * @param[out] error
2333  *   Pointer to error structure.
2334  *
2335  * @return
2336  *   0 on success, a negative errno value otherwise and rte_errno is set.
2337  */
2338 static int
2339 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2340                               struct rte_flow_error *error)
2341 {
2342         struct mlx5_priv *priv = dev->data->dev_private;
2343
2344         if (!priv->config.devx)
2345                 goto notsup_err;
2346 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2347         return 0;
2348 #endif
2349 notsup_err:
2350         return rte_flow_error_set
2351                       (error, ENOTSUP,
2352                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2353                        NULL,
2354                        "count action not supported");
2355 }
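
/*
 * Usage sketch (editorial, illustrative only): attaching a counter and
 * reading it back through the query API; this needs DevX counter
 * support as validated above ("port_id", "flow" and "error" are assumed
 * to be defined by the application):
 *
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_query_count stats = { .reset = 0 };
 *   rte_flow_query(port_id, flow, &actions[0], &stats, &error);
 */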
2356
2357 /**
2358  * Validate the L2 encap action.
2359  *
2360  * @param[in] dev
2361  *   Pointer to the rte_eth_dev structure.
2362  * @param[in] action_flags
2363  *   Holds the actions detected until now.
2364  * @param[in] action
2365  *   Pointer to the action structure.
2366  * @param[in] attr
2367  *   Pointer to flow attributes.
2368  * @param[out] error
2369  *   Pointer to error structure.
2370  *
2371  * @return
2372  *   0 on success, a negative errno value otherwise and rte_errno is set.
2373  */
2374 static int
2375 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2376                                  uint64_t action_flags,
2377                                  const struct rte_flow_action *action,
2378                                  const struct rte_flow_attr *attr,
2379                                  struct rte_flow_error *error)
2380 {
2381         const struct mlx5_priv *priv = dev->data->dev_private;
2382
2383         if (!(action->conf))
2384                 return rte_flow_error_set(error, EINVAL,
2385                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2386                                           "configuration cannot be null");
2387         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2388                 return rte_flow_error_set(error, EINVAL,
2389                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2390                                           "can only have a single encap action "
2391                                           "in a flow");
2392         if (!attr->transfer && priv->representor)
2393                 return rte_flow_error_set(error, ENOTSUP,
2394                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2395                                           "encap action for VF representor "
2396                                           "not supported on NIC table");
2397         return 0;
2398 }
2399
2400 /**
2401  * Validate a decap action.
2402  *
2403  * @param[in] dev
2404  *   Pointer to the rte_eth_dev structure.
2405  * @param[in] action_flags
2406  *   Holds the actions detected until now.
2407  * @param[in] attr
2408  *   Pointer to flow attributes
2409  * @param[out] error
2410  *   Pointer to error structure.
2411  *
2412  * @return
2413  *   0 on success, a negative errno value otherwise and rte_errno is set.
2414  */
2415 static int
2416 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2417                               uint64_t action_flags,
2418                               const struct rte_flow_attr *attr,
2419                               struct rte_flow_error *error)
2420 {
2421         const struct mlx5_priv *priv = dev->data->dev_private;
2422
2423         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2424             !priv->config.decap_en)
2425                 return rte_flow_error_set(error, ENOTSUP,
2426                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2427                                           "decap is not enabled");
2428         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2429                 return rte_flow_error_set(error, ENOTSUP,
2430                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2431                                           action_flags &
2432                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2433                                           "have a single decap action" : "decap "
2434                                           "after encap is not supported");
2435         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2436                 return rte_flow_error_set(error, EINVAL,
2437                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2438                                           "can't have decap action after"
2439                                           " modify action");
2440         if (attr->egress)
2441                 return rte_flow_error_set(error, ENOTSUP,
2442                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2443                                           NULL,
2444                                           "decap action not supported for "
2445                                           "egress");
2446         if (!attr->transfer && priv->representor)
2447                 return rte_flow_error_set(error, ENOTSUP,
2448                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2449                                           "decap action for VF representor "
2450                                           "not supported on NIC table");
2451         return 0;
2452 }
2453
2454 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2455
2456 /**
2457  * Validate the raw encap and decap actions.
2458  *
2459  * @param[in] dev
2460  *   Pointer to the rte_eth_dev structure.
2461  * @param[in] decap
2462  *   Pointer to the decap action.
2463  * @param[in] encap
2464  *   Pointer to the encap action.
2465  * @param[in] attr
2466  *   Pointer to flow attributes
2467  * @param[in/out] action_flags
2468  *   Holds the actions detected until now.
2469  * @param[out] actions_n
2470  *   pointer to the number of actions counter.
2471  * @param[out] error
2472  *   Pointer to error structure.
2473  *
2474  * @return
2475  *   0 on success, a negative errno value otherwise and rte_errno is set.
2476  */
2477 static int
2478 flow_dv_validate_action_raw_encap_decap
2479         (struct rte_eth_dev *dev,
2480          const struct rte_flow_action_raw_decap *decap,
2481          const struct rte_flow_action_raw_encap *encap,
2482          const struct rte_flow_attr *attr, uint64_t *action_flags,
2483          int *actions_n, struct rte_flow_error *error)
2484 {
2485         const struct mlx5_priv *priv = dev->data->dev_private;
2486         int ret;
2487
2488         if (encap && (!encap->size || !encap->data))
2489                 return rte_flow_error_set(error, EINVAL,
2490                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2491                                           "raw encap data cannot be empty");
2492         if (decap && encap) {
2493                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2494                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2495                         /* L3 encap. */
2496                         decap = NULL;
2497                 else if (encap->size <=
2498                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2499                            decap->size >
2500                            MLX5_ENCAPSULATION_DECISION_SIZE)
2501                         /* L3 decap. */
2502                         encap = NULL;
2503                 else if (encap->size >
2504                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2505                            decap->size >
2506                            MLX5_ENCAPSULATION_DECISION_SIZE)
2507                         /* 2 L2 actions: encap and decap. */
2508                         ;
2509                 else
2510                         return rte_flow_error_set(error,
2511                                 ENOTSUP,
2512                                 RTE_FLOW_ERROR_TYPE_ACTION,
2513                                 NULL, "unsupported combination: both "
2514                                 "raw decap and raw encap sizes "
2515                                 "are too small");
2516         }
2517         if (decap) {
2518                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2519                                                     error);
2520                 if (ret < 0)
2521                         return ret;
2522                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2523                 ++(*actions_n);
2524         }
2525         if (encap) {
2526                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2527                         return rte_flow_error_set(error, ENOTSUP,
2528                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2529                                                   NULL,
2530                                                   "small raw encap size");
2531                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2532                         return rte_flow_error_set(error, EINVAL,
2533                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2534                                                   NULL,
2535                                                   "more than one encap action");
2536                 if (!attr->transfer && priv->representor)
2537                         return rte_flow_error_set
2538                                         (error, ENOTSUP,
2539                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2540                                          "encap action for VF representor "
2541                                          "not supported on NIC table");
2542                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2543                 ++(*actions_n);
2544         }
2545         return 0;
2546 }
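
/*
 * Usage sketch (editorial, illustrative only): an L3 decap expressed as
 * the raw decap + raw encap pair classified above; the decap strips a
 * tunnel header longer than MLX5_ENCAPSULATION_DECISION_SIZE and the
 * encap rebuilds an L2 header no longer than it ("tunnel_hdr" and
 * "eth_hdr" are buffers assumed to be built by the application):
 *
 *   struct rte_flow_action_raw_decap decap = {
 *           .data = tunnel_hdr, .size = sizeof(tunnel_hdr),
 *   };
 *   struct rte_flow_action_raw_encap encap = {
 *           .data = eth_hdr, .size = sizeof(eth_hdr),
 *   };
 */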
2547
2548 /**
2549  * Match encap_decap resource.
2550  *
2551  * @param entry
2552  *   Pointer to exist resource entry object.
2553  * @param ctx
2554  *   Pointer to new encap_decap resource.
2555  *
2556  * @return
2557  *   0 on matching, -1 otherwise.
2558  */
2559 static int
2560 flow_dv_encap_decap_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
2561 {
2562         struct mlx5_flow_dv_encap_decap_resource *resource;
2563         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2564
2565         resource = (struct mlx5_flow_dv_encap_decap_resource *)ctx;
2566         cache_resource = container_of(entry,
2567                                       struct mlx5_flow_dv_encap_decap_resource,
2568                                       entry);
2569         if (resource->entry.key == cache_resource->entry.key &&
2570             resource->reformat_type == cache_resource->reformat_type &&
2571             resource->ft_type == cache_resource->ft_type &&
2572             resource->flags == cache_resource->flags &&
2573             resource->size == cache_resource->size &&
2574             !memcmp((const void *)resource->buf,
2575                     (const void *)cache_resource->buf,
2576                     resource->size))
2577                 return 0;
2578         return -1;
2579 }
2580
2581 /**
2582  * Find existing encap/decap resource or create and register a new one.
2583  *
2584  * @param[in, out] dev
2585  *   Pointer to rte_eth_dev structure.
2586  * @param[in, out] resource
2587  *   Pointer to encap/decap resource.
2588  * @param[in, out] dev_flow
2589  *   Pointer to the dev_flow.
2590  * @param[out] error
2591  *   Pointer to error structure.
2592  *
2593  * @return
2594  *   0 on success, otherwise -errno and rte_errno is set.
2595  */
2596 static int
2597 flow_dv_encap_decap_resource_register
2598                         (struct rte_eth_dev *dev,
2599                          struct mlx5_flow_dv_encap_decap_resource *resource,
2600                          struct mlx5_flow *dev_flow,
2601                          struct rte_flow_error *error)
2602 {
2603         struct mlx5_priv *priv = dev->data->dev_private;
2604         struct mlx5_dev_ctx_shared *sh = priv->sh;
2605         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2606         struct mlx5dv_dr_domain *domain;
2607         struct mlx5_hlist_entry *entry;
2608         union mlx5_flow_encap_decap_key encap_decap_key = {
2609                 {
2610                         .ft_type = resource->ft_type,
2611                         .refmt_type = resource->reformat_type,
2612                         .buf_size = resource->size,
2613                         .table_level = !!dev_flow->dv.group,
2614                         .cksum = 0,
2615                 }
2616         };
2617         int ret;
2618
2619         resource->flags = dev_flow->dv.group ? 0 : 1;
2620         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2621                 domain = sh->fdb_domain;
2622         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2623                 domain = sh->rx_domain;
2624         else
2625                 domain = sh->tx_domain;
2626         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2627                                                 resource->size, 0);
2628         resource->entry.key = encap_decap_key.v64;
2629         /* Lookup a matching resource from cache. */
2630         entry = mlx5_hlist_lookup_ex(sh->encaps_decaps, resource->entry.key,
2631                                      flow_dv_encap_decap_resource_match,
2632                                      (void *)resource);
2633         if (entry) {
2634                 cache_resource = container_of(entry,
2635                         struct mlx5_flow_dv_encap_decap_resource, entry);
2636                 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2637                         (void *)cache_resource,
2638                         rte_atomic32_read(&cache_resource->refcnt));
2639                 rte_atomic32_inc(&cache_resource->refcnt);
2640                 dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
2641                 dev_flow->dv.encap_decap = cache_resource;
2642                 return 0;
2643         }
2644         /* Register new encap/decap resource. */
2645         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2646                                        &dev_flow->handle->dvh.rix_encap_decap);
2647         if (!cache_resource)
2648                 return rte_flow_error_set(error, ENOMEM,
2649                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2650                                           "cannot allocate resource memory");
2651         *cache_resource = *resource;
2652         cache_resource->idx = dev_flow->handle->dvh.rix_encap_decap;
2653         ret = mlx5_flow_os_create_flow_action_packet_reformat
2654                                         (sh->ctx, domain, cache_resource,
2655                                          &cache_resource->action);
2656         if (ret) {
2657                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2658                                 cache_resource->idx);
2659                 return rte_flow_error_set(error, ENOMEM,
2660                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2661                                           NULL, "cannot create action");
2662         }
2662         rte_atomic32_init(&cache_resource->refcnt);
2663         rte_atomic32_inc(&cache_resource->refcnt);
2664         if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
2665                                  flow_dv_encap_decap_resource_match,
2666                                  (void *)cache_resource)) {
2667                 claim_zero(mlx5_flow_os_destroy_flow_action
2668                                                 (cache_resource->action));
2669                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2670                                 cache_resource->idx);
2671                 return rte_flow_error_set(error, EEXIST,
2672                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2673                                           NULL, "action exist");
2674         }
2675         dev_flow->dv.encap_decap = cache_resource;
2676         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2677                 (void *)cache_resource,
2678                 rte_atomic32_read(&cache_resource->refcnt));
2679         return 0;
2680 }
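
/*
 * Reformat actions are cached in the sh->encaps_decaps hash list.  The
 * 64-bit key packs ft_type, reformat type, buffer size, a root/non-root
 * flag and a checksum of the raw buffer; the match callback above then
 * compares the full buffer, so a checksum collision cannot alias two
 * different reformat configurations.  A minimal caller sketch
 * (encap_hdr/encap_len are illustrative placeholders):
 *
 *     struct mlx5_flow_dv_encap_decap_resource res = {
 *             .reformat_type =
 *                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
 *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *             .size = encap_len,
 *     };
 *
 *     memcpy(res.buf, encap_hdr, encap_len);
 *     if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow,
 *                                               error))
 *             return -rte_errno;
 *
 * after which dev_flow->dv.encap_decap->action holds the shared action.
 */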
2681
2682 /**
2683  * Find existing table jump resource or create and register a new one.
2684  *
2685  * @param[in, out] dev
2686  *   Pointer to rte_eth_dev structure.
2687  * @param[in, out] tbl
2688  *   Pointer to flow table resource.
2689  * @param[in, out] dev_flow
2690  *   Pointer to the dev_flow.
2691  * @param[out] error
2692  *   Pointer to error structure.
2693  *
2694  * @return
2695  *   0 on success, otherwise -errno and rte_errno is set.
2696  */
2697 static int
2698 flow_dv_jump_tbl_resource_register
2699                         (struct rte_eth_dev *dev,
2700                          struct mlx5_flow_tbl_resource *tbl,
2701                          struct mlx5_flow *dev_flow,
2702                          struct rte_flow_error *error)
2703 {
2704         struct mlx5_flow_tbl_data_entry *tbl_data =
2705                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2706         int cnt, ret;
2707
2708         MLX5_ASSERT(tbl);
2709         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2710         if (!cnt) {
2711                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
2712                                 (tbl->obj, &tbl_data->jump.action);
2713                 if (ret)
2714                         return rte_flow_error_set(error, ENOMEM,
2715                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2716                                         NULL, "cannot create jump action");
2717                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2718                         (void *)&tbl_data->jump, cnt);
2719         } else {
2720                 /* A reused jump must not add a table reference, drop it. */
2721                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2722                 MLX5_ASSERT(tbl_data->jump.action);
2723                 DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
2724                         (void *)&tbl_data->jump, cnt);
2725         }
2726         rte_atomic32_inc(&tbl_data->jump.refcnt);
2727         dev_flow->handle->rix_jump = tbl_data->idx;
2728         dev_flow->dv.jump = &tbl_data->jump;
2729         return 0;
2730 }
2731
2732 /**
2733  * Find existing default miss resource or create and register a new one.
2734  *
2735  * @param[in, out] dev
2736  *   Pointer to rte_eth_dev structure.
2737  * @param[out] error
2738  *   Pointer to error structure.
2739  *
2740  * @return
2741  *   0 on success, otherwise -errno and rte_errno is set.
2742  */
2743 static int
2744 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
2745                 struct rte_flow_error *error)
2746 {
2747         struct mlx5_priv *priv = dev->data->dev_private;
2748         struct mlx5_dev_ctx_shared *sh = priv->sh;
2749         struct mlx5_flow_default_miss_resource *cache_resource =
2750                         &sh->default_miss;
2751         int cnt = rte_atomic32_read(&cache_resource->refcnt);
2752
2753         if (!cnt) {
2754                 MLX5_ASSERT(!cache_resource->action);
2755                 cache_resource->action =
2756                         mlx5_glue->dr_create_flow_action_default_miss();
2757                 if (!cache_resource->action)
2758                         return rte_flow_error_set(error, ENOMEM,
2759                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2760                                         "cannot create default miss action");
2761                 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
2762                                 (void *)cache_resource->action, cnt);
2763         }
2764         rte_atomic32_inc(&cache_resource->refcnt);
2765         return 0;
2766 }
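
/*
 * The default miss action is a per-device singleton (sh->default_miss):
 * there is no lookup key, only the reference counter, so the first user
 * creates the mlx5dv action and later users merely take a reference,
 * dropped again by flow_dv_default_miss_resource_release().
 */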
2767
2768 /**
2769  * Find existing table port ID resource or create and register a new one.
2770  *
2771  * @param[in, out] dev
2772  *   Pointer to rte_eth_dev structure.
2773  * @param[in, out] resource
2774  *   Pointer to port ID action resource.
2775  * @param[in, out] dev_flow
2776  *   Pointer to the dev_flow.
2777  * @param[out] error
2778  *   Pointer to error structure.
2779  *
2780  * @return
2781  *   0 on success, otherwise -errno and rte_errno is set.
2782  */
2783 static int
2784 flow_dv_port_id_action_resource_register
2785                         (struct rte_eth_dev *dev,
2786                          struct mlx5_flow_dv_port_id_action_resource *resource,
2787                          struct mlx5_flow *dev_flow,
2788                          struct rte_flow_error *error)
2789 {
2790         struct mlx5_priv *priv = dev->data->dev_private;
2791         struct mlx5_dev_ctx_shared *sh = priv->sh;
2792         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2793         uint32_t idx = 0;
2794         int ret;
2795
2796         /* Lookup a matching resource from cache. */
2797         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
2798                       idx, cache_resource, next) {
2799                 if (resource->port_id == cache_resource->port_id) {
2800                         DRV_LOG(DEBUG, "port id action resource %p: "
2801                                 "refcnt %d++",
2802                                 (void *)cache_resource,
2803                                 rte_atomic32_read(&cache_resource->refcnt));
2804                         rte_atomic32_inc(&cache_resource->refcnt);
2805                         dev_flow->handle->rix_port_id_action = idx;
2806                         dev_flow->dv.port_id_action = cache_resource;
2807                         return 0;
2808                 }
2809         }
2810         /* Register new port id action resource. */
2811         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
2812                                        &dev_flow->handle->rix_port_id_action);
2813         if (!cache_resource)
2814                 return rte_flow_error_set(error, ENOMEM,
2815                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2816                                           "cannot allocate resource memory");
2817         *cache_resource = *resource;
2818         ret = mlx5_flow_os_create_flow_action_dest_port
2819                                 (priv->sh->fdb_domain, resource->port_id,
2820                                  &cache_resource->action);
2821         if (ret) {
2822                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID],
2823                                 dev_flow->handle->rix_port_id_action);
2824                 return rte_flow_error_set(error, ENOMEM,
2825                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2826                                           NULL, "cannot create action");
2827         }
2827         rte_atomic32_init(&cache_resource->refcnt);
2828         rte_atomic32_inc(&cache_resource->refcnt);
2829         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
2830                      dev_flow->handle->rix_port_id_action, cache_resource,
2831                      next);
2832         dev_flow->dv.port_id_action = cache_resource;
2833         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2834                 (void *)cache_resource,
2835                 rte_atomic32_read(&cache_resource->refcnt));
2836         return 0;
2837 }
2838
2839 /**
2840  * Find existing push vlan resource or create and register a new one.
2841  *
2842  * @param[in, out] dev
2843  *   Pointer to rte_eth_dev structure.
2844  * @param[in, out] resource
2845  *   Pointer to push VLAN action resource.
2846  * @param[in, out] dev_flow
2847  *   Pointer to the dev_flow.
2848  * @param[out] error
2849  *   Pointer to error structure.
2850  *
2851  * @return
2852  *   0 on success, otherwise -errno and rte_errno is set.
2853  */
2854 static int
2855 flow_dv_push_vlan_action_resource_register
2856                        (struct rte_eth_dev *dev,
2857                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2858                         struct mlx5_flow *dev_flow,
2859                         struct rte_flow_error *error)
2860 {
2861         struct mlx5_priv *priv = dev->data->dev_private;
2862         struct mlx5_dev_ctx_shared *sh = priv->sh;
2863         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2864         struct mlx5dv_dr_domain *domain;
2865         uint32_t idx = 0;
2866         int ret;
2867
2868         /* Lookup a matching resource from cache. */
2869         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2870                       sh->push_vlan_action_list, idx, cache_resource, next) {
2871                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2872                     resource->ft_type == cache_resource->ft_type) {
2873                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2874                                 "refcnt %d++",
2875                                 (void *)cache_resource,
2876                                 rte_atomic32_read(&cache_resource->refcnt));
2877                         rte_atomic32_inc(&cache_resource->refcnt);
2878                         dev_flow->handle->dvh.rix_push_vlan = idx;
2879                         dev_flow->dv.push_vlan_res = cache_resource;
2880                         return 0;
2881                 }
2882         }
2883         /* Register new push_vlan action resource. */
2884         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2885                                        &dev_flow->handle->dvh.rix_push_vlan);
2886         if (!cache_resource)
2887                 return rte_flow_error_set(error, ENOMEM,
2888                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2889                                           "cannot allocate resource memory");
2890         *cache_resource = *resource;
2891         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2892                 domain = sh->fdb_domain;
2893         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2894                 domain = sh->rx_domain;
2895         else
2896                 domain = sh->tx_domain;
2897         ret = mlx5_flow_os_create_flow_action_push_vlan
2898                                         (domain, resource->vlan_tag,
2899                                          &cache_resource->action);
2900         if (ret) {
2901                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2902                                 dev_flow->handle->dvh.rix_push_vlan);
2903                 return rte_flow_error_set(error, ENOMEM,
2904                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2905                                           NULL, "cannot create action");
2906         }
2906         rte_atomic32_init(&cache_resource->refcnt);
2907         rte_atomic32_inc(&cache_resource->refcnt);
2908         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2909                      &sh->push_vlan_action_list,
2910                      dev_flow->handle->dvh.rix_push_vlan,
2911                      cache_resource, next);
2912         dev_flow->dv.push_vlan_res = cache_resource;
2913         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2914                 (void *)cache_resource,
2915                 rte_atomic32_read(&cache_resource->refcnt));
2916         return 0;
2917 }
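
/*
 * Unlike the hashed encap/decap cache, the port ID and push VLAN caches
 * above are indexed lists searched linearly; the lookup key is the port
 * id, respectively the (vlan_tag, ft_type) pair, and the entries live
 * in per-device ipools so the flow handle only stores a 32-bit index.
 * A sketch of the push VLAN path (the tag value is illustrative):
 *
 *     struct mlx5_flow_dv_push_vlan_action_resource res = {
 *             .vlan_tag = rte_cpu_to_be_32(0x8100a064),
 *             .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *     };
 *
 *     if (flow_dv_push_vlan_action_resource_register(dev, &res,
 *                                                    dev_flow, error))
 *             return -rte_errno;
 */
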
2918 /**
2919  * Get the header length of a specific rte_flow_item_type.
2920  *
2921  * @param[in] item_type
2922  *   Tested rte_flow_item_type.
2923  *
2924  * @return
2925  *   Size of the item type header, 0 if void or irrelevant.
2926  */
2927 static size_t
2928 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
2929 {
2930         size_t retval;
2931
2932         switch (item_type) {
2933         case RTE_FLOW_ITEM_TYPE_ETH:
2934                 retval = sizeof(struct rte_ether_hdr);
2935                 break;
2936         case RTE_FLOW_ITEM_TYPE_VLAN:
2937                 retval = sizeof(struct rte_vlan_hdr);
2938                 break;
2939         case RTE_FLOW_ITEM_TYPE_IPV4:
2940                 retval = sizeof(struct rte_ipv4_hdr);
2941                 break;
2942         case RTE_FLOW_ITEM_TYPE_IPV6:
2943                 retval = sizeof(struct rte_ipv6_hdr);
2944                 break;
2945         case RTE_FLOW_ITEM_TYPE_UDP:
2946                 retval = sizeof(struct rte_udp_hdr);
2947                 break;
2948         case RTE_FLOW_ITEM_TYPE_TCP:
2949                 retval = sizeof(struct rte_tcp_hdr);
2950                 break;
2951         case RTE_FLOW_ITEM_TYPE_VXLAN:
2952         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2953                 retval = sizeof(struct rte_vxlan_hdr);
2954                 break;
2955         case RTE_FLOW_ITEM_TYPE_GRE:
2956         case RTE_FLOW_ITEM_TYPE_NVGRE:
2957                 retval = sizeof(struct rte_gre_hdr);
2958                 break;
2959         case RTE_FLOW_ITEM_TYPE_MPLS:
2960                 retval = sizeof(struct rte_mpls_hdr);
2961                 break;
2962         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2963         default:
2964                 retval = 0;
2965                 break;
2966         }
2967         return retval;
2968 }
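
/*
 * For a classic VXLAN encapsulation chain the header lengths above add
 * up as follows:
 *
 *     ETH (14) + IPV4 (20) + UDP (8) + VXLAN (8) = 50 bytes
 *
 * well below MLX5_ENCAP_MAX_LEN.  VOID items contribute 0 and are
 * skipped, while types not listed here are rejected later by
 * flow_dv_convert_encap_data().
 */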
2969
2970 #define MLX5_ENCAP_IPV4_VERSION         0x40
2971 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2972 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2973 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2974 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2975 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2976 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
2977
2978 /**
2979  * Convert the encap action data from list of rte_flow_item to raw buffer
2980  *
2981  * @param[in] items
2982  *   Pointer to rte_flow_item objects list.
2983  * @param[out] buf
2984  *   Pointer to the output buffer.
2985  * @param[out] size
2986  *   Pointer to the output buffer size.
2987  * @param[out] error
2988  *   Pointer to the error structure.
2989  *
2990  * @return
2991  *   0 on success, a negative errno value otherwise and rte_errno is set.
2992  */
2993 static int
2994 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2995                            size_t *size, struct rte_flow_error *error)
2996 {
2997         struct rte_ether_hdr *eth = NULL;
2998         struct rte_vlan_hdr *vlan = NULL;
2999         struct rte_ipv4_hdr *ipv4 = NULL;
3000         struct rte_ipv6_hdr *ipv6 = NULL;
3001         struct rte_udp_hdr *udp = NULL;
3002         struct rte_vxlan_hdr *vxlan = NULL;
3003         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3004         struct rte_gre_hdr *gre = NULL;
3005         size_t len;
3006         size_t temp_size = 0;
3007
3008         if (!items)
3009                 return rte_flow_error_set(error, EINVAL,
3010                                           RTE_FLOW_ERROR_TYPE_ACTION,
3011                                           NULL, "invalid empty data");
3012         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3013                 len = flow_dv_get_item_hdr_len(items->type);
3014                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3015                         return rte_flow_error_set(error, EINVAL,
3016                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3017                                                   (void *)items->type,
3018                                                   "items total size is too big"
3019                                                   " for encap action");
3020                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3021                 switch (items->type) {
3022                 case RTE_FLOW_ITEM_TYPE_ETH:
3023                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3024                         break;
3025                 case RTE_FLOW_ITEM_TYPE_VLAN:
3026                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3027                         if (!eth)
3028                                 return rte_flow_error_set(error, EINVAL,
3029                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3030                                                 (void *)items->type,
3031                                                 "eth header not found");
3032                         if (!eth->ether_type)
3033                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3034                         break;
3035                 case RTE_FLOW_ITEM_TYPE_IPV4:
3036                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3037                         if (!vlan && !eth)
3038                                 return rte_flow_error_set(error, EINVAL,
3039                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3040                                                 (void *)items->type,
3041                                                 "neither eth nor vlan"
3042                                                 " header found");
3043                         if (vlan && !vlan->eth_proto)
3044                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3045                         else if (eth && !eth->ether_type)
3046                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3047                         if (!ipv4->version_ihl)
3048                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3049                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3050                         if (!ipv4->time_to_live)
3051                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3052                         break;
3053                 case RTE_FLOW_ITEM_TYPE_IPV6:
3054                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3055                         if (!vlan && !eth)
3056                                 return rte_flow_error_set(error, EINVAL,
3057                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3058                                                 (void *)items->type,
3059                                                 "neither eth nor vlan"
3060                                                 " header found");
3061                         if (vlan && !vlan->eth_proto)
3062                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3063                         else if (eth && !eth->ether_type)
3064                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3065                         if (!ipv6->vtc_flow)
3066                                 ipv6->vtc_flow =
3067                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3068                         if (!ipv6->hop_limits)
3069                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3070                         break;
3071                 case RTE_FLOW_ITEM_TYPE_UDP:
3072                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3073                         if (!ipv4 && !ipv6)
3074                                 return rte_flow_error_set(error, EINVAL,
3075                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3076                                                 (void *)items->type,
3077                                                 "ip header not found");
3078                         if (ipv4 && !ipv4->next_proto_id)
3079                                 ipv4->next_proto_id = IPPROTO_UDP;
3080                         else if (ipv6 && !ipv6->proto)
3081                                 ipv6->proto = IPPROTO_UDP;
3082                         break;
3083                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3084                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3085                         if (!udp)
3086                                 return rte_flow_error_set(error, EINVAL,
3087                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3088                                                 (void *)items->type,
3089                                                 "udp header not found");
3090                         if (!udp->dst_port)
3091                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3092                         if (!vxlan->vx_flags)
3093                                 vxlan->vx_flags =
3094                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3095                         break;
3096                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3097                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3098                         if (!udp)
3099                                 return rte_flow_error_set(error, EINVAL,
3100                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3101                                                 (void *)items->type,
3102                                                 "udp header not found");
3103                         if (!vxlan_gpe->proto)
3104                                 return rte_flow_error_set(error, EINVAL,
3105                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3106                                                 (void *)items->type,
3107                                                 "next protocol not found");
3108                         if (!udp->dst_port)
3109                                 udp->dst_port =
3110                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3111                         if (!vxlan_gpe->vx_flags)
3112                                 vxlan_gpe->vx_flags =
3113                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3114                         break;
3115                 case RTE_FLOW_ITEM_TYPE_GRE:
3116                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3117                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3118                         if (!gre->proto)
3119                                 return rte_flow_error_set(error, EINVAL,
3120                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3121                                                 (void *)items->type,
3122                                                 "next protocol not found");
3123                         if (!ipv4 && !ipv6)
3124                                 return rte_flow_error_set(error, EINVAL,
3125                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3126                                                 (void *)items->type,
3127                                                 "ip header not found");
3128                         if (ipv4 && !ipv4->next_proto_id)
3129                                 ipv4->next_proto_id = IPPROTO_GRE;
3130                         else if (ipv6 && !ipv6->proto)
3131                                 ipv6->proto = IPPROTO_GRE;
3132                         break;
3133                 case RTE_FLOW_ITEM_TYPE_VOID:
3134                         break;
3135                 default:
3136                         return rte_flow_error_set(error, EINVAL,
3137                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3138                                                   (void *)items->type,
3139                                                   "unsupported item type");
3140                         break;
3141                 }
3142                 temp_size += len;
3143         }
3144         *size = temp_size;
3145         return 0;
3146 }
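
/*
 * Zero fields in the encap definition are filled with defaults by the
 * conversion above.  For a VXLAN definition such as
 *
 *     eth / ipv4 / udp / vxlan
 *
 * with only addresses and the VNI set, it supplies eth->ether_type
 * (IPv4), ipv4->version_ihl and TTL, next_proto_id = IPPROTO_UDP,
 * udp->dst_port = 4789 (VXLAN) and the VNI-valid flag, yielding the
 * 50-byte raw header consumed by the packet reformat action.
 */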
3147
3148 static int
3149 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3150 {
3151         struct rte_ether_hdr *eth = NULL;
3152         struct rte_vlan_hdr *vlan = NULL;
3153         struct rte_ipv6_hdr *ipv6 = NULL;
3154         struct rte_udp_hdr *udp = NULL;
3155         char *next_hdr;
3156         uint16_t proto;
3157
3158         eth = (struct rte_ether_hdr *)data;
3159         next_hdr = (char *)(eth + 1);
3160         proto = rte_be_to_cpu_16(eth->ether_type);
3161
3162         /* VLAN skipping */
3163         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3164                 vlan = (struct rte_vlan_hdr *)next_hdr;
3165                 proto = rte_be_to_cpu_16(vlan->eth_proto);
3166                 next_hdr += sizeof(struct rte_vlan_hdr);
3167         }
3168
3169         /* HW calculates IPv4 csum. no need to proceed */
3170         if (proto == RTE_ETHER_TYPE_IPV4)
3171                 return 0;
3172
3173         /* non IPv4/IPv6 header. not supported */
3174         if (proto != RTE_ETHER_TYPE_IPV6) {
3175                 return rte_flow_error_set(error, ENOTSUP,
3176                                           RTE_FLOW_ERROR_TYPE_ACTION,
3177                                           NULL, "Cannot offload non IPv4/IPv6");
3178         }
3179
3180         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3181
3182         /* ignore non UDP */
3183         if (ipv6->proto != IPPROTO_UDP)
3184                 return 0;
3185
3186         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3187         udp->dgram_cksum = 0;
3188
3189         return 0;
3190 }
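
/*
 * Rationale for the helper above: the device recomputes the IPv4 header
 * checksum of an encapsulation header, but not the UDP checksum over
 * IPv6, so the latter is zeroed, which RFC 6935 permits for tunnel
 * protocols such as VXLAN.
 */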
3191
3192 /**
3193  * Convert L2 encap action to DV specification.
3194  *
3195  * @param[in] dev
3196  *   Pointer to rte_eth_dev structure.
3197  * @param[in] action
3198  *   Pointer to action structure.
3199  * @param[in, out] dev_flow
3200  *   Pointer to the mlx5_flow.
3201  * @param[in] transfer
3202  *   Mark if the flow is E-Switch flow.
3203  * @param[out] error
3204  *   Pointer to the error structure.
3205  *
3206  * @return
3207  *   0 on success, a negative errno value otherwise and rte_errno is set.
3208  */
3209 static int
3210 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3211                                const struct rte_flow_action *action,
3212                                struct mlx5_flow *dev_flow,
3213                                uint8_t transfer,
3214                                struct rte_flow_error *error)
3215 {
3216         const struct rte_flow_item *encap_data;
3217         const struct rte_flow_action_raw_encap *raw_encap_data;
3218         struct mlx5_flow_dv_encap_decap_resource res = {
3219                 .reformat_type =
3220                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3221                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3222                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3223         };
3224
3225         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3226                 raw_encap_data =
3227                         (const struct rte_flow_action_raw_encap *)action->conf;
3228                 res.size = raw_encap_data->size;
3229                 memcpy(res.buf, raw_encap_data->data, res.size);
3230         } else {
3231                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3232                         encap_data =
3233                                 ((const struct rte_flow_action_vxlan_encap *)
3234                                                 action->conf)->definition;
3235                 else
3236                         encap_data =
3237                                 ((const struct rte_flow_action_nvgre_encap *)
3238                                                 action->conf)->definition;
3239                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3240                                                &res.size, error))
3241                         return -rte_errno;
3242         }
3243         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3244                 return -rte_errno;
3245         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3246                 return rte_flow_error_set(error, EINVAL,
3247                                           RTE_FLOW_ERROR_TYPE_ACTION,
3248                                           NULL, "can't create L2 encap action");
3249         return 0;
3250 }
3251
3252 /**
3253  * Convert L2 decap action to DV specification.
3254  *
3255  * @param[in] dev
3256  *   Pointer to rte_eth_dev structure.
3257  * @param[in, out] dev_flow
3258  *   Pointer to the mlx5_flow.
3259  * @param[in] transfer
3260  *   Mark if the flow is E-Switch flow.
3261  * @param[out] error
3262  *   Pointer to the error structure.
3263  *
3264  * @return
3265  *   0 on success, a negative errno value otherwise and rte_errno is set.
3266  */
3267 static int
3268 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3269                                struct mlx5_flow *dev_flow,
3270                                uint8_t transfer,
3271                                struct rte_flow_error *error)
3272 {
3273         struct mlx5_flow_dv_encap_decap_resource res = {
3274                 .size = 0,
3275                 .reformat_type =
3276                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3277                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3278                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3279         };
3280
3281         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3282                 return rte_flow_error_set(error, EINVAL,
3283                                           RTE_FLOW_ERROR_TYPE_ACTION,
3284                                           NULL, "can't create L2 decap action");
3285         return 0;
3286 }
3287
3288 /**
3289  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3290  *
3291  * @param[in] dev
3292  *   Pointer to rte_eth_dev structure.
3293  * @param[in] action
3294  *   Pointer to action structure.
3295  * @param[in, out] dev_flow
3296  *   Pointer to the mlx5_flow.
3297  * @param[in] attr
3298  *   Pointer to the flow attributes.
3299  * @param[out] error
3300  *   Pointer to the error structure.
3301  *
3302  * @return
3303  *   0 on success, a negative errno value otherwise and rte_errno is set.
3304  */
3305 static int
3306 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3307                                 const struct rte_flow_action *action,
3308                                 struct mlx5_flow *dev_flow,
3309                                 const struct rte_flow_attr *attr,
3310                                 struct rte_flow_error *error)
3311 {
3312         const struct rte_flow_action_raw_encap *encap_data;
3313         struct mlx5_flow_dv_encap_decap_resource res;
3314
3315         memset(&res, 0, sizeof(res));
3316         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3317         res.size = encap_data->size;
3318         memcpy(res.buf, encap_data->data, res.size);
3319         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3320                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3321                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3322         if (attr->transfer)
3323                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3324         else
3325                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3326                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3327         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3328                 return rte_flow_error_set(error, EINVAL,
3329                                           RTE_FLOW_ERROR_TYPE_ACTION,
3330                                           NULL, "can't create encap action");
3331         return 0;
3332 }
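
/*
 * The reformat type above follows from the raw buffer size alone: a
 * buffer below MLX5_ENCAPSULATION_DECISION_SIZE can only hold the L2
 * header to restore after stripping an L3 tunnel (L3 decap,
 * L3_TUNNEL_TO_L2), while a larger buffer is a complete tunnel header
 * prepended in place of the L2 header (L3 encap, L2_TO_L3_TUNNEL).
 */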
3333
3334 /**
3335  * Create action push VLAN.
3336  *
3337  * @param[in] dev
3338  *   Pointer to rte_eth_dev structure.
3339  * @param[in] attr
3340  *   Pointer to the flow attributes.
3341  * @param[in] vlan
3342  *   Pointer to the vlan to push to the Ethernet header.
3343  * @param[in, out] dev_flow
3344  *   Pointer to the mlx5_flow.
3345  * @param[out] error
3346  *   Pointer to the error structure.
3347  *
3348  * @return
3349  *   0 on success, a negative errno value otherwise and rte_errno is set.
3350  */
3351 static int
3352 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3353                                 const struct rte_flow_attr *attr,
3354                                 const struct rte_vlan_hdr *vlan,
3355                                 struct mlx5_flow *dev_flow,
3356                                 struct rte_flow_error *error)
3357 {
3358         struct mlx5_flow_dv_push_vlan_action_resource res;
3359
3360         memset(&res, 0, sizeof(res));
3361         res.vlan_tag =
3362                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3363                                  vlan->vlan_tci);
3364         if (attr->transfer)
3365                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3366         else
3367                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3368                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3369         return flow_dv_push_vlan_action_resource_register
3370                                             (dev, &res, dev_flow, error);
3371 }
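
/*
 * The 32-bit vlan_tag word is the push ethertype in the high half and
 * the TCI in the low half, in network order.  For example, pushing
 * 802.1Q (0x8100) with PCP 5 and VID 100:
 *
 *     tci  = (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100    ->  0xa064
 *     word = rte_cpu_to_be_32(0x8100 << 16 | tci)       ->  0x8100a064
 */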
3372
3373 /**
3374  * Validate the modify-header actions.
3375  *
3376  * @param[in] action_flags
3377  *   Holds the actions detected until now.
3378  * @param[in] action
3379  *   Pointer to the modify action.
3380  * @param[out] error
3381  *   Pointer to error structure.
3382  *
3383  * @return
3384  *   0 on success, a negative errno value otherwise and rte_errno is set.
3385  */
3386 static int
3387 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3388                                    const struct rte_flow_action *action,
3389                                    struct rte_flow_error *error)
3390 {
3391         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3392                 return rte_flow_error_set(error, EINVAL,
3393                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3394                                           NULL, "action configuration not set");
3395         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3396                 return rte_flow_error_set(error, EINVAL,
3397                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3398                                           "can't have encap action before"
3399                                           " modify action");
3400         return 0;
3401 }
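
/*
 * The check above enforces ordering rather than mutual exclusion: a
 * header rewrite is only meaningful before an encap hides the header
 * being rewritten.  In testpmd terms (illustrative):
 *
 *     ... actions set_ipv4_src ipv4_addr 1.2.3.4 / raw_encap ... / end
 *
 * is accepted, while placing set_ipv4_src after raw_encap fails with
 * EINVAL.
 */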
3402
3403 /**
3404  * Validate the modify-header MAC address actions.
3405  *
3406  * @param[in] action_flags
3407  *   Holds the actions detected until now.
3408  * @param[in] action
3409  *   Pointer to the modify action.
3410  * @param[in] item_flags
3411  *   Holds the items detected.
3412  * @param[out] error
3413  *   Pointer to error structure.
3414  *
3415  * @return
3416  *   0 on success, a negative errno value otherwise and rte_errno is set.
3417  */
3418 static int
3419 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3420                                    const struct rte_flow_action *action,
3421                                    const uint64_t item_flags,
3422                                    struct rte_flow_error *error)
3423 {
3424         int ret = 0;
3425
3426         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3427         if (!ret) {
3428                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3429                         return rte_flow_error_set(error, EINVAL,
3430                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3431                                                   NULL,
3432                                                   "no L2 item in pattern");
3433         }
3434         return ret;
3435 }
3436
3437 /**
3438  * Validate the modify-header IPv4 address actions.
3439  *
3440  * @param[in] action_flags
3441  *   Holds the actions detected until now.
3442  * @param[in] action
3443  *   Pointer to the modify action.
3444  * @param[in] item_flags
3445  *   Holds the items detected.
3446  * @param[out] error
3447  *   Pointer to error structure.
3448  *
3449  * @return
3450  *   0 on success, a negative errno value otherwise and rte_errno is set.
3451  */
3452 static int
3453 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3454                                     const struct rte_flow_action *action,
3455                                     const uint64_t item_flags,
3456                                     struct rte_flow_error *error)
3457 {
3458         int ret = 0;
3459         uint64_t layer;
3460
3461         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3462         if (!ret) {
3463                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3464                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3465                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3466                 if (!(item_flags & layer))
3467                         return rte_flow_error_set(error, EINVAL,
3468                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3469                                                   NULL,
3470                                                   "no ipv4 item in pattern");
3471         }
3472         return ret;
3473 }
3474
3475 /**
3476  * Validate the modify-header IPv6 address actions.
3477  *
3478  * @param[in] action_flags
3479  *   Holds the actions detected until now.
3480  * @param[in] action
3481  *   Pointer to the modify action.
3482  * @param[in] item_flags
3483  *   Holds the items detected.
3484  * @param[out] error
3485  *   Pointer to error structure.
3486  *
3487  * @return
3488  *   0 on success, a negative errno value otherwise and rte_errno is set.
3489  */
3490 static int
3491 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3492                                     const struct rte_flow_action *action,
3493                                     const uint64_t item_flags,
3494                                     struct rte_flow_error *error)
3495 {
3496         int ret = 0;
3497         uint64_t layer;
3498
3499         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3500         if (!ret) {
3501                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3502                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3503                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3504                 if (!(item_flags & layer))
3505                         return rte_flow_error_set(error, EINVAL,
3506                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3507                                                   NULL,
3508                                                   "no ipv6 item in pattern");
3509         }
3510         return ret;
3511 }
3512
3513 /**
3514  * Validate the modify-header TP actions.
3515  *
3516  * @param[in] action_flags
3517  *   Holds the actions detected until now.
3518  * @param[in] action
3519  *   Pointer to the modify action.
3520  * @param[in] item_flags
3521  *   Holds the items detected.
3522  * @param[out] error
3523  *   Pointer to error structure.
3524  *
3525  * @return
3526  *   0 on success, a negative errno value otherwise and rte_errno is set.
3527  */
3528 static int
3529 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3530                                   const struct rte_flow_action *action,
3531                                   const uint64_t item_flags,
3532                                   struct rte_flow_error *error)
3533 {
3534         int ret = 0;
3535         uint64_t layer;
3536
3537         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3538         if (!ret) {
3539                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3540                                  MLX5_FLOW_LAYER_INNER_L4 :
3541                                  MLX5_FLOW_LAYER_OUTER_L4;
3542                 if (!(item_flags & layer))
3543                         return rte_flow_error_set(error, EINVAL,
3544                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3545                                                   NULL, "no transport layer "
3546                                                   "in pattern");
3547         }
3548         return ret;
3549 }
3550
3551 /**
3552  * Validate the modify-header actions of increment/decrement
3553  * TCP Sequence-number.
3554  *
3555  * @param[in] action_flags
3556  *   Holds the actions detected until now.
3557  * @param[in] action
3558  *   Pointer to the modify action.
3559  * @param[in] item_flags
3560  *   Holds the items detected.
3561  * @param[out] error
3562  *   Pointer to error structure.
3563  *
3564  * @return
3565  *   0 on success, a negative errno value otherwise and rte_errno is set.
3566  */
3567 static int
3568 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3569                                        const struct rte_flow_action *action,
3570                                        const uint64_t item_flags,
3571                                        struct rte_flow_error *error)
3572 {
3573         int ret = 0;
3574         uint64_t layer;
3575
3576         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3577         if (!ret) {
3578                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3579                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3580                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3581                 if (!(item_flags & layer))
3582                         return rte_flow_error_set(error, EINVAL,
3583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3584                                                   NULL, "no TCP item in"
3585                                                   " pattern");
3586                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3587                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3588                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3589                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3590                         return rte_flow_error_set(error, EINVAL,
3591                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3592                                                   NULL,
3593                                                   "cannot decrease and increase"
3594                                                   " TCP sequence number"
3595                                                   " at the same time");
3596         }
3597         return ret;
3598 }
3599
3600 /**
3601  * Validate the modify-header actions of increment/decrement
3602  * TCP Acknowledgment number.
3603  *
3604  * @param[in] action_flags
3605  *   Holds the actions detected until now.
3606  * @param[in] action
3607  *   Pointer to the modify action.
3608  * @param[in] item_flags
3609  *   Holds the items detected.
3610  * @param[out] error
3611  *   Pointer to error structure.
3612  *
3613  * @return
3614  *   0 on success, a negative errno value otherwise and rte_errno is set.
3615  */
3616 static int
3617 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3618                                        const struct rte_flow_action *action,
3619                                        const uint64_t item_flags,
3620                                        struct rte_flow_error *error)
3621 {
3622         int ret = 0;
3623         uint64_t layer;
3624
3625         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3626         if (!ret) {
3627                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3628                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3629                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3630                 if (!(item_flags & layer))
3631                         return rte_flow_error_set(error, EINVAL,
3632                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3633                                                   NULL, "no TCP item in"
3634                                                   " pattern");
3635                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3636                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3637                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3638                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3639                         return rte_flow_error_set(error, EINVAL,
3640                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3641                                                   NULL,
3642                                                   "cannot decrease and increase"
3643                                                   " TCP acknowledgment number"
3644                                                   " at the same time");
3645         }
3646         return ret;
3647 }
3648
3649 /**
3650  * Validate the modify-header TTL actions.
3651  *
3652  * @param[in] action_flags
3653  *   Holds the actions detected until now.
3654  * @param[in] action
3655  *   Pointer to the modify action.
3656  * @param[in] item_flags
3657  *   Holds the items detected.
3658  * @param[out] error
3659  *   Pointer to error structure.
3660  *
3661  * @return
3662  *   0 on success, a negative errno value otherwise and rte_errno is set.
3663  */
3664 static int
3665 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3666                                    const struct rte_flow_action *action,
3667                                    const uint64_t item_flags,
3668                                    struct rte_flow_error *error)
3669 {
3670         int ret = 0;
3671         uint64_t layer;
3672
3673         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3674         if (!ret) {
3675                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3676                                  MLX5_FLOW_LAYER_INNER_L3 :
3677                                  MLX5_FLOW_LAYER_OUTER_L3;
3678                 if (!(item_flags & layer))
3679                         return rte_flow_error_set(error, EINVAL,
3680                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3681                                                   NULL,
3682                                                   "no IP protocol in pattern");
3683         }
3684         return ret;
3685 }
3686
3687 /**
3688  * Validate jump action.
3689  *
3690  * @param[in] action
3691  *   Pointer to the jump action.
3692  * @param[in] action_flags
3693  *   Holds the actions detected until now.
3694  * @param[in] attributes
3695  *   Pointer to flow attributes
3696  * @param[in] external
3697  *   Action belongs to flow rule created by request external to PMD.
3698  * @param[out] error
3699  *   Pointer to error structure.
3700  *
3701  * @return
3702  *   0 on success, a negative errno value otherwise and rte_errno is set.
3703  */
3704 static int
3705 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3706                              uint64_t action_flags,
3707                              const struct rte_flow_attr *attributes,
3708                              bool external, struct rte_flow_error *error)
3709 {
3710         uint32_t target_group, table;
3711         int ret = 0;
3712
3713         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3714                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3715                 return rte_flow_error_set(error, EINVAL,
3716                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3717                                           "can't have 2 fate actions in"
3718                                           " the same flow");
3719         if (action_flags & MLX5_FLOW_ACTION_METER)
3720                 return rte_flow_error_set(error, ENOTSUP,
3721                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3722                                           "jump with meter not supported");
3723         if (!action->conf)
3724                 return rte_flow_error_set(error, EINVAL,
3725                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3726                                           NULL, "action configuration not set");
3727         target_group =
3728                 ((const struct rte_flow_action_jump *)action->conf)->group;
3729         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3730                                        true, &table, error);
3731         if (ret)
3732                 return ret;
3733         if (attributes->group == target_group)
3734                 return rte_flow_error_set(error, EINVAL,
3735                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3736                                           "target group must be other than"
3737                                           " the current flow group");
3738         return 0;
3739 }
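
/*
 * A jump sketch (group numbers illustrative): for a rule created in
 * group 1,
 *
 *     const struct rte_flow_action_jump conf = { .group = 3 };
 *
 * passes once mlx5_flow_group_to_table() maps group 3 to a device table
 * id, whereas .group = 1 is rejected above as a jump to the current
 * group.
 */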
3740
3741 /**
3742  * Validate the port_id action.
3743  *
3744  * @param[in] dev
3745  *   Pointer to rte_eth_dev structure.
3746  * @param[in] action_flags
3747  *   Bit-fields that holds the actions detected until now.
3748  * @param[in] action
3749  *   Port_id RTE action structure.
3750  * @param[in] attr
3751  *   Attributes of flow that includes this action.
3752  * @param[out] error
3753  *   Pointer to error structure.
3754  *
3755  * @return
3756  *   0 on success, a negative errno value otherwise and rte_errno is set.
3757  */
3758 static int
3759 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3760                                 uint64_t action_flags,
3761                                 const struct rte_flow_action *action,
3762                                 const struct rte_flow_attr *attr,
3763                                 struct rte_flow_error *error)
3764 {
3765         const struct rte_flow_action_port_id *port_id;
3766         struct mlx5_priv *act_priv;
3767         struct mlx5_priv *dev_priv;
3768         uint16_t port;
3769
3770         if (!attr->transfer)
3771                 return rte_flow_error_set(error, ENOTSUP,
3772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3773                                           NULL,
3774                                           "port id action is valid in transfer"
3775                                           " mode only");
3776         if (!action || !action->conf)
3777                 return rte_flow_error_set(error, ENOTSUP,
3778                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3779                                           NULL,
3780                                           "port id action parameters must be"
3781                                           " specified");
3782         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3783                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3784                 return rte_flow_error_set(error, EINVAL,
3785                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3786                                           "can have only one fate action in"
3787                                           " a flow");
3788         dev_priv = mlx5_dev_to_eswitch_info(dev);
3789         if (!dev_priv)
3790                 return rte_flow_error_set(error, rte_errno,
3791                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3792                                           NULL,
3793                                           "failed to obtain E-Switch info");
3794         port_id = action->conf;
3795         port = port_id->original ? dev->data->port_id : port_id->id;
3796         act_priv = mlx5_port_to_eswitch_info(port, false);
3797         if (!act_priv)
3798                 return rte_flow_error_set
3799                                 (error, rte_errno,
3800                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3801                                  "failed to obtain E-Switch port id for port");
3802         if (act_priv->domain_id != dev_priv->domain_id)
3803                 return rte_flow_error_set
3804                                 (error, EINVAL,
3805                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3806                                  "port does not belong to"
3807                                  " E-Switch being configured");
3808         return 0;
3809 }
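
/*
 * Editorial sketch: a port_id action usable only with attr->transfer set, as
 * validated above. The target port 1 is hypothetical and must share the
 * E-Switch domain with the configuring device.
 */
#if 0
static const struct rte_flow_action_port_id example_port_id = {
        .original = 0,  /* route to .id, not to the flow's original port */
        .id = 1,        /* hypothetical E-Switch port */
};
#endif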
3810
3811 /**
3812  * Get the maximum number of modify header actions.
3813  *
3814  * @param dev
3815  *   Pointer to rte_eth_dev structure.
3816  * @param flags
3817  *   Flags bits to check if root level.
3818  *
3819  * @return
3820  *   Max number of modify header actions device can support.
3821  */
3822 static inline unsigned int
3823 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
3824                               uint64_t flags)
3825 {
3826         /*
3827          * There's no way to directly query the max capacity from FW.
3828          * The maximal value on root table should be assumed to be supported.
3829          * The maximal value on the root table is assumed to be supported.
3830         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3831                 return MLX5_MAX_MODIFY_NUM;
3832         else
3833                 return MLX5_ROOT_TBL_MODIFY_NUM;
3834 }
3835
3836 /**
3837  * Validate the meter action.
3838  *
3839  * @param[in] dev
3840  *   Pointer to rte_eth_dev structure.
3841  * @param[in] action_flags
3842  *   Bit-fields that holds the actions detected until now.
3843  * @param[in] action
3844  *   Pointer to the meter action.
3845  * @param[in] attr
3846  *   Attributes of flow that includes this action.
3847  * @param[out] error
3848  *   Pointer to error structure.
3849  *
3850  * @return
3851  *   0 on success, a negative errno value otherwise and rte_errno is set.
3852  */
3853 static int
3854 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3855                                 uint64_t action_flags,
3856                                 const struct rte_flow_action *action,
3857                                 const struct rte_flow_attr *attr,
3858                                 struct rte_flow_error *error)
3859 {
3860         struct mlx5_priv *priv = dev->data->dev_private;
3861         const struct rte_flow_action_meter *am = action->conf;
3862         struct mlx5_flow_meter *fm;
3863
3864         if (!am)
3865                 return rte_flow_error_set(error, EINVAL,
3866                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3867                                           "meter action conf is NULL");
3868
3869         if (action_flags & MLX5_FLOW_ACTION_METER)
3870                 return rte_flow_error_set(error, ENOTSUP,
3871                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3872                                           "meter chaining not supported");
3873         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3874                 return rte_flow_error_set(error, ENOTSUP,
3875                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3876                                           "meter with jump not supported");
3877         if (!priv->mtr_en)
3878                 return rte_flow_error_set(error, ENOTSUP,
3879                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3880                                           NULL,
3881                                           "meter action not supported");
3882         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3883         if (!fm)
3884                 return rte_flow_error_set(error, EINVAL,
3885                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3886                                           "Meter not found");
3887         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
3888               (!fm->ingress && !attr->ingress && attr->egress) ||
3889               (!fm->egress && !attr->egress && attr->ingress))))
3890                 return rte_flow_error_set(error, EINVAL,
3891                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3892                                           "Flow attributes are either invalid "
3893                                           "or have a conflict with current "
3894                                           "meter attributes");
3895         return 0;
3896 }
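
/*
 * Editorial sketch: a meter action referencing a previously created meter
 * object. The ID 1 is hypothetical; mlx5_flow_meter_find() must resolve it
 * and its attributes must not conflict with the flow attributes, per the
 * checks above.
 */
#if 0
static const struct rte_flow_action_meter example_meter = {
        .mtr_id = 1,
};
#endif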
3897
3898 /**
3899  * Validate the age action.
3900  *
3901  * @param[in] action_flags
3902  *   Holds the actions detected until now.
3903  * @param[in] action
3904  *   Pointer to the age action.
3905  * @param[in] dev
3906  *   Pointer to the Ethernet device structure.
3907  * @param[out] error
3908  *   Pointer to error structure.
3909  *
3910  * @return
3911  *   0 on success, a negative errno value otherwise and rte_errno is set.
3912  */
3913 static int
3914 flow_dv_validate_action_age(uint64_t action_flags,
3915                             const struct rte_flow_action *action,
3916                             struct rte_eth_dev *dev,
3917                             struct rte_flow_error *error)
3918 {
3919         struct mlx5_priv *priv = dev->data->dev_private;
3920         const struct rte_flow_action_age *age = action->conf;
3921
3922         if (!priv->config.devx || priv->counter_fallback)
3923                 return rte_flow_error_set(error, ENOTSUP,
3924                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3925                                           NULL,
3926                                           "age action not supported");
3927         if (!(action->conf))
3928                 return rte_flow_error_set(error, EINVAL,
3929                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3930                                           "configuration cannot be null");
3931         if (age->timeout >= UINT16_MAX / 2 / 10)
3932                 return rte_flow_error_set(error, ENOTSUP,
3933                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3934                                           "Max age time: 3275 seconds");
3935         if (action_flags & MLX5_FLOW_ACTION_AGE)
3936                 return rte_flow_error_set(error, EINVAL,
3937                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3938                                           "Duplicate age actions set");
3939         return 0;
3940 }
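
/*
 * Editorial note on the timeout bound above: it follows from integer
 * arithmetic, UINT16_MAX / 2 / 10 == 65535 / 2 / 10 == 3276, so the largest
 * accepted timeout is 3275 seconds, as the error message states.
 */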
3941
3942 /**
3943  * Validate the modify-header IPv4 DSCP actions.
3944  *
3945  * @param[in] action_flags
3946  *   Holds the actions detected until now.
3947  * @param[in] action
3948  *   Pointer to the modify action.
3949  * @param[in] item_flags
3950  *   Holds the items detected.
3951  * @param[out] error
3952  *   Pointer to error structure.
3953  *
3954  * @return
3955  *   0 on success, a negative errno value otherwise and rte_errno is set.
3956  */
3957 static int
3958 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3959                                          const struct rte_flow_action *action,
3960                                          const uint64_t item_flags,
3961                                          struct rte_flow_error *error)
3962 {
3963         int ret = 0;
3964
3965         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3966         if (!ret) {
3967                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3968                         return rte_flow_error_set(error, EINVAL,
3969                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3970                                                   NULL,
3971                                                   "no ipv4 item in pattern");
3972         }
3973         return ret;
3974 }
3975
3976 /**
3977  * Validate the modify-header IPv6 DSCP actions.
3978  *
3979  * @param[in] action_flags
3980  *   Holds the actions detected until now.
3981  * @param[in] action
3982  *   Pointer to the modify action.
3983  * @param[in] item_flags
3984  *   Holds the items detected.
3985  * @param[out] error
3986  *   Pointer to error structure.
3987  *
3988  * @return
3989  *   0 on success, a negative errno value otherwise and rte_errno is set.
3990  */
3991 static int
3992 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3993                                          const struct rte_flow_action *action,
3994                                          const uint64_t item_flags,
3995                                          struct rte_flow_error *error)
3996 {
3997         int ret = 0;
3998
3999         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4000         if (!ret) {
4001                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4002                         return rte_flow_error_set(error, EINVAL,
4003                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4004                                                   NULL,
4005                                                   "no ipv6 item in pattern");
4006         }
4007         return ret;
4008 }
4009
4010 /**
4011  * Match modify-header resource.
4012  *
4013  * @param entry
4014  *   Pointer to exist resource entry object.
4015  * @param ctx
4016  *   Pointer to new modify-header resource.
4017  *
4018  * @return
4019  *   0 on matching, -1 otherwise.
4020  */
4021 static int
4022 flow_dv_modify_hdr_resource_match(struct mlx5_hlist_entry *entry, void *ctx)
4023 {
4024         struct mlx5_flow_dv_modify_hdr_resource *resource;
4025         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
4026         uint32_t actions_len;
4027
4028         resource = (struct mlx5_flow_dv_modify_hdr_resource *)ctx;
4029         cache_resource = container_of(entry,
4030                                       struct mlx5_flow_dv_modify_hdr_resource,
4031                                       entry);
4032         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4033         if (resource->entry.key == cache_resource->entry.key &&
4034             resource->ft_type == cache_resource->ft_type &&
4035             resource->actions_num == cache_resource->actions_num &&
4036             resource->flags == cache_resource->flags &&
4037             !memcmp((const void *)resource->actions,
4038                     (const void *)cache_resource->actions,
4039                     actions_len))
4040                 return 0;
4041         return -1;
4042 }
4043
4044 /**
4045  * Find existing modify-header resource or create and register a new one.
4046  *
4047  * @param[in, out] dev
4048  *   Pointer to rte_eth_dev structure.
4049  * @param[in, out] resource
4050  *   Pointer to modify-header resource.
4051  * @param[in, out] dev_flow
4052  *   Pointer to the dev_flow.
4053  * @param[out] error
4054  *   Pointer to error structure.
4055  *
4056  * @return
4057  *   0 on success, a negative errno value otherwise and rte_errno is set.
4058  */
4059 static int
4060 flow_dv_modify_hdr_resource_register
4061                         (struct rte_eth_dev *dev,
4062                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4063                          struct mlx5_flow *dev_flow,
4064                          struct rte_flow_error *error)
4065 {
4066         struct mlx5_priv *priv = dev->data->dev_private;
4067         struct mlx5_dev_ctx_shared *sh = priv->sh;
4068         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
4069         struct mlx5dv_dr_domain *ns;
4070         uint32_t actions_len;
4071         struct mlx5_hlist_entry *entry;
4072         union mlx5_flow_modify_hdr_key hdr_mod_key = {
4073                 {
4074                         .ft_type = resource->ft_type,
4075                         .actions_num = resource->actions_num,
4076                         .group = dev_flow->dv.group,
4077                         .cksum = 0,
4078                 }
4079         };
4080         int ret;
4081
4082         resource->flags = dev_flow->dv.group ? 0 :
4083                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4084         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4085                                     resource->flags))
4086                 return rte_flow_error_set(error, EOVERFLOW,
4087                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4088                                           "too many modify header items");
4089         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4090                 ns = sh->fdb_domain;
4091         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4092                 ns = sh->tx_domain;
4093         else
4094                 ns = sh->rx_domain;
4095         /* Lookup a matching resource from cache. */
4096         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4097         hdr_mod_key.cksum = __rte_raw_cksum(resource->actions, actions_len, 0);
4098         resource->entry.key = hdr_mod_key.v64;
4099         entry = mlx5_hlist_lookup_ex(sh->modify_cmds, resource->entry.key,
4100                                      flow_dv_modify_hdr_resource_match,
4101                                      (void *)resource);
4102         if (entry) {
4103                 cache_resource = container_of(entry,
4104                                         struct mlx5_flow_dv_modify_hdr_resource,
4105                                         entry);
4106                 DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4107                         (void *)cache_resource,
4108                         rte_atomic32_read(&cache_resource->refcnt));
4109                 rte_atomic32_inc(&cache_resource->refcnt);
4110                 dev_flow->handle->dvh.modify_hdr = cache_resource;
4111                 return 0;
4113         }
4114         /* Register new modify-header resource. */
4115         cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
4116                                     sizeof(*cache_resource) + actions_len, 0,
4117                                     SOCKET_ID_ANY);
4118         if (!cache_resource)
4119                 return rte_flow_error_set(error, ENOMEM,
4120                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4121                                           "cannot allocate resource memory");
4122         *cache_resource = *resource;
4123         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
4124         ret = mlx5_flow_os_create_flow_action_modify_header
4125                                         (sh->ctx, ns, cache_resource,
4126                                          actions_len, &cache_resource->action);
4127         if (ret) {
4128                 mlx5_free(cache_resource);
4129                 return rte_flow_error_set(error, ENOMEM,
4130                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4131                                           NULL, "cannot create action");
4132         }
4133         rte_atomic32_init(&cache_resource->refcnt);
4134         rte_atomic32_inc(&cache_resource->refcnt);
4135         if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
4136                                  flow_dv_modify_hdr_resource_match,
4137                                  (void *)cache_resource)) {
4138                 claim_zero(mlx5_flow_os_destroy_flow_action
4139                                                 (cache_resource->action));
4140                 mlx5_free(cache_resource);
4141                 return rte_flow_error_set(error, EEXIST,
4142                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4143                                           NULL, "action already exists");
4144         }
4145         dev_flow->handle->dvh.modify_hdr = cache_resource;
4146         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
4147                 (void *)cache_resource,
4148                 rte_atomic32_read(&cache_resource->refcnt));
4149         return 0;
4150 }
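
/*
 * Editorial note: the function above follows the driver's usual
 * lookup-or-create pattern for shared objects: fold ft_type, actions_num,
 * group and an __rte_raw_cksum() of the action list into the 64-bit
 * entry.key, probe the hash list with an exact-match callback, take a
 * reference on a hit, otherwise allocate, create the DR action and insert;
 * if a racing thread inserted first, release the local copy and fail with
 * EEXIST.
 */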
4151
4152 /**
4153  * Get DV flow counter by index.
4154  *
4155  * @param[in] dev
4156  *   Pointer to the Ethernet device structure.
4157  * @param[in] idx
4158  *   mlx5 flow counter index in the container.
4159  * @param[out] ppool
4160  *   mlx5 flow counter pool in the container.
4161  *
4162  * @return
4163  *   Pointer to the counter, NULL otherwise.
4164  */
4165 static struct mlx5_flow_counter *
4166 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4167                            uint32_t idx,
4168                            struct mlx5_flow_counter_pool **ppool)
4169 {
4170         struct mlx5_priv *priv = dev->data->dev_private;
4171         struct mlx5_pools_container *cont;
4172         struct mlx5_flow_counter_pool *pool;
4173         uint32_t batch = 0, age = 0;
4174
4175         idx--;
4176         age = MLX_CNT_IS_AGE(idx);
4177         idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
4178         if (idx >= MLX5_CNT_BATCH_OFFSET) {
4179                 idx -= MLX5_CNT_BATCH_OFFSET;
4180                 batch = 1;
4181         }
4182         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4183         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
4184         pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
4185         MLX5_ASSERT(pool);
4186         if (ppool)
4187                 *ppool = pool;
4188         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4189 }
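
/*
 * Editorial sketch of the index decoding above: a counter index is stored
 * 1-based and carries optional age/batch offsets that are stripped in this
 * order before locating the pool and the in-pool slot. Illustrative only;
 * the macro values come from mlx5_flow.h.
 */
#if 0
        uint32_t idx = counter - 1;             /* 1-based -> 0-based */
        uint32_t age = MLX_CNT_IS_AGE(idx);     /* aging offset present? */

        idx -= age ? MLX5_CNT_AGE_OFFSET : 0;
        if (idx >= MLX5_CNT_BATCH_OFFSET)       /* batch container? */
                idx -= MLX5_CNT_BATCH_OFFSET;
        /* pool slot = idx / MLX5_COUNTERS_PER_POOL,
         * counter   = idx % MLX5_COUNTERS_PER_POOL. */
#endif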
4190
4191 /**
4192  * Check the devx counter belongs to the pool.
4193  *
4194  * @param[in] pool
4195  *   Pointer to the counter pool.
4196  * @param[in] id
4197  *   The counter devx ID.
4198  *
4199  * @return
4200  *   True if counter belongs to the pool, false otherwise.
4201  */
4202 static bool
4203 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4204 {
4205         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4206                    MLX5_COUNTERS_PER_POOL;
4207
4208         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4209                 return true;
4210         return false;
4211 }
4212
4213 /**
4214  * Get a pool by devx counter ID.
4215  *
4216  * @param[in] cont
4217  *   Pointer to the counter container.
4218  * @param[in] id
4219  *   The counter devx ID.
4220  *
4221  * @return
4222  *   The counter pool pointer if it exists, NULL otherwise.
4223  */
4224 static struct mlx5_flow_counter_pool *
4225 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
4226 {
4227         uint32_t i;
4228
4229         /* Check last used pool. */
4230         if (cont->last_pool_idx != POOL_IDX_INVALID &&
4231             flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
4232                 return cont->pools[cont->last_pool_idx];
4233         /* ID out of range means no suitable pool in the container. */
4234         if (id > cont->max_id || id < cont->min_id)
4235                 return NULL;
4236         /*
4237          * Search from the end of the container, since counter IDs mostly
4238          * increase in sequence, so the last pool is usually the needed
4239          * one.
4240          */
4241         i = rte_atomic16_read(&cont->n_valid);
4242         while (i--) {
4243                 struct mlx5_flow_counter_pool *pool = cont->pools[i];
4244
4245                 if (flow_dv_is_counter_in_pool(pool, id))
4246                         return pool;
4247         }
4248         return NULL;
4249 }
4250
4251 /**
4252  * Allocate new memory for the counter values, wrapped with all the
4253  * needed management structures.
4254  *
4255  * @param[in] dev
4256  *   Pointer to the Ethernet device structure.
4257  * @param[in] raws_n
4258  *   The raw memory areas - each one for MLX5_COUNTERS_PER_POOL counters.
4259  *
4260  * @return
4261  *   The new memory management pointer on success, otherwise NULL and rte_errno
4262  *   is set.
4263  */
4264 static struct mlx5_counter_stats_mem_mng *
4265 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
4266 {
4267         struct mlx5_priv *priv = dev->data->dev_private;
4268         struct mlx5_dev_ctx_shared *sh = priv->sh;
4269         struct mlx5_devx_mkey_attr mkey_attr;
4270         struct mlx5_counter_stats_mem_mng *mem_mng;
4271         volatile struct flow_counter_stats *raw_data;
4272         int size = (sizeof(struct flow_counter_stats) *
4273                         MLX5_COUNTERS_PER_POOL +
4274                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
4275                         sizeof(struct mlx5_counter_stats_mem_mng);
4276         size_t pgsize = rte_mem_page_size();
4277         if (pgsize == (size_t)-1) {
4278                 DRV_LOG(ERR, "Failed to get mem page size");
4279                 rte_errno = ENOMEM;
4280                 return NULL;
4281         }
4282         uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize,
4283                                   SOCKET_ID_ANY);
4284         int i;
4285
4286         if (!mem) {
4287                 rte_errno = ENOMEM;
4288                 return NULL;
4289         }
4290         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
4291         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
4292         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
4293                                                  IBV_ACCESS_LOCAL_WRITE);
4294         if (!mem_mng->umem) {
4295                 rte_errno = errno;
4296                 mlx5_free(mem);
4297                 return NULL;
4298         }
4299         mkey_attr.addr = (uintptr_t)mem;
4300         mkey_attr.size = size;
4301         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
4302         mkey_attr.pd = sh->pdn;
4303         mkey_attr.log_entity_size = 0;
4304         mkey_attr.pg_access = 0;
4305         mkey_attr.klm_array = NULL;
4306         mkey_attr.klm_num = 0;
4307         if (priv->config.hca_attr.relaxed_ordering_write &&
4308                 priv->config.hca_attr.relaxed_ordering_read  &&
4309                 !haswell_broadwell_cpu)
4310                 mkey_attr.relaxed_ordering = 1;
4311         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
4312         if (!mem_mng->dm) {
4313                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
4314                 rte_errno = errno;
4315                 mlx5_free(mem);
4316                 return NULL;
4317         }
4318         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
4319         raw_data = (volatile struct flow_counter_stats *)mem;
4320         for (i = 0; i < raws_n; ++i) {
4321                 mem_mng->raws[i].mem_mng = mem_mng;
4322                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
4323         }
4324         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
4325         return mem_mng;
4326 }
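
/*
 * Editorial note: layout of the single allocation made above, front to back
 * (only the leading raw-data region is registered as umem/mkey for DevX to
 * write statistics into):
 *
 *   +-----------------------------+---------------------+----------------+
 *   | raw counter data            | raws[] descriptors  | mem_mng struct |
 *   | raws_n * COUNTERS_PER_POOL  | raws_n entries      | carved at tail |
 *   +-----------------------------+---------------------+----------------+
 */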
4327
4328 /**
4329  * Resize a counter container.
4330  *
4331  * @param[in] dev
4332  *   Pointer to the Ethernet device structure.
4333  * @param[in] batch
4334  *   Whether the pool is for counter that was allocated by batch command.
4335  * @param[in] age
4336  *   Whether the pool is for Aging counter.
4337  *
4338  * @return
4339  *   0 on success, otherwise negative errno value and rte_errno is set.
4340  */
4341 static int
4342 flow_dv_container_resize(struct rte_eth_dev *dev,
4343                                 uint32_t batch, uint32_t age)
4344 {
4345         struct mlx5_priv *priv = dev->data->dev_private;
4346         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4347                                                                age);
4348         struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
4349         void *old_pools = cont->pools;
4350         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
4351         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4352         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4353
4354         if (!pools) {
4355                 rte_errno = ENOMEM;
4356                 return -ENOMEM;
4357         }
4358         if (old_pools)
4359                 memcpy(pools, old_pools, cont->n *
4360                                        sizeof(struct mlx5_flow_counter_pool *));
4361         /*
4362          * Fallback mode query the counter directly, no background query
4363          * resources are needed.
4364          */
4365         if (!priv->counter_fallback) {
4366                 int i;
4367
4368                 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4369                           MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4370                 if (!mem_mng) {
4371                         mlx5_free(pools);
4372                         return -ENOMEM;
4373                 }
4374                 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4375                         LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4376                                          mem_mng->raws +
4377                                          MLX5_CNT_CONTAINER_RESIZE +
4378                                          i, next);
4379         }
4380         rte_spinlock_lock(&cont->resize_sl);
4381         cont->n = resize;
4382         cont->mem_mng = mem_mng;
4383         cont->pools = pools;
4384         rte_spinlock_unlock(&cont->resize_sl);
4385         if (old_pools)
4386                 mlx5_free(old_pools);
4387         return 0;
4388 }
4389
4390 /**
4391  * Query a devx flow counter.
4392  *
4393  * @param[in] dev
4394  *   Pointer to the Ethernet device structure.
4395  * @param[in] cnt
4396  *   Index to the flow counter.
4397  * @param[out] pkts
4398  *   The statistics value of packets.
4399  * @param[out] bytes
4400  *   The statistics value of bytes.
4401  *
4402  * @return
4403  *   0 on success, otherwise a negative errno value and rte_errno is set.
4404  */
4405 static inline int
4406 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4407                      uint64_t *bytes)
4408 {
4409         struct mlx5_priv *priv = dev->data->dev_private;
4410         struct mlx5_flow_counter_pool *pool = NULL;
4411         struct mlx5_flow_counter *cnt;
4412         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4413         int offset;
4414
4415         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4416         MLX5_ASSERT(pool);
4417         if (counter < MLX5_CNT_BATCH_OFFSET) {
4418                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4419                 if (priv->counter_fallback)
4420                         return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
4421                                         0, pkts, bytes, 0, NULL, NULL, 0);
4422         }
4423
4424         rte_spinlock_lock(&pool->sl);
4425         /*
4426          * A single counter allocation may yield an ID smaller than the
4427          * ones already allocated, in parallel with the host thread read.
4428          * In this case the new counter values must be reported as 0.
4429          */
4430         if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4431                 *pkts = 0;
4432                 *bytes = 0;
4433         } else {
4434                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4435                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4436                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4437         }
4438         rte_spinlock_unlock(&pool->sl);
4439         return 0;
4440 }
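
/*
 * Editorial sketch: a hypothetical caller of the query helper above. The
 * returned values are cumulative, so callers such as flow_dv_counter_alloc()
 * save them at allocation time and report deltas afterwards.
 */
#if 0
        uint64_t pkts = 0, bytes = 0;

        if (!_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes))
                DRV_LOG(DEBUG, "counter %u: %lu pkts, %lu bytes",
                        cnt_idx, (unsigned long)pkts, (unsigned long)bytes);
#endif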
4441
4442 /**
4443  * Create and initialize a new counter pool.
4444  *
4445  * @param[in] dev
4446  *   Pointer to the Ethernet device structure.
4447  * @param[out] dcs
4448  *   The devX counter handle.
4449  * @param[in] batch
4450  *   Whether the pool is for counter that was allocated by batch command.
4451  * @param[in] age
4452  *   Whether the pool is for counter that was allocated for aging.
4455  *
4456  * @return
4457  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4458  */
4459 static struct mlx5_flow_counter_pool *
4460 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4461                     uint32_t batch, uint32_t age)
4462 {
4463         struct mlx5_priv *priv = dev->data->dev_private;
4464         struct mlx5_flow_counter_pool *pool;
4465         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4466                                                                age);
4467         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4468         uint32_t size = sizeof(*pool);
4469
4470         if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
4471                 return NULL;
4472         size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
4473         size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
4474         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
4475         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4476         if (!pool) {
4477                 rte_errno = ENOMEM;
4478                 return NULL;
4479         }
4480         pool->min_dcs = dcs;
4481         if (!priv->counter_fallback)
4482                 pool->raw = cont->mem_mng->raws + n_valid %
4483                                                       MLX5_CNT_CONTAINER_RESIZE;
4484         pool->raw_hw = NULL;
4485         pool->type = 0;
4486         pool->type |= (batch ? 0 :  CNT_POOL_TYPE_EXT);
4487         pool->type |= (!age ? 0 :  CNT_POOL_TYPE_AGE);
4488         pool->query_gen = 0;
4489         rte_spinlock_init(&pool->sl);
4490         TAILQ_INIT(&pool->counters[0]);
4491         TAILQ_INIT(&pool->counters[1]);
4492         TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4493         pool->index = n_valid;
4494         cont->pools[n_valid] = pool;
4495         if (!batch) {
4496                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4497
4498                 if (base < cont->min_id)
4499                         cont->min_id = base;
4500                 if (base > cont->max_id)
4501                         cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4502                 cont->last_pool_idx = pool->index;
4503         }
4504         /* Pool initialization must be visible before host thread access. */
4505         rte_io_wmb();
4506         rte_atomic16_add(&cont->n_valid, 1);
4507         return pool;
4508 }
4509
4510 /**
4511  * Restore skipped counters in the pool.
4512  *
4513  * As the counter pool query requires the first counter dcs
4514  * ID to be 4-aligned, the counters of a pool whose min_dcs
4515  * ID is not 4-aligned are skipped.
4516  * Once another min_dcs with an ID lower than these skipped
4517  * counters' dcs IDs appears, the skipped counters become
4518  * safe to use.
4519  * Should be called whenever min_dcs is updated.
4521  *
4522  * @param[in] pool
4523  *   Current counter pool.
4524  * @param[in] last_min_dcs
4525  *   Last min_dcs.
4526  */
4527 static void
4528 flow_dv_counter_restore(struct mlx5_flow_counter_pool *pool,
4529                         struct mlx5_devx_obj *last_min_dcs)
4530 {
4531         struct mlx5_flow_counter_ext *cnt_ext;
4532         uint32_t offset, new_offset;
4533         uint32_t skip_cnt = 0;
4534         uint32_t i;
4535
4536         if (!pool->skip_cnt)
4537                 return;
4538         /*
4539          * If the last min_dcs is not valid, skipped counters may exist
4540          * even after it, so set the offset to cover the whole pool.
4541          */
4542         if (last_min_dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
4543                 offset = MLX5_COUNTERS_PER_POOL;
4544         else
4545                 offset = last_min_dcs->id % MLX5_COUNTERS_PER_POOL;
4546         new_offset = pool->min_dcs->id % MLX5_COUNTERS_PER_POOL;
4547         /*
4548          * Check the counters from 1 up to the last_min_dcs offset.
4549          * Counters skipped before the new min_dcs mean the pool still
4550          * has skipped counters; counters skipped after it are ready to
4551          * use. The offset 0 counter must be empty or min_dcs, so start at 1.
4552          */
4553         for (i = 1; i < offset; i++) {
4554                 cnt_ext = MLX5_GET_POOL_CNT_EXT(pool, i);
4555                 if (cnt_ext->skipped) {
4556                         if (i > new_offset) {
4557                                 cnt_ext->skipped = 0;
4558                                 TAILQ_INSERT_TAIL
4559                                         (&pool->counters[pool->query_gen],
4560                                          MLX5_POOL_GET_CNT(pool, i), next);
4561                         } else {
4562                                 skip_cnt++;
4563                         }
4564                 }
4565         }
4566         if (!skip_cnt)
4567                 pool->skip_cnt = 0;
4568 }
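
/*
 * Editorial example: assuming MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT is 4 (per
 * the comment above), a pool created from a single dcs with ID 0x12 (not
 * 4-aligned) marks its counters skipped. If a later allocation returns dcs
 * ID 0x10 (4-aligned and lower), it becomes min_dcs and the routine above
 * returns the counters skipped after offset 0x10 % MLX5_COUNTERS_PER_POOL
 * to the free list.
 */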
4569
4570 /**
4571  * Prepare a new counter and/or a new counter pool.
4572  *
4573  * @param[in] dev
4574  *   Pointer to the Ethernet device structure.
4575  * @param[out] cnt_free
4576  *   Where to put the pointer of a new counter.
4577  * @param[in] batch
4578  *   Whether the pool is for counter that was allocated by batch command.
4579  * @param[in] age
4580  *   Whether the pool is for counter that was allocated for aging.
4581  *
4582  * @return
4583  *   The counter pool pointer and @p cnt_free is set on success,
4584  *   NULL otherwise and rte_errno is set.
4585  */
4586 static struct mlx5_flow_counter_pool *
4587 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4588                              struct mlx5_flow_counter **cnt_free,
4589                              uint32_t batch, uint32_t age)
4590 {
4591         struct mlx5_priv *priv = dev->data->dev_private;
4592         struct mlx5_pools_container *cont;
4593         struct mlx5_flow_counter_pool *pool;
4594         struct mlx5_counters tmp_tq;
4595         struct mlx5_devx_obj *last_min_dcs;
4596         struct mlx5_devx_obj *dcs = NULL;
4597         struct mlx5_flow_counter *cnt;
4598         uint32_t add2other;
4599         uint32_t i;
4600
4601         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4602         if (!batch) {
4603 retry:
4604                 add2other = 0;
4605                 /* bulk_bitmap must be 0 for single counter allocation. */
4606                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4607                 if (!dcs)
4608                         return NULL;
4609                 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4610                 /* Check if the counter belongs to an existing pool ID range. */
4611                 if (!pool) {
4612                         pool = flow_dv_find_pool_by_id
4613                                (MLX5_CNT_CONTAINER
4614                                (priv->sh, batch, (age ^ 0x1)), dcs->id);
4615                         /*
4616                          * The pool exists in the other container; the
4617                          * counter will be added there and reallocated later.
4618                          */
4619                         if (pool) {
4620                                 add2other = 1;
4621                         } else {
4622                                 pool = flow_dv_pool_create(dev, dcs, batch,
4623                                                            age);
4624                                 if (!pool) {
4625                                         mlx5_devx_cmd_destroy(dcs);
4626                                         return NULL;
4627                                 }
4628                         }
4629                 }
4630                 if ((dcs->id < pool->min_dcs->id ||
4631                     pool->min_dcs->id &
4632                     (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1)) &&
4633                     !(dcs->id & (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))) {
4634                         /*
4635                          * Update the pool min_dcs only if the current dcs
4636                          * is valid and the existing min_dcs is either not
4637                          * valid or greater than the new dcs.
4638                          */
4639                         last_min_dcs = pool->min_dcs;
4640                         rte_atomic64_set(&pool->a64_dcs,
4641                                          (int64_t)(uintptr_t)dcs);
4642                         /*
4643                          * Restore any skipped counters if the new min_dcs
4644                          * ID is smaller or the previous min_dcs was not valid.
4645                          */
4646                         if (dcs->id < last_min_dcs->id ||
4647                             last_min_dcs->id &
4648                             (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1))
4649                                 flow_dv_counter_restore(pool, last_min_dcs);
4650                 }
4651                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4652                 cnt = MLX5_POOL_GET_CNT(pool, i);
4653                 cnt->pool = pool;
4654                 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
4655                 /*
4656                  * If min_dcs is not valid, the newly allocated dcs also
4657                  * failed to become a valid min_dcs, so just skip it.
4658                  * Likewise, if min_dcs is valid and the new dcs ID is
4659                  * smaller but did not become min_dcs, skip it as well.
4660                  */
4661                 if (pool->min_dcs->id &
4662                     (MLX5_CNT_BATCH_QUERY_ID_ALIGNMENT - 1) ||
4663                     dcs->id < pool->min_dcs->id) {
4664                         MLX5_GET_POOL_CNT_EXT(pool, i)->skipped = 1;
4665                         pool->skip_cnt = 1;
4666                         goto retry;
4667                 }
4668                 if (add2other) {
4669                         TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen],
4670                                           cnt, next);
4671                         goto retry;
4672                 }
4673                 *cnt_free = cnt;
4674                 return pool;
4675         }
4676         /* bulk_bitmap is in 128 counters units. */
4677         /* bulk_bitmap is in units of 128 counters. */
4678                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4679         if (!dcs) {
4680                 rte_errno = ENODATA;
4681                 return NULL;
4682         }
4683         pool = flow_dv_pool_create(dev, dcs, batch, age);
4684         if (!pool) {
4685                 mlx5_devx_cmd_destroy(dcs);
4686                 return NULL;
4687         }
4688         TAILQ_INIT(&tmp_tq);
4689         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4690                 cnt = MLX5_POOL_GET_CNT(pool, i);
4691                 cnt->pool = pool;
4692                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4693         }
4694         rte_spinlock_lock(&cont->csl);
4695         TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
4696         rte_spinlock_unlock(&cont->csl);
4697         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4698         (*cnt_free)->pool = pool;
4699         return pool;
4700 }
4701
4702 /**
4703  * Search for an existing shared counter.
4704  *
4705  * @param[in] dev
4706  *   Pointer to the Ethernet device structure.
4707  * @param[in] id
4708  *   The shared counter ID to search.
4709  * @param[out] ppool
4710  *   mlx5 flow counter pool in the container.
4711  *
4712  * @return
4713  *   NULL if it does not exist, otherwise a pointer to the shared extended counter.
4714  */
4715 static struct mlx5_flow_counter_ext *
4716 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
4717                               struct mlx5_flow_counter_pool **ppool)
4718 {
4719         struct mlx5_priv *priv = dev->data->dev_private;
4720         union mlx5_l3t_data data;
4721         uint32_t cnt_idx;
4722
4723         if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
4724                 return NULL;
4725         cnt_idx = data.dword;
4726         /*
4727          * Shared counters don't have age info. The counter extension
4728          * follows the counter data structure.
4729          */
4730         return (struct mlx5_flow_counter_ext *)
4731                ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
4732 }
4733
4734 /**
4735  * Allocate a flow counter.
4736  *
4737  * @param[in] dev
4738  *   Pointer to the Ethernet device structure.
4739  * @param[in] shared
4740  *   Indicate if this counter is shared with other flows.
4741  * @param[in] id
4742  *   Counter identifier.
4743  * @param[in] group
4744  *   Counter flow group.
4745  * @param[in] age
4746  *   Whether the counter was allocated for aging.
4747  *
4748  * @return
4749  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4750  */
4751 static uint32_t
4752 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4753                       uint16_t group, uint32_t age)
4754 {
4755         struct mlx5_priv *priv = dev->data->dev_private;
4756         struct mlx5_flow_counter_pool *pool = NULL;
4757         struct mlx5_flow_counter *cnt_free = NULL;
4758         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4759         /*
4760          * Currently group 0 flow counter cannot be assigned to a flow if it is
4761          * not the first one in the batch counter allocation, so it is better
4762          * to allocate counters one by one for these flows in a separate
4763          * container.
4764          * A counter can be shared between different groups, so shared
4765          * counters must be taken from the single-counter container.
4766          */
4767         uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
4768         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4769                                                                age);
4770         uint32_t cnt_idx;
4771
4772         if (!priv->config.devx) {
4773                 rte_errno = ENOTSUP;
4774                 return 0;
4775         }
4776         if (shared) {
4777                 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
4778                 if (cnt_ext) {
4779                         if (cnt_ext->ref_cnt + 1 == 0) {
4780                                 rte_errno = E2BIG;
4781                                 return 0;
4782                         }
4783                         cnt_ext->ref_cnt++;
4784                         cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
4785                                   (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
4786                                   + 1;
4787                         return cnt_idx;
4788                 }
4789         }
4790         /* Get free counters from container. */
4791         rte_spinlock_lock(&cont->csl);
4792         cnt_free = TAILQ_FIRST(&cont->counters);
4793         if (cnt_free)
4794                 TAILQ_REMOVE(&cont->counters, cnt_free, next);
4795         rte_spinlock_unlock(&cont->csl);
4796         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
4797                                                        batch, age))
4798                 goto err;
4799         pool = cnt_free->pool;
4800         if (!batch)
4801                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
4802         /* Create a DV counter action only in the first time usage. */
4803         if (!cnt_free->action) {
4804                 uint16_t offset;
4805                 struct mlx5_devx_obj *dcs;
4806                 int ret;
4807
4808                 if (batch) {
4809                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4810                         dcs = pool->min_dcs;
4811                 } else {
4812                         offset = 0;
4813                         dcs = cnt_ext->dcs;
4814                 }
4815                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4816                                                             &cnt_free->action);
4817                 if (ret) {
4818                         rte_errno = errno;
4819                         goto err;
4820                 }
4821         }
4822         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4823                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4824         cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
4825         cnt_idx += age * MLX5_CNT_AGE_OFFSET;
4826         /* Update the counter reset values. */
4827         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4828                                  &cnt_free->bytes))
4829                 goto err;
4830         if (cnt_ext) {
4831                 cnt_ext->shared = shared;
4832                 cnt_ext->ref_cnt = 1;
4833                 cnt_ext->id = id;
4834                 if (shared) {
4835                         union mlx5_l3t_data data;
4836
4837                         data.dword = cnt_idx;
4838                         if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
4839                                 return 0;
4840                 }
4841         }
4842         if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
4843                 /* Start the asynchronous batch query by the host thread. */
4844                 mlx5_set_query_alarm(priv->sh);
4845         return cnt_idx;
4846 err:
4847         if (cnt_free) {
4848                 cnt_free->pool = pool;
4849                 rte_spinlock_lock(&cont->csl);
4850                 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
4851                 rte_spinlock_unlock(&cont->csl);
4852         }
4853         return 0;
4854 }
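
/*
 * Editorial note: the index returned above composes, in order, the 1-based
 * pool/slot encoding plus the container offsets, i.e.
 *
 *   cnt_idx = MLX5_MAKE_CNT_IDX(pool->index, in-pool offset)
 *             + batch * MLX5_CNT_BATCH_OFFSET
 *             + age * MLX5_CNT_AGE_OFFSET;
 *
 * which is exactly what flow_dv_counter_get_by_idx() strips again in
 * reverse order.
 */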
4855
4856 /**
4857  * Get age param from counter index.
4858  *
4859  * @param[in] dev
4860  *   Pointer to the Ethernet device structure.
4861  * @param[in] counter
4862  *   Index to the counter handler.
4863  *
4864  * @return
4865  *   The aging parameter specified for the counter index.
4866  */
4867 static struct mlx5_age_param*
4868 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4869                                 uint32_t counter)
4870 {
4871         struct mlx5_flow_counter *cnt;
4872         struct mlx5_flow_counter_pool *pool = NULL;
4873
4874         flow_dv_counter_get_by_idx(dev, counter, &pool);
4875         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4876         cnt = MLX5_POOL_GET_CNT(pool, counter);
4877         return MLX5_CNT_TO_AGE(cnt);
4878 }
4879
4880 /**
4881  * Remove a flow counter from aged counter list.
4882  *
4883  * @param[in] dev
4884  *   Pointer to the Ethernet device structure.
4885  * @param[in] counter
4886  *   Index to the counter handler.
4887  * @param[in] cnt
4888  *   Pointer to the counter handler.
4889  */
4890 static void
4891 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
4892                                 uint32_t counter, struct mlx5_flow_counter *cnt)
4893 {
4894         struct mlx5_age_info *age_info;
4895         struct mlx5_age_param *age_param;
4896         struct mlx5_priv *priv = dev->data->dev_private;
4897
4898         age_info = GET_PORT_AGE_INFO(priv);
4899         age_param = flow_dv_counter_idx_get_age(dev, counter);
4900         if (rte_atomic16_cmpset((volatile uint16_t *)
4901                         &age_param->state,
4902                         AGE_CANDIDATE, AGE_FREE)
4903                         != AGE_CANDIDATE) {
4904                 /*
4905                  * We need the lock even if the age timed out,
4906                  * since the counter may still be in process.
4907                  */
4908                 rte_spinlock_lock(&age_info->aged_sl);
4909                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
4910                 rte_spinlock_unlock(&age_info->aged_sl);
4911         }
4912         rte_atomic16_set(&age_param->state, AGE_FREE);
4913 }
4914 /**
4915  * Release a flow counter.
4916  *
4917  * @param[in] dev
4918  *   Pointer to the Ethernet device structure.
4919  * @param[in] counter
4920  *   Index to the counter handler.
4921  */
4922 static void
4923 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
4924 {
4925         struct mlx5_priv *priv = dev->data->dev_private;
4926         struct mlx5_flow_counter_pool *pool = NULL;
4927         struct mlx5_flow_counter *cnt;
4928         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4929
4930         if (!counter)
4931                 return;
4932         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4933         MLX5_ASSERT(pool);
4934         if (counter < MLX5_CNT_BATCH_OFFSET) {
4935                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4936                 if (cnt_ext) {
4937                         if (--cnt_ext->ref_cnt)
4938                                 return;
4939                         if (cnt_ext->shared)
4940                                 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
4941                                                      cnt_ext->id);
4942                 }
4943         }
4944         if (IS_AGE_POOL(pool))
4945                 flow_dv_counter_remove_from_age(dev, counter, cnt);
4946         cnt->pool = pool;
4947         /*
4948          * Put the counter back to a list to be updated in non-fallback
4949          * mode. Currently two lists are used alternately: while one is
4950          * being queried, freed counters are added to the other one,
4951          * selected by the pool query_gen value. After the query finishes,
4952          * that list is appended to the global container counter list. The
4953          * lists are swapped when a query starts, so no lock is needed as
4954          * the query callback and the release function always operate on
4955          * different lists.
4956          */
4957         if (!priv->counter_fallback)
4958                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
4959         else
4960                 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
4961                                   (priv->sh, 0, 0))->counters),
4962                                   cnt, next);
4963 }
4964
4965 /**
4966  * Verify the @p attributes will be correctly understood by the NIC and store
4967  * them in the @p flow if everything is correct.
4968  *
4969  * @param[in] dev
4970  *   Pointer to dev struct.
4971  * @param[in] attributes
4972  *   Pointer to flow attributes
4973  * @param[in] external
4974  *   This flow rule is created by a request external to the PMD.
4975  * @param[out] error
4976  *   Pointer to error structure.
4977  *
4978  * @return
4979  *   - 0 on success and non root table.
4980  *   - 1 on success and root table.
4981  *   - a negative errno value otherwise and rte_errno is set.
4982  */
4983 static int
4984 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4985                             const struct rte_flow_attr *attributes,
4986                             bool external __rte_unused,
4987                             struct rte_flow_error *error)
4988 {
4989         struct mlx5_priv *priv = dev->data->dev_private;
4990         uint32_t priority_max = priv->config.flow_prio - 1;
4991         int ret = 0;
4992
4993 #ifndef HAVE_MLX5DV_DR
4994         if (attributes->group)
4995                 return rte_flow_error_set(error, ENOTSUP,
4996                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4997                                           NULL,
4998                                           "groups are not supported");
4999 #else
5000         uint32_t table = 0;
5001
5002         ret = mlx5_flow_group_to_table(attributes, external,
5003                                        attributes->group, !!priv->fdb_def_rule,
5004                                        &table, error);
5005         if (ret)
5006                 return ret;
5007         if (!table)
5008                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5009 #endif
5010         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5011             attributes->priority >= priority_max)
5012                 return rte_flow_error_set(error, ENOTSUP,
5013                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5014                                           NULL,
5015                                           "priority out of range");
5016         if (attributes->transfer) {
5017                 if (!priv->config.dv_esw_en)
5018                         return rte_flow_error_set
5019                                 (error, ENOTSUP,
5020                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5021                                  "E-Switch dr is not supported");
5022                 if (!(priv->representor || priv->master))
5023                         return rte_flow_error_set
5024                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5025                                  NULL, "E-Switch configuration can only be"
5026                                  " done by a master or a representor device");
5027                 if (attributes->egress)
5028                         return rte_flow_error_set
5029                                 (error, ENOTSUP,
5030                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5031                                  "egress is not supported");
5032         }
5033         if (!(attributes->egress ^ attributes->ingress))
5034                 return rte_flow_error_set(error, ENOTSUP,
5035                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5036                                           "must specify exactly one of "
5037                                           "ingress or egress");
5038         return ret;
5039 }
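
/*
 * Editorial sketch: attributes that would pass the checks above on a NIC
 * ingress table. Group 3 is hypothetical and is translated through
 * mlx5_flow_group_to_table() before the root/non-root decision.
 */
#if 0
static const struct rte_flow_attr example_attr = {
        .group = 3,
        .priority = 0,
        .ingress = 1,   /* exactly one of ingress/egress must be set */
};
#endif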
5040
5041 /**
5042  * Internal validation function. For validating both actions and items.
5043  *
5044  * @param[in] dev
5045  *   Pointer to the rte_eth_dev structure.
5046  * @param[in] attr
5047  *   Pointer to the flow attributes.
5048  * @param[in] items
5049  *   Pointer to the list of items.
5050  * @param[in] actions
5051  *   Pointer to the list of actions.
5052  * @param[in] external
5053  *   This flow rule is created by a request external to the PMD.
5054  * @param[in] hairpin
5055  *   Number of hairpin TX actions, 0 means classic flow.
5056  * @param[out] error
5057  *   Pointer to the error structure.
5058  *
5059  * @return
5060  *   0 on success, a negative errno value otherwise and rte_errno is set.
5061  */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 bool external, int hairpin, struct rte_flow_error *error)
{
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
        uint64_t last_item = 0;
        uint8_t next_protocol = 0xff;
        uint16_t ether_type = 0;
        int actions_n = 0;
        uint8_t item_ipv6_proto = 0;
        const struct rte_flow_item *gre_item = NULL;
        const struct rte_flow_action_raw_decap *decap;
        const struct rte_flow_action_raw_encap *encap;
        const struct rte_flow_action_rss *rss;
        const struct rte_flow_item_tcp nic_tcp_mask = {
                .hdr = {
                        .tcp_flags = 0xFF,
                        .src_port = RTE_BE16(UINT16_MAX),
                        .dst_port = RTE_BE16(UINT16_MAX),
                }
        };
        const struct rte_flow_item_ipv4 nic_ipv4_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                        .time_to_live = 0xff,
                },
        };
        const struct rte_flow_item_ipv6 nic_ipv6_mask = {
                .hdr = {
                        .src_addr =
                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        const struct rte_flow_item_ecpri nic_ecpri_mask = {
                .hdr = {
                        .common = {
                                .u32 =
                                RTE_BE32(((const struct rte_ecpri_common_hdr) {
                                        .type = 0xFF,
                                        }).u32),
                        },
                        .dummy[0] = 0xffffffff,
                },
        };
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *dev_conf = &priv->config;
        uint16_t queue_index = 0xFFFF;
        const struct rte_flow_item_vlan *vlan_m = NULL;
        int16_t rw_act_num = 0;
        uint64_t is_root;

        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, external, error);
        if (ret < 0)
                return ret;
        is_root = (uint64_t)ret;
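        /*
         * Walk the pattern, validating each item against the layers
         * accumulated so far in item_flags.
         */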
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                int type = items->type;

                if (!mlx5_flow_os_item_supported(type))
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                switch (type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_PORT_ID:
                        ret = flow_dv_validate_item_port_id
                                        (dev, items, attr, item_flags, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_PORT_ID;
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                             MLX5_FLOW_LAYER_OUTER_L2;
                        if (items->mask != NULL && items->spec != NULL) {
                                ether_type =
                                        ((const struct rte_flow_item_eth *)
                                         items->spec)->type;
                                ether_type &=
                                        ((const struct rte_flow_item_eth *)
                                         items->mask)->type;
                                ether_type = rte_be_to_cpu_16(ether_type);
                        } else {
                                ether_type = 0;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = flow_dv_validate_item_vlan(items, item_flags,
                                                         dev, error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                             MLX5_FLOW_LAYER_OUTER_VLAN;
                        if (items->mask != NULL && items->spec != NULL) {
                                ether_type =
                                        ((const struct rte_flow_item_vlan *)
                                         items->spec)->inner_type;
                                ether_type &=
                                        ((const struct rte_flow_item_vlan *)
                                         items->mask)->inner_type;
                                ether_type = rte_be_to_cpu_16(ether_type);
                        } else {
                                ether_type = 0;
                        }
                        /* Store outer VLAN mask for of_push_vlan action. */
                        if (!tunnel)
                                vlan_m = items->mask;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        mlx5_flow_tunnel_ip_check(items, next_protocol,
                                                  &item_flags, &tunnel);
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           last_item,
                                                           ether_type,
                                                           &nic_ipv4_mask,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                                next_protocol &=
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->mask))->hdr.next_proto_id;
                        } else {
                                /* Reset for inner layer. */
                                next_protocol = 0xff;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        mlx5_flow_tunnel_ip_check(items, next_protocol,
                                                  &item_flags, &tunnel);
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           last_item,
                                                           ether_type,
                                                           &nic_ipv6_mask,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
                                item_ipv6_proto =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                                next_protocol &=
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->mask)->hdr.proto;
                        } else {
                                /* Reset for inner layer. */
                                next_protocol = 0xff;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
                                                 &nic_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        gre_item = items;
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        ret = mlx5_flow_validate_item_nvgre(items, item_flags,
                                                            next_protocol,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_NVGRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                        ret = mlx5_flow_validate_item_gre_key
                                (items, item_flags, gre_item, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        ret = mlx5_flow_validate_item_geneve(items,
                                                             item_flags, dev,
                                                             error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GENEVE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(dev, items,
                                                           item_flags,
                                                           last_item, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                case RTE_FLOW_ITEM_TYPE_MARK:
                        ret = flow_dv_validate_item_mark(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_MARK;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        ret = flow_dv_validate_item_meta(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP:
                        ret = mlx5_flow_validate_item_icmp(items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_ICMP;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP6:
                        ret = mlx5_flow_validate_item_icmp6(items, item_flags,
                                                            next_protocol,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_ipv6_proto = IPPROTO_ICMPV6;
                        last_item = MLX5_FLOW_LAYER_ICMP6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TAG:
                        ret = flow_dv_validate_item_tag(dev, items,
                                                        attr, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_TAG;
                        break;
                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
                case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP:
                        ret = flow_dv_validate_item_gtp(dev, items, item_flags,
                                                        error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GTP;
                        break;
                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        /* Capacity will be checked in the translate stage. */
                        ret = mlx5_flow_validate_item_ecpri(items, item_flags,
                                                            last_item,
                                                            ether_type,
                                                            &nic_ecpri_mask,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_ECPRI;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
                item_flags |= last_item;
        }
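        /*
         * Walk the action list, validating each action and accumulating
         * action_flags and the modify-header budget in rw_act_num.
         */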
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                int type = actions->type;

                if (!mlx5_flow_os_action_supported(type))
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                switch (type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_PORT_ID:
                        ret = flow_dv_validate_action_port_id(dev,
                                                              action_flags,
                                                              actions,
                                                              attr,
                                                              error);
                        if (ret)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = flow_dv_validate_action_flag(dev, action_flags,
                                                           attr, error);
                        if (ret < 0)
                                return ret;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                /* Count all modify-header actions as one. */
                                if (!(action_flags &
                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_FLAG |
                                                MLX5_FLOW_ACTION_MARK_EXT;
                        } else {
                                action_flags |= MLX5_FLOW_ACTION_FLAG;
                                ++actions_n;
                        }
                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = flow_dv_validate_action_mark(dev, actions,
                                                           action_flags,
                                                           attr, error);
                        if (ret < 0)
                                return ret;
                        if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
                                /* Count all modify-header actions as one. */
                                if (!(action_flags &
                                      MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                        ++actions_n;
                                action_flags |= MLX5_FLOW_ACTION_MARK |
                                                MLX5_FLOW_ACTION_MARK_EXT;
                        } else {
                                action_flags |= MLX5_FLOW_ACTION_MARK;
                                ++actions_n;
                        }
                        rw_act_num += MLX5_ACT_NUM_SET_MARK;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_META:
                        ret = flow_dv_validate_action_set_meta(dev, actions,
                                                               action_flags,
                                                               attr, error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_META;
                        rw_act_num += MLX5_ACT_NUM_SET_META;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TAG:
                        ret = flow_dv_validate_action_set_tag(dev, actions,
                                                              action_flags,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_TAG;
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        queue_index = ((const struct rte_flow_action_queue *)
                                                        (actions->conf))->index;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        rss = actions->conf;
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        if (rss != NULL && rss->queue_num)
                                queue_index = rss->queue[0];
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
                        ret =
                        mlx5_flow_validate_action_default_miss(action_flags,
                                        attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_dv_validate_action_count(dev, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
                        if (flow_dv_validate_action_pop_vlan(dev,
                                                             action_flags,
                                                             actions,
                                                             item_flags, attr,
                                                             error))
                                return -rte_errno;
                        action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
                        ret = flow_dv_validate_action_push_vlan(dev,
                                                                action_flags,
                                                                vlan_m,
                                                                actions, attr,
                                                                error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        ret = flow_dv_validate_action_set_vlan_pcp
                                                (action_flags, actions, error);
                        if (ret < 0)
                                return ret;
                        /* Count PCP with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
                        ret = flow_dv_validate_action_set_vlan_vid
                                                (item_flags, action_flags,
                                                 actions, error);
                        if (ret < 0)
                                return ret;
                        /* Count VID with push_vlan command. */
                        action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
                        rw_act_num += MLX5_ACT_NUM_MDF_VID;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
                        ret = flow_dv_validate_action_l2_encap(dev,
                                                               action_flags,
                                                               actions, attr,
                                                               error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
                        ret = flow_dv_validate_action_decap(dev, action_flags,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DECAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        ret = flow_dv_validate_action_raw_encap_decap
                                (dev, NULL, actions->conf, attr, &action_flags,
                                 &actions_n, error);
                        if (ret < 0)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        decap = actions->conf;
                        while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
                                ;
                        if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                                encap = NULL;
                                actions--;
                        } else {
                                encap = actions->conf;
                        }
                        ret = flow_dv_validate_action_raw_encap_decap
                                           (dev,
                                            decap ? decap : &empty_decap, encap,
                                            attr, &action_flags, &actions_n,
                                            error);
                        if (ret < 0)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
                        ret = flow_dv_validate_action_modify_mac(action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
                                                MLX5_FLOW_ACTION_SET_MAC_SRC :
                                                MLX5_FLOW_ACTION_SET_MAC_DST;
                        /*
                         * Even though the source and destination MAC
                         * addresses overlap in the header with 4-byte
                         * alignment, the conversion function handles them
                         * separately, so 4 SW actions are created. Two
                         * actions are added each time, no matter how many
                         * bytes of the address are actually set.
                         */
                        rw_act_num += MLX5_ACT_NUM_MDF_MAC;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
                        ret = flow_dv_validate_action_modify_ipv4(action_flags,
                                                                  actions,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV4_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV4_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
                        ret = flow_dv_validate_action_modify_ipv6(action_flags,
                                                                  actions,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
                                return ret;
                        if (item_ipv6_proto == IPPROTO_ICMPV6)
                                return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION,
                                        actions,
                                        "Can't change header "
                                        "with ICMPv6 proto");
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
                                                MLX5_FLOW_ACTION_SET_IPV6_SRC :
                                                MLX5_FLOW_ACTION_SET_IPV6_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
                case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
                        ret = flow_dv_validate_action_modify_tp(action_flags,
                                                                actions,
                                                                item_flags,
                                                                error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
                                                MLX5_FLOW_ACTION_SET_TP_SRC :
                                                MLX5_FLOW_ACTION_SET_TP_DST;
                        rw_act_num += MLX5_ACT_NUM_MDF_PORT;
                        break;
                case RTE_FLOW_ACTION_TYPE_DEC_TTL:
                case RTE_FLOW_ACTION_TYPE_SET_TTL:
                        ret = flow_dv_validate_action_modify_ttl(action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_SET_TTL ?
                                                MLX5_FLOW_ACTION_SET_TTL :
                                                MLX5_FLOW_ACTION_DEC_TTL;
                        rw_act_num += MLX5_ACT_NUM_MDF_TTL;
                        break;
                case RTE_FLOW_ACTION_TYPE_JUMP:
                        ret = flow_dv_validate_action_jump(actions,
                                                           action_flags,
                                                           attr, external,
                                                           error);
                        if (ret)
                                return ret;
                        ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_JUMP;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
                        ret = flow_dv_validate_action_modify_tcp_seq
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
                                                MLX5_FLOW_ACTION_INC_TCP_SEQ :
                                                MLX5_FLOW_ACTION_DEC_TCP_SEQ;
                        rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
                        break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
                case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
                        ret = flow_dv_validate_action_modify_tcp_ack
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
                                                MLX5_FLOW_ACTION_INC_TCP_ACK :
                                                MLX5_FLOW_ACTION_DEC_TCP_ACK;
                        rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
                        break;
                case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
                case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_METER:
                        ret = mlx5_flow_validate_action_meter(dev,
                                                              action_flags,
                                                              actions, attr,
                                                              error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_METER;
                        ++actions_n;
                        /* Meter action will add one more TAG action. */
                        rw_act_num += MLX5_ACT_NUM_SET_TAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_AGE:
                        ret = flow_dv_validate_action_age(action_flags,
                                                          actions, dev,
                                                          error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_AGE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
                        ret = flow_dv_validate_action_modify_ipv4_dscp
                                                         (action_flags,
                                                          actions,
                                                          item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
                        ret = flow_dv_validate_action_modify_ipv6_dscp
                                                                (action_flags,
                                                                 actions,
                                                                 item_flags,
                                                                 error);
                        if (ret < 0)
                                return ret;
                        /* Count all modify-header actions as one action. */
                        if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
                                ++actions_n;
                        action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
                        rw_act_num += MLX5_ACT_NUM_SET_DSCP;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        /*
         * Validate the drop action mutual exclusion with other actions.
         * Drop action is mutually-exclusive with any other action, except for
         * Count action.
         */
        if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
            (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "Drop action is mutually-exclusive "
                                          "with any other action, except for "
                                          "Count action");
        /* E-Switch has a few restrictions on using items and actions. */
        if (attr->transfer) {
                if (!mlx5_flow_ext_mreg_supported(dev) &&
                    action_flags & MLX5_FLOW_ACTION_FLAG)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action FLAG");
                if (!mlx5_flow_ext_mreg_supported(dev) &&
                    action_flags & MLX5_FLOW_ACTION_MARK)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action MARK");
                if (action_flags & MLX5_FLOW_ACTION_QUEUE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action QUEUE");
                if (action_flags & MLX5_FLOW_ACTION_RSS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "unsupported action RSS");
                if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "no fate action is found");
        } else {
                if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "no fate action is found");
        }
        /* Continue validation for Xcap and VLAN actions. */
        if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
                             MLX5_FLOW_VLAN_ACTIONS)) &&
            (queue_index == 0xFFFF ||
             mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
                if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
                    MLX5_FLOW_XCAP_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "encap and decap "
                                                  "combination is not supported");
                if (!attr->transfer && attr->ingress) {
                        if (action_flags & MLX5_FLOW_ACTION_ENCAP)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "encap is not supported"
                                                 " for ingress traffic");
                        else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "push VLAN action not "
                                                 "supported for ingress");
                        else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
                                        MLX5_FLOW_VLAN_ACTIONS)
                                return rte_flow_error_set
                                                (error, ENOTSUP,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL, "no support for "
                                                 "multiple VLAN actions");
                }
        }
        /* Hairpin flow will add one more TAG action. */
        if (hairpin > 0)
                rw_act_num += MLX5_ACT_NUM_SET_TAG;
        /* Extra metadata enabled: one more TAG action will be added. */
        if (dev_conf->dv_flow_en &&
            dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
            mlx5_flow_ext_mreg_supported(dev))
                rw_act_num += MLX5_ACT_NUM_SET_TAG;
        if ((uint32_t)rw_act_num >
                        flow_dv_modify_hdr_action_max(dev, is_root)) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "too many header modify"
                                          " actions to support");
        }
        return 0;
}

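/*
 * Illustrative sketch (not part of the driver): together with the ingress
 * attributes shown earlier, a minimal rule that passes flow_dv_validate()
 * carries a supported item chain and exactly one fate action, e.g.
 * ETH / IPV4 / UDP -> QUEUE:
 *
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
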
/**
 * Internal preparation function. Allocates the DV flow structure;
 * its size is constant.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t handle_idx = 0;
        struct mlx5_flow *dev_flow;
        struct mlx5_flow_handle *dev_handle;
        struct mlx5_priv *priv = dev->data->dev_private;

        /* Guard against overflowing the temporary device flow array. */
        if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
                rte_flow_error_set(error, ENOSPC,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "no free temporary device flow");
                return NULL;
        }
        dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
                                   &handle_idx);
        if (!dev_handle) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow handle");
                return NULL;
        }
        /* Multi-threading is not supported. */
        dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
        dev_flow->handle = dev_handle;
        dev_flow->handle_idx = handle_idx;
        /*
         * Some old rdma-core releases check the length of the matching
         * parameter before continuing, and that check must see the length
         * without the misc4 param. If the flow provides misc4 support, the
         * length is adjusted accordingly later. Each param member is
         * naturally aligned to a 64B boundary.
         */
        dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
                                  MLX5_ST_SZ_BYTES(fte_match_set_misc4);
        /*
         * The matching value must be cleared to 0 before use. In the past
         * it was cleared implicitly by the rte_*alloc API; the time spent
         * on the explicit memset is almost the same as before.
         */
        memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
        dev_flow->ingress = attr->ingress;
        dev_flow->dv.transfer = attr->transfer;
        return dev_flow;
}

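/*
 * Illustrative note (not part of the driver): the size arithmetic above
 * reserves the legacy match-parameter length,
 *
 *     MLX5_ST_SZ_BYTES(fte_match_param)      // full match parameter
 *   - MLX5_ST_SZ_BYTES(fte_match_set_misc4)  // trailing 64B-aligned member
 *
 * which is what old rdma-core releases check against; a flow that really
 * matches on misc4 is assumed to enlarge the value size later.
 */
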
#ifdef RTE_LIBRTE_MLX5_DEBUG
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * the kernel driver. If an unmasked bit is present in the value, it returns
 * failure.
 *
 * @param match_mask
 *   pointer to match mask buffer.
 * @param match_value
 *   pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
        uint8_t *m = match_mask;
        uint8_t *v = match_value;
        unsigned int i;

        for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
                if (v[i] & ~m[i]) {
                        DRV_LOG(ERR,
                                "match_value differs from match_criteria"
                                " %p[%u] != %p[%u]",
                                match_value, i, match_mask, i);
                        return -EINVAL;
                }
        }
        return 0;
}
#endif

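/*
 * Illustrative sketch (not part of the driver): the check above fails as
 * soon as a value bit is set outside its mask, e.g.
 *
 *     uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
 *     uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };
 *
 *     flow_dv_check_valid_spec(mask, value);
 *
 * returns -EINVAL because bit 4 of value[0] is not covered by mask[0].
 */
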
/**
 * Add a match on the IP version.
 *
 * @param[in] group
 *   Flow group.
 * @param[in] headers_v
 *   Values header pointer.
 * @param[in] headers_m
 *   Masks header pointer.
 * @param[in] ip_version
 *   The IP version to set.
 */
static inline void
flow_dv_set_match_ip_version(uint32_t group,
                             void *headers_v,
                             void *headers_m,
                             uint8_t ip_version)
{
        if (group == 0)
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        else
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
                         ip_version);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
}

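/*
 * Illustrative note (not part of the driver): on the root table (group 0)
 * the mask is widened to the full 4-bit field, so e.g.
 *
 *     flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 *
 * matches IPv4 exactly, while on non-root tables the mask equals the
 * version value itself; in both cases the ethertype match is cleared in
 * favor of the ip_version field.
 */
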
/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   Flow group.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner,
                           uint32_t group)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
6094         /* The value must be in the range of the mask. */
6095         for (i = 0; i < sizeof(eth_m->src); ++i)
6096                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6097         if (eth_v->type) {
6098                 /* When ethertype is present set mask for tagged VLAN. */
6099                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6100                 /* Set value for tagged VLAN if ethertype is 802.1Q. */
6101                 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
6102                     eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
6103                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
6104                                  1);
6105                         /* Return here to avoid setting match on ethertype. */
6106                         return;
6107                 }
6108         }
6109         /*
6110          * HW supports match on one Ethertype, the Ethertype following the last
6111          * VLAN tag of the packet (see PRM).
6112          * Set match on ethertype only if ETH header is not followed by VLAN.
6113          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6114          * ethertype, and use ip_version field instead.
6115          * eCPRI over Ether layer will use type value 0xAEFE.
6116          */
6117         if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
6118             eth_m->type == 0xFFFF) {
6119                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6120         } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
6121                    eth_m->type == 0xFFFF) {
6122                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6123         } else {
6124                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
6125                          rte_be_to_cpu_16(eth_m->type));
6126                 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6127                                      ethertype);
6128                 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6129         }
6130 }
6131
6132 /**
6133  * Add VLAN item to matcher and to the value.
6134  *
6135  * @param[in, out] dev_flow
6136  *   Flow descriptor.
6137  * @param[in, out] matcher
6138  *   Flow matcher.
6139  * @param[in, out] key
6140  *   Flow matcher value.
6141  * @param[in] item
6142  *   Flow pattern to translate.
6143  * @param[in] inner
6144  *   Item is inner pattern.
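 * @param[in] group
 *   The group to insert the rule.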
6145  */
6146 static void
6147 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6148                             void *matcher, void *key,
6149                             const struct rte_flow_item *item,
6150                             int inner, uint32_t group)
6151 {
6152         const struct rte_flow_item_vlan *vlan_m = item->mask;
6153         const struct rte_flow_item_vlan *vlan_v = item->spec;
6154         void *headers_m;
6155         void *headers_v;
6156         uint16_t tci_m;
6157         uint16_t tci_v;
6158
6159         if (inner) {
6160                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6161                                          inner_headers);
6162                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6163         } else {
6164                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6165                                          outer_headers);
6166                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6167                 /*
6168                  * This is a workaround: partial TCI masks are not
6169                  * supported here and the item has been pre-validated.
6170                  */
6171                 if (vlan_v)
6172                         dev_flow->handle->vf_vlan.tag =
6173                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6174         }
6175         /*
6176          * When VLAN item exists in flow, mark packet as tagged,
6177          * even if TCI is not specified.
6178          */
6179         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6180         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
6181         if (!vlan_v)
6182                 return;
6183         if (!vlan_m)
6184                 vlan_m = &rte_flow_item_vlan_mask;
6185         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6186         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6187         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
6188         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
6189         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
6190         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
6191         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
6192         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
6193         /*
6194          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6195          * ethertype, and use ip_version field instead.
6196          */
6197         if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
6198             vlan_m->inner_type == 0xFFFF) {
6199                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6200         } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
6201                    vlan_m->inner_type == 0xFFFF) {
6202                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6203         } else {
6204                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
6205                          rte_be_to_cpu_16(vlan_m->inner_type));
6206                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
6207                          rte_be_to_cpu_16(vlan_m->inner_type &
6208                                           vlan_v->inner_type));
6209         }
6210 }
6211
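/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_FLOW_DV_EXAMPLES guard): the 16-bit VLAN TCI decomposes into
 * PCP (3 bits), CFI/DEI (1 bit) and VID (12 bits), matching the
 * first_prio/first_cfi/first_vid writes above. MLX5_SET() truncates
 * each value to its field width, so plain right shifts suffice.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_vlan_tci_split(void)
{
        uint16_t tci = 0xa123; /* PCP = 5, CFI = 0, VID = 0x123 */

        MLX5_ASSERT((tci & 0x0fff) == 0x123);  /* first_vid, 12 bits */
        MLX5_ASSERT(((tci >> 12) & 0x1) == 0); /* first_cfi, 1 bit */
        MLX5_ASSERT((tci >> 13) == 5);         /* first_prio, 3 bits */
}
#endif
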
6212 /**
6213  * Add IPV4 item to matcher and to the value.
6214  *
6215  * @param[in, out] matcher
6216  *   Flow matcher.
6217  * @param[in, out] key
6218  *   Flow matcher value.
6219  * @param[in] item
6220  *   Flow pattern to translate.
6221  * @param[in] item_flags
6222  *   Bit-fields that hold the items detected so far.
6223  * @param[in] inner
6224  *   Item is inner pattern.
6225  * @param[in] group
6226  *   The group to insert the rule.
6227  */
6228 static void
6229 flow_dv_translate_item_ipv4(void *matcher, void *key,
6230                             const struct rte_flow_item *item,
6231                             const uint64_t item_flags,
6232                             int inner, uint32_t group)
6233 {
6234         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6235         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6236         const struct rte_flow_item_ipv4 nic_mask = {
6237                 .hdr = {
6238                         .src_addr = RTE_BE32(0xffffffff),
6239                         .dst_addr = RTE_BE32(0xffffffff),
6240                         .type_of_service = 0xff,
6241                         .next_proto_id = 0xff,
6242                         .time_to_live = 0xff,
6243                 },
6244         };
6245         void *headers_m;
6246         void *headers_v;
6247         char *l24_m;
6248         char *l24_v;
6249         uint8_t tos;
6250
6251         if (inner) {
6252                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6253                                          inner_headers);
6254                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6255         } else {
6256                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6257                                          outer_headers);
6258                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6259         }
6260         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6261         /*
6262          * On outer header (which must contain L2), or inner header with L2,
6263          * set cvlan_tag mask bit to mark this packet as untagged.
6264          * This should be done even if item->spec is empty.
6265          */
6266         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6267                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6268         if (!ipv4_v)
6269                 return;
6270         if (!ipv4_m)
6271                 ipv4_m = &nic_mask;
6272         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6273                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6274         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6275                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6276         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6277         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6278         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6279                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6280         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6281                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6282         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6283         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6284         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6285         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6286                  ipv4_m->hdr.type_of_service);
6287         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6288         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6289                  ipv4_m->hdr.type_of_service >> 2);
6290         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6291         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6292                  ipv4_m->hdr.next_proto_id);
6293         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6294                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6295         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6296                  ipv4_m->hdr.time_to_live);
6297         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6298                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6299 }
6300
6301 /**
6302  * Add IPV6 item to matcher and to the value.
6303  *
6304  * @param[in, out] matcher
6305  *   Flow matcher.
6306  * @param[in, out] key
6307  *   Flow matcher value.
6308  * @param[in] item
6309  *   Flow pattern to translate.
6310  * @param[in] item_flags
6311  *   Bit-fields that hold the items detected so far.
6312  * @param[in] inner
6313  *   Item is inner pattern.
6314  * @param[in] group
6315  *   The group to insert the rule.
6316  */
6317 static void
6318 flow_dv_translate_item_ipv6(void *matcher, void *key,
6319                             const struct rte_flow_item *item,
6320                             const uint64_t item_flags,
6321                             int inner, uint32_t group)
6322 {
6323         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6324         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6325         const struct rte_flow_item_ipv6 nic_mask = {
6326                 .hdr = {
6327                         .src_addr =
6328                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6329                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6330                         .dst_addr =
6331                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6332                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6333                         .vtc_flow = RTE_BE32(0xffffffff),
6334                         .proto = 0xff,
6335                         .hop_limits = 0xff,
6336                 },
6337         };
6338         void *headers_m;
6339         void *headers_v;
6340         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6341         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6342         char *l24_m;
6343         char *l24_v;
6344         uint32_t vtc_m;
6345         uint32_t vtc_v;
6346         int i;
6347         int size;
6348
6349         if (inner) {
6350                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6351                                          inner_headers);
6352                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6353         } else {
6354                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6355                                          outer_headers);
6356                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6357         }
6358         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6359         /*
6360          * On outer header (which must contain L2), or inner header with L2,
6361          * set cvlan_tag mask bit to mark this packet as untagged.
6362          * This should be done even if item->spec is empty.
6363          */
6364         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6365                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6366         if (!ipv6_v)
6367                 return;
6368         if (!ipv6_m)
6369                 ipv6_m = &nic_mask;
6370         size = sizeof(ipv6_m->hdr.dst_addr);
6371         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6372                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6373         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6374                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6375         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6376         for (i = 0; i < size; ++i)
6377                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6378         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6379                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6380         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6381                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6382         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6383         for (i = 0; i < size; ++i)
6384                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6385         /* TOS. */
6386         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6387         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6388         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6389         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6390         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6391         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6392         /* Label. */
6393         if (inner) {
6394                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6395                          vtc_m);
6396                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6397                          vtc_v);
6398         } else {
6399                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6400                          vtc_m);
6401                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6402                          vtc_v);
6403         }
6404         /* Protocol. */
6405         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6406                  ipv6_m->hdr.proto);
6407         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6408                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6409         /* Hop limit. */
6410         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6411                  ipv6_m->hdr.hop_limits);
6412         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6413                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6414 }
6415
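/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_FLOW_DV_EXAMPLES guard): layout of the IPv6 vtc_flow word used
 * above - version (4 bits) | traffic class (8 bits) | flow label
 * (20 bits). ECN is the low 2 bits of the traffic class, hence ">> 20";
 * DSCP is the high 6 bits, hence ">> 22". MLX5_SET() truncates to the
 * field width, discarding the high-order remainder.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_ipv6_vtc_split(void)
{
        uint32_t vtc = 0x60b12345; /* version 6, tclass 0x0b, label 0x12345 */

        MLX5_ASSERT(((vtc >> 20) & 0x3) == 0x3);  /* ip_ecn: 0x0b & 3 */
        MLX5_ASSERT(((vtc >> 22) & 0x3f) == 0x2); /* ip_dscp: 0x0b >> 2 */
        MLX5_ASSERT((vtc & 0xfffff) == 0x12345);  /* flow label */
}
#endif
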
6416 /**
6417  * Add TCP item to matcher and to the value.
6418  *
6419  * @param[in, out] matcher
6420  *   Flow matcher.
6421  * @param[in, out] key
6422  *   Flow matcher value.
6423  * @param[in] item
6424  *   Flow pattern to translate.
6425  * @param[in] inner
6426  *   Item is inner pattern.
6427  */
6428 static void
6429 flow_dv_translate_item_tcp(void *matcher, void *key,
6430                            const struct rte_flow_item *item,
6431                            int inner)
6432 {
6433         const struct rte_flow_item_tcp *tcp_m = item->mask;
6434         const struct rte_flow_item_tcp *tcp_v = item->spec;
6435         void *headers_m;
6436         void *headers_v;
6437
6438         if (inner) {
6439                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6440                                          inner_headers);
6441                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6442         } else {
6443                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6444                                          outer_headers);
6445                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6446         }
6447         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6448         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6449         if (!tcp_v)
6450                 return;
6451         if (!tcp_m)
6452                 tcp_m = &rte_flow_item_tcp_mask;
6453         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6454                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6455         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6456                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6457         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6458                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6459         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6460                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6461         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6462                  tcp_m->hdr.tcp_flags);
6463         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6464                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6465 }
6466
6467 /**
6468  * Add UDP item to matcher and to the value.
6469  *
6470  * @param[in, out] matcher
6471  *   Flow matcher.
6472  * @param[in, out] key
6473  *   Flow matcher value.
6474  * @param[in] item
6475  *   Flow pattern to translate.
6476  * @param[in] inner
6477  *   Item is inner pattern.
6478  */
6479 static void
6480 flow_dv_translate_item_udp(void *matcher, void *key,
6481                            const struct rte_flow_item *item,
6482                            int inner)
6483 {
6484         const struct rte_flow_item_udp *udp_m = item->mask;
6485         const struct rte_flow_item_udp *udp_v = item->spec;
6486         void *headers_m;
6487         void *headers_v;
6488
6489         if (inner) {
6490                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6491                                          inner_headers);
6492                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6493         } else {
6494                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6495                                          outer_headers);
6496                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6497         }
6498         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6499         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6500         if (!udp_v)
6501                 return;
6502         if (!udp_m)
6503                 udp_m = &rte_flow_item_udp_mask;
6504         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6505                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6507                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6508         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6509                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6510         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6511                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6512 }
6513
6514 /**
6515  * Add GRE optional Key item to matcher and to the value.
6516  *
6517  * @param[in, out] matcher
6518  *   Flow matcher.
6519  * @param[in, out] key
6520  *   Flow matcher value.
6521  * @param[in] item
6522  *   Flow pattern to translate.
6525  */
6526 static void
6527 flow_dv_translate_item_gre_key(void *matcher, void *key,
6528                                const struct rte_flow_item *item)
6529 {
6530         const rte_be32_t *key_m = item->mask;
6531         const rte_be32_t *key_v = item->spec;
6532         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6533         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6534         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6535
6536         /* GRE K bit must be on and should already be validated */
6537         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6538         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6539         if (!key_v)
6540                 return;
6541         if (!key_m)
6542                 key_m = &gre_key_default_mask;
6543         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6544                  rte_be_to_cpu_32(*key_m) >> 8);
6545         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6546                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6547         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6548                  rte_be_to_cpu_32(*key_m) & 0xFF);
6549         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6550                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6551 }
6552
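/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_FLOW_DV_EXAMPLES guard): the 32-bit GRE key is split across two
 * PRM fields as done above - gre_key_h takes the upper 24 bits and
 * gre_key_l the lower 8 bits.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_gre_key_split(void)
{
        uint32_t host = rte_be_to_cpu_32(RTE_BE32(0x12345678));

        MLX5_ASSERT((host >> 8) == 0x123456); /* gre_key_h: upper 24 bits */
        MLX5_ASSERT((host & 0xFF) == 0x78);   /* gre_key_l: lower 8 bits */
}
#endif
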
6553 /**
6554  * Add GRE item to matcher and to the value.
6555  *
6556  * @param[in, out] matcher
6557  *   Flow matcher.
6558  * @param[in, out] key
6559  *   Flow matcher value.
6560  * @param[in] item
6561  *   Flow pattern to translate.
6562  * @param[in] inner
6563  *   Item is inner pattern.
6564  */
6565 static void
6566 flow_dv_translate_item_gre(void *matcher, void *key,
6567                            const struct rte_flow_item *item,
6568                            int inner)
6569 {
6570         const struct rte_flow_item_gre *gre_m = item->mask;
6571         const struct rte_flow_item_gre *gre_v = item->spec;
6572         void *headers_m;
6573         void *headers_v;
6574         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6575         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6576         struct {
6577                 union {
6578                         __extension__
6579                         struct {
6580                                 uint16_t version:3;
6581                                 uint16_t rsvd0:9;
6582                                 uint16_t s_present:1;
6583                                 uint16_t k_present:1;
6584                                 uint16_t rsvd_bit1:1;
6585                                 uint16_t c_present:1;
6586                         };
6587                         uint16_t value;
6588                 };
6589         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6590
6591         if (inner) {
6592                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6593                                          inner_headers);
6594                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6595         } else {
6596                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6597                                          outer_headers);
6598                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6599         }
6600         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6601         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6602         if (!gre_v)
6603                 return;
6604         if (!gre_m)
6605                 gre_m = &rte_flow_item_gre_mask;
6606         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6607                  rte_be_to_cpu_16(gre_m->protocol));
6608         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6609                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6610         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6611         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6612         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6613                  gre_crks_rsvd0_ver_m.c_present);
6614         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6615                  gre_crks_rsvd0_ver_v.c_present &
6616                  gre_crks_rsvd0_ver_m.c_present);
6617         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6618                  gre_crks_rsvd0_ver_m.k_present);
6619         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6620                  gre_crks_rsvd0_ver_v.k_present &
6621                  gre_crks_rsvd0_ver_m.k_present);
6622         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6623                  gre_crks_rsvd0_ver_m.s_present);
6624         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6625                  gre_crks_rsvd0_ver_v.s_present &
6626                  gre_crks_rsvd0_ver_m.s_present);
6627 }
6628
6629 /**
6630  * Add NVGRE item to matcher and to the value.
6631  *
6632  * @param[in, out] matcher
6633  *   Flow matcher.
6634  * @param[in, out] key
6635  *   Flow matcher value.
6636  * @param[in] item
6637  *   Flow pattern to translate.
6638  * @param[in] inner
6639  *   Item is inner pattern.
6640  */
6641 static void
6642 flow_dv_translate_item_nvgre(void *matcher, void *key,
6643                              const struct rte_flow_item *item,
6644                              int inner)
6645 {
6646         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6647         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6648         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6649         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6650         const char *tni_flow_id_m;
6651         const char *tni_flow_id_v;
6652         char *gre_key_m;
6653         char *gre_key_v;
6654         int size;
6655         int i;
6656
6657         /* For NVGRE, GRE header fields must be set with defined values. */
6658         const struct rte_flow_item_gre gre_spec = {
6659                 .c_rsvd0_ver = RTE_BE16(0x2000),
6660                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6661         };
6662         const struct rte_flow_item_gre gre_mask = {
6663                 .c_rsvd0_ver = RTE_BE16(0xB000),
6664                 .protocol = RTE_BE16(UINT16_MAX),
6665         };
6666         const struct rte_flow_item gre_item = {
6667                 .spec = &gre_spec,
6668                 .mask = &gre_mask,
6669                 .last = NULL,
6670         };
6671         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6672         if (!nvgre_v)
6673                 return;
6674         if (!nvgre_m)
6675                 nvgre_m = &rte_flow_item_nvgre_mask;
6676         tni_flow_id_m = (const char *)nvgre_m->tni;
6677         tni_flow_id_v = (const char *)nvgre_v->tni;
6678         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
6679         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6680         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
6681         memcpy(gre_key_m, tni_flow_id_m, size);
6682         for (i = 0; i < size; ++i)
6683                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
6684 }
6685
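/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_FLOW_DV_EXAMPLES guard): the fixed GRE header imposed above for
 * NVGRE. In host order bit 15 is C, bit 13 is K and bit 12 is S, so the
 * spec 0x2000 demands "K set" while the mask 0xB000 also pins C and S
 * to zero; the protocol is Transparent Ethernet Bridging (0x6558).
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_nvgre_gre_bits(void)
{
        uint16_t spec = 0x2000, mask = 0xB000;

        MLX5_ASSERT(spec & (1 << 13)); /* K bit requested */
        MLX5_ASSERT(mask & (1 << 15)); /* C bit matched (as zero) */
        MLX5_ASSERT(mask & (1 << 12)); /* S bit matched (as zero) */
}
#endif
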
6686 /**
6687  * Add VXLAN item to matcher and to the value.
6688  *
6689  * @param[in, out] matcher
6690  *   Flow matcher.
6691  * @param[in, out] key
6692  *   Flow matcher value.
6693  * @param[in] item
6694  *   Flow pattern to translate.
6695  * @param[in] inner
6696  *   Item is inner pattern.
6697  */
6698 static void
6699 flow_dv_translate_item_vxlan(void *matcher, void *key,
6700                              const struct rte_flow_item *item,
6701                              int inner)
6702 {
6703         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
6704         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
6705         void *headers_m;
6706         void *headers_v;
6707         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6708         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6709         char *vni_m;
6710         char *vni_v;
6711         uint16_t dport;
6712         int size;
6713         int i;
6714
6715         if (inner) {
6716                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6717                                          inner_headers);
6718                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6719         } else {
6720                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6721                                          outer_headers);
6722                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6723         }
6724         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6725                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6726         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6727                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6728                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6729         }
6730         if (!vxlan_v)
6731                 return;
6732         if (!vxlan_m)
6733                 vxlan_m = &rte_flow_item_vxlan_mask;
6734         size = sizeof(vxlan_m->vni);
6735         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
6736         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
6737         memcpy(vni_m, vxlan_m->vni, size);
6738         for (i = 0; i < size; ++i)
6739                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6740 }
6741
6742 /**
6743  * Add VXLAN-GPE item to matcher and to the value.
6744  *
6745  * @param[in, out] matcher
6746  *   Flow matcher.
6747  * @param[in, out] key
6748  *   Flow matcher value.
6749  * @param[in] item
6750  *   Flow pattern to translate.
6751  * @param[in] inner
6752  *   Item is inner pattern.
6753  */
6755 static void
6756 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
6757                                  const struct rte_flow_item *item, int inner)
6758 {
6759         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
6760         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
6761         void *headers_m;
6762         void *headers_v;
6763         void *misc_m =
6764                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
6765         void *misc_v =
6766                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6767         char *vni_m;
6768         char *vni_v;
6769         uint16_t dport;
6770         int size;
6771         int i;
6772         uint8_t flags_m = 0xff;
6773         uint8_t flags_v = 0xc;
6774
6775         if (inner) {
6776                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6777                                          inner_headers);
6778                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6779         } else {
6780                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6781                                          outer_headers);
6782                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6783         }
6784         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6785                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6786         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6787                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6788                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6789         }
6790         if (!vxlan_v)
6791                 return;
6792         if (!vxlan_m)
6793                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
6794         size = sizeof(vxlan_m->vni);
6795         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
6796         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
6797         memcpy(vni_m, vxlan_m->vni, size);
6798         for (i = 0; i < size; ++i)
6799                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6800         if (vxlan_m->flags) {
6801                 flags_m = vxlan_m->flags;
6802                 flags_v = vxlan_v->flags;
6803         }
6804         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
6805         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
6806         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
6807                  vxlan_m->protocol);
6808         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
6809                  vxlan_v->protocol);
6810 }
6811
6812 /**
6813  * Add Geneve item to matcher and to the value.
6814  *
6815  * @param[in, out] matcher
6816  *   Flow matcher.
6817  * @param[in, out] key
6818  *   Flow matcher value.
6819  * @param[in] item
6820  *   Flow pattern to translate.
6821  * @param[in] inner
6822  *   Item is inner pattern.
6823  */
6825 static void
6826 flow_dv_translate_item_geneve(void *matcher, void *key,
6827                               const struct rte_flow_item *item, int inner)
6828 {
6829         const struct rte_flow_item_geneve *geneve_m = item->mask;
6830         const struct rte_flow_item_geneve *geneve_v = item->spec;
6831         void *headers_m;
6832         void *headers_v;
6833         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6834         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6835         uint16_t dport;
6836         uint16_t gbhdr_m;
6837         uint16_t gbhdr_v;
6838         char *vni_m;
6839         char *vni_v;
6840         size_t size, i;
6841
6842         if (inner) {
6843                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6844                                          inner_headers);
6845                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6846         } else {
6847                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6848                                          outer_headers);
6849                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6850         }
6851         dport = MLX5_UDP_PORT_GENEVE;
6852         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6853                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6854                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6855         }
6856         if (!geneve_v)
6857                 return;
6858         if (!geneve_m)
6859                 geneve_m = &rte_flow_item_geneve_mask;
6860         size = sizeof(geneve_m->vni);
6861         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6862         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6863         memcpy(vni_m, geneve_m->vni, size);
6864         for (i = 0; i < size; ++i)
6865                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
6866         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6867                  rte_be_to_cpu_16(geneve_m->protocol));
6868         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6869                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6870         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6871         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
6872         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6873                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6874         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6875                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6876         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6877                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6878         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6879                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6880                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6881 }
6882
6883 /**
6884  * Add MPLS item to matcher and to the value.
6885  *
6886  * @param[in, out] matcher
6887  *   Flow matcher.
6888  * @param[in, out] key
6889  *   Flow matcher value.
6890  * @param[in] item
6891  *   Flow pattern to translate.
6892  * @param[in] prev_layer
6893  *   The protocol layer indicated in previous item.
6894  * @param[in] inner
6895  *   Item is inner pattern.
6896  */
6897 static void
6898 flow_dv_translate_item_mpls(void *matcher, void *key,
6899                             const struct rte_flow_item *item,
6900                             uint64_t prev_layer,
6901                             int inner)
6902 {
6903         const uint32_t *in_mpls_m = item->mask;
6904         const uint32_t *in_mpls_v = item->spec;
6905         uint32_t *out_mpls_m = NULL;
6906         uint32_t *out_mpls_v = NULL;
6907         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6908         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6909         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6910                                      misc_parameters_2);
6911         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6912         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6913         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6914
6915         switch (prev_layer) {
6916         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6917                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6918                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6919                          MLX5_UDP_PORT_MPLS);
6920                 break;
6921         case MLX5_FLOW_LAYER_GRE:
6922                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6923                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6924                          RTE_ETHER_TYPE_MPLS);
6925                 break;
6926         default:
6927                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6928                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6929                          IPPROTO_MPLS);
6930                 break;
6931         }
6932         if (!in_mpls_v)
6933                 return;
6934         if (!in_mpls_m)
6935                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6936         switch (prev_layer) {
6937         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6938                 out_mpls_m =
6939                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6940                                                  outer_first_mpls_over_udp);
6941                 out_mpls_v =
6942                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6943                                                  outer_first_mpls_over_udp);
6944                 break;
6945         case MLX5_FLOW_LAYER_GRE:
6946                 out_mpls_m =
6947                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6948                                                  outer_first_mpls_over_gre);
6949                 out_mpls_v =
6950                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6951                                                  outer_first_mpls_over_gre);
6952                 break;
6953         default:
6954                 /* Inner MPLS not over GRE is not supported. */
6955                 if (!inner) {
6956                         out_mpls_m =
6957                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6958                                                          misc2_m,
6959                                                          outer_first_mpls);
6960                         out_mpls_v =
6961                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6962                                                          misc2_v,
6963                                                          outer_first_mpls);
6964                 }
6965                 break;
6966         }
6967         if (out_mpls_m && out_mpls_v) {
6968                 *out_mpls_m = *in_mpls_m;
6969                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
6970         }
6971 }
6972
6973 /**
6974  * Add metadata register item to matcher
6975  *
6976  * @param[in, out] matcher
6977  *   Flow matcher.
6978  * @param[in, out] key
6979  *   Flow matcher value.
6980  * @param[in] reg_type
6981  *   Type of device metadata register.
6982  * @param[in] data
6983  *   Register value to match.
6984  * @param[in] mask
6985  *   Register mask.
6986  */
6987 static void
6988 flow_dv_match_meta_reg(void *matcher, void *key,
6989                        enum modify_reg reg_type,
6990                        uint32_t data, uint32_t mask)
6991 {
6992         void *misc2_m =
6993                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6994         void *misc2_v =
6995                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6996         uint32_t temp;
6997
6998         data &= mask;
6999         switch (reg_type) {
7000         case REG_A:
7001                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7002                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7003                 break;
7004         case REG_B:
7005                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7006                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7007                 break;
7008         case REG_C_0:
7009                 /*
7010                  * The metadata register C0 field might be divided into
7011                  * source vport index and META item value, we should set
7012                  * this field according to specified mask, not as whole one.
7013                  */
7014                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7015                 temp |= mask;
7016                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7017                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7018                 temp &= ~mask;
7019                 temp |= data;
7020                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7021                 break;
7022         case REG_C_1:
7023                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7024                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7025                 break;
7026         case REG_C_2:
7027                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7028                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7029                 break;
7030         case REG_C_3:
7031                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7032                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7033                 break;
7034         case REG_C_4:
7035                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7036                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7037                 break;
7038         case REG_C_5:
7039                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7040                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7041                 break;
7042         case REG_C_6:
7043                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7044                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7045                 break;
7046         case REG_C_7:
7047                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7048                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7049                 break;
7050         default:
7051                 MLX5_ASSERT(false);
7052                 break;
7053         }
7054 }
7055
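/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_FLOW_DV_EXAMPLES guard): the REG_C_0 read-modify-write performed
 * above. Only the bits covered by the mask are replaced, so a vport tag
 * living in the other part of the register survives.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_reg_c0_rmw(void)
{
        uint32_t reg = 0xAB000000;  /* pre-existing vport metadata bits */
        uint32_t mask = 0x0000FFFF; /* portion owned by the META item */
        uint32_t data = 0x00001234 & mask;

        reg &= ~mask;
        reg |= data;
        MLX5_ASSERT(reg == 0xAB001234); /* vport bits preserved */
}
#endif
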
7056 /**
7057  * Add MARK item to matcher
7058  *
7059  * @param[in] dev
7060  *   The device to configure through.
7061  * @param[in, out] matcher
7062  *   Flow matcher.
7063  * @param[in, out] key
7064  *   Flow matcher value.
7065  * @param[in] item
7066  *   Flow pattern to translate.
7067  */
7068 static void
7069 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7070                             void *matcher, void *key,
7071                             const struct rte_flow_item *item)
7072 {
7073         struct mlx5_priv *priv = dev->data->dev_private;
7074         const struct rte_flow_item_mark *mark;
7075         uint32_t value;
7076         uint32_t mask;
7077
7078         mark = item->mask ? (const void *)item->mask :
7079                             &rte_flow_item_mark_mask;
7080         mask = mark->id & priv->sh->dv_mark_mask;
7081         mark = (const void *)item->spec;
7082         MLX5_ASSERT(mark);
7083         value = mark->id & priv->sh->dv_mark_mask & mask;
7084         if (mask) {
7085                 enum modify_reg reg;
7086
7087                 /* Get the metadata register index for the mark. */
7088                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7089                 MLX5_ASSERT(reg > 0);
7090                 if (reg == REG_C_0) {
7091                         struct mlx5_priv *priv = dev->data->dev_private;
7092                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7093                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7094
7095                         mask &= msk_c0;
7096                         mask <<= shl_c0;
7097                         value <<= shl_c0;
7098                 }
7099                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7100         }
7101 }
7102
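/*
 * Illustrative sketch, not part of the driver (hypothetical
 * MLX5_FLOW_DV_EXAMPLES guard, sample dv_regc0_mask value): aligning a
 * MARK value to the REG_C_0 sub-field as done above. rte_bsf32() returns
 * the index of the least significant set bit of the register mask, i.e.
 * the shift that places the value under the mask.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_mark_reg_c0_shift(void)
{
        uint32_t msk_c0 = 0x00ffff00; /* sample mask, middle bits in use */
        uint32_t shl_c0 = rte_bsf32(msk_c0);
        uint32_t value = 0x1234 << shl_c0;

        MLX5_ASSERT(shl_c0 == 8);
        MLX5_ASSERT(value == 0x00123400);
        MLX5_ASSERT(!(value & ~msk_c0)); /* value fits under the mask */
}
#endif
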
7103 /**
7104  * Add META item to matcher
7105  *
7106  * @param[in] dev
7107  *   The device to configure through.
7108  * @param[in, out] matcher
7109  *   Flow matcher.
7110  * @param[in, out] key
7111  *   Flow matcher value.
7112  * @param[in] attr
7113  *   Attributes of flow that includes this item.
7114  * @param[in] item
7115  *   Flow pattern to translate.
7116  */
7117 static void
7118 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7119                             void *matcher, void *key,
7120                             const struct rte_flow_attr *attr,
7121                             const struct rte_flow_item *item)
7122 {
7123         const struct rte_flow_item_meta *meta_m;
7124         const struct rte_flow_item_meta *meta_v;
7125
7126         meta_m = (const void *)item->mask;
7127         if (!meta_m)
7128                 meta_m = &rte_flow_item_meta_mask;
7129         meta_v = (const void *)item->spec;
7130         if (meta_v) {
7131                 int reg;
7132                 uint32_t value = meta_v->data;
7133                 uint32_t mask = meta_m->data;
7134
7135                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7136                 if (reg < 0)
7137                         return;
7138                 /*
7139                  * In the datapath code there are no endianness
7140                  * conversions, for performance reasons; all
7141                  * pattern conversions are done in rte_flow.
7142                  */
7143                 value = rte_cpu_to_be_32(value);
7144                 mask = rte_cpu_to_be_32(mask);
7145                 if (reg == REG_C_0) {
7146                         struct mlx5_priv *priv = dev->data->dev_private;
7147                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7148                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7149 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7150                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7151
7152                         value >>= shr_c0;
7153                         mask >>= shr_c0;
7154 #endif
7155                         value <<= shl_c0;
7156                         mask <<= shl_c0;
7157                         MLX5_ASSERT(msk_c0);
7158                         MLX5_ASSERT(!(~msk_c0 & mask));
7159                 }
7160                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7161         }
7162 }
7163
7164 /**
7165  * Add vport metadata Reg C0 item to matcher
7166  *
7167  * @param[in, out] matcher
7168  *   Flow matcher.
7169  * @param[in, out] key
7170  *   Flow matcher value.
7171  * @param[in] value
7172  *   Register value to match.
 * @param[in] mask
 *   Register mask.
7173  */
7174 static void
7175 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7176                                   uint32_t value, uint32_t mask)
7177 {
7178         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7179 }
7180
7181 /**
7182  * Add tag item to matcher
7183  *
7184  * @param[in] dev
7185  *   The device to configure through.
7186  * @param[in, out] matcher
7187  *   Flow matcher.
7188  * @param[in, out] key
7189  *   Flow matcher value.
7190  * @param[in] item
7191  *   Flow pattern to translate.
7192  */
7193 static void
7194 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7195                                 void *matcher, void *key,
7196                                 const struct rte_flow_item *item)
7197 {
7198         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7199         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7200         uint32_t mask, value;
7201
7202         MLX5_ASSERT(tag_v);
7203         value = tag_v->data;
7204         mask = tag_m ? tag_m->data : UINT32_MAX;
7205         if (tag_v->id == REG_C_0) {
7206                 struct mlx5_priv *priv = dev->data->dev_private;
7207                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7208                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7209
7210                 mask &= msk_c0;
7211                 mask <<= shl_c0;
7212                 value <<= shl_c0;
7213         }
7214         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7215 }
7216
7217 /**
7218  * Add TAG item to matcher
7219  *
7220  * @param[in] dev
7221  *   The device to configure through.
7222  * @param[in, out] matcher
7223  *   Flow matcher.
7224  * @param[in, out] key
7225  *   Flow matcher value.
7226  * @param[in] item
7227  *   Flow pattern to translate.
7228  */
7229 static void
7230 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7231                            void *matcher, void *key,
7232                            const struct rte_flow_item *item)
7233 {
7234         const struct rte_flow_item_tag *tag_v = item->spec;
7235         const struct rte_flow_item_tag *tag_m = item->mask;
7236         enum modify_reg reg;
7237
7238         MLX5_ASSERT(tag_v);
7239         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7240         /* Get the metadata register index for the tag. */
7241         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7242         MLX5_ASSERT(reg > 0);
7243         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7244 }
7245
7246 /**
7247  * Add source vport match to the specified matcher.
7248  *
7249  * @param[in, out] matcher
7250  *   Flow matcher.
7251  * @param[in, out] key
7252  *   Flow matcher value.
7253  * @param[in] port
7254  *   Source vport value to match.
7255  * @param[in] mask
7256  *   Mask to apply.
7257  */
7258 static void
7259 flow_dv_translate_item_source_vport(void *matcher, void *key,
7260                                     int16_t port, uint16_t mask)
7261 {
7262         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7263         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7264
7265         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7266         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7267 }
7268
7269 /**
7270  * Translate port-id item to eswitch match on port-id.
7271  *
7272  * @param[in] dev
7273  *   The device to configure through.
7274  * @param[in, out] matcher
7275  *   Flow matcher.
7276  * @param[in, out] key
7277  *   Flow matcher value.
7278  * @param[in] item
7279  *   Flow pattern to translate.
7280  *
7281  * @return
7282  *   0 on success, a negative errno value otherwise.
7283  */
7284 static int
7285 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7286                                void *key, const struct rte_flow_item *item)
7287 {
7288         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7289         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7290         struct mlx5_priv *priv;
7291         uint16_t mask, id;
7292
7293         mask = pid_m ? pid_m->id : 0xffff;
7294         id = pid_v ? pid_v->id : dev->data->port_id;
7295         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7296         if (!priv)
7297                 return -rte_errno;
7298         /* Translate to vport field or to metadata, depending on mode. */
7299         if (priv->vport_meta_mask)
7300                 flow_dv_translate_item_meta_vport(matcher, key,
7301                                                   priv->vport_meta_tag,
7302                                                   priv->vport_meta_mask);
7303         else
7304                 flow_dv_translate_item_source_vport(matcher, key,
7305                                                     priv->vport_id, mask);
7306         return 0;
7307 }
7308
7309 /**
7310  * Add ICMP6 item to matcher and to the value.
7311  *
7312  * @param[in, out] matcher
7313  *   Flow matcher.
7314  * @param[in, out] key
7315  *   Flow matcher value.
7316  * @param[in] item
7317  *   Flow pattern to translate.
7318  * @param[in] inner
7319  *   Item is inner pattern.
7320  */
7321 static void
7322 flow_dv_translate_item_icmp6(void *matcher, void *key,
7323                               const struct rte_flow_item *item,
7324                               int inner)
7325 {
7326         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7327         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7328         void *headers_m;
7329         void *headers_v;
7330         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7331                                      misc_parameters_3);
7332         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7333         if (inner) {
7334                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7335                                          inner_headers);
7336                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7337         } else {
7338                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7339                                          outer_headers);
7340                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7341         }
7342         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7343         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7344         if (!icmp6_v)
7345                 return;
7346         if (!icmp6_m)
7347                 icmp6_m = &rte_flow_item_icmp6_mask;
7348         /*
7349          * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
7350          * If only the protocol is specified, there is no need to match frag.
7351          */
7352         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7353         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7354         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7355         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7356                  icmp6_v->type & icmp6_m->type);
7357         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7358         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7359                  icmp6_v->code & icmp6_m->code);
7360 }
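
     /*
      * Usage sketch: an item matching ICMPv6 echo requests (type 128, any
      * code), as this translator consumes it.
      *
      *   struct rte_flow_item_icmp6 spec = { .type = 128 };
      *   struct rte_flow_item_icmp6 mask = { .type = 0xff };
      *   struct rte_flow_item item = {
      *           .type = RTE_FLOW_ITEM_TYPE_ICMP6,
      *           .spec = &spec,
      *           .mask = &mask,
      *   };
      */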
7361
7362 /**
7363  * Add ICMP item to matcher and to the value.
7364  *
7365  * @param[in, out] matcher
7366  *   Flow matcher.
7367  * @param[in, out] key
7368  *   Flow matcher value.
7369  * @param[in] item
7370  *   Flow pattern to translate.
7371  * @param[in] inner
7372  *   Item is inner pattern.
7373  */
7374 static void
7375 flow_dv_translate_item_icmp(void *matcher, void *key,
7376                             const struct rte_flow_item *item,
7377                             int inner)
7378 {
7379         const struct rte_flow_item_icmp *icmp_m = item->mask;
7380         const struct rte_flow_item_icmp *icmp_v = item->spec;
7381         void *headers_m;
7382         void *headers_v;
7383         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7384                                      misc_parameters_3);
7385         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7386         if (inner) {
7387                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7388                                          inner_headers);
7389                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7390         } else {
7391                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7392                                          outer_headers);
7393                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7394         }
7395         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7396         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7397         if (!icmp_v)
7398                 return;
7399         if (!icmp_m)
7400                 icmp_m = &rte_flow_item_icmp_mask;
7401         /*
7402          * Force the flow to match only non-fragmented IPv4 ICMP packets.
7403          * If only the protocol is specified, there is no need to match frag.
7404          */
7405         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7406         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7407         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7408                  icmp_m->hdr.icmp_type);
7409         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7410                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7411         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7412                  icmp_m->hdr.icmp_code);
7413         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7414                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7415 }
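
     /*
      * Usage sketch: matching ICMP echo requests (type 8, code 0) only.
      *
      *   struct rte_flow_item_icmp spec = {
      *           .hdr = { .icmp_type = 8, .icmp_code = 0 },
      *   };
      *   struct rte_flow_item_icmp mask = {
      *           .hdr = { .icmp_type = 0xff, .icmp_code = 0xff },
      *   };
      */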
7416
7417 /**
7418  * Add GTP item to matcher and to the value.
7419  *
7420  * @param[in, out] matcher
7421  *   Flow matcher.
7422  * @param[in, out] key
7423  *   Flow matcher value.
7424  * @param[in] item
7425  *   Flow pattern to translate.
7426  * @param[in] inner
7427  *   Item is inner pattern.
7428  */
7429 static void
7430 flow_dv_translate_item_gtp(void *matcher, void *key,
7431                            const struct rte_flow_item *item, int inner)
7432 {
7433         const struct rte_flow_item_gtp *gtp_m = item->mask;
7434         const struct rte_flow_item_gtp *gtp_v = item->spec;
7435         void *headers_m;
7436         void *headers_v;
7437         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7438                                      misc_parameters_3);
7439         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7440         uint16_t dport = RTE_GTPU_UDP_PORT;
7441
7442         if (inner) {
7443                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7444                                          inner_headers);
7445                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7446         } else {
7447                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7448                                          outer_headers);
7449                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7450         }
7451         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7452                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7453                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7454         }
7455         if (!gtp_v)
7456                 return;
7457         if (!gtp_m)
7458                 gtp_m = &rte_flow_item_gtp_mask;
7459         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7460                  gtp_m->v_pt_rsv_flags);
7461         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7462                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7463         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7464         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7465                  gtp_v->msg_type & gtp_m->msg_type);
7466         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7467                  rte_be_to_cpu_32(gtp_m->teid));
7468         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7469                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7470 }
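
     /*
      * Usage sketch: matching GTP-U traffic with a fixed TEID. When the
      * pattern leaves the UDP destination port unset, this translator
      * implies RTE_GTPU_UDP_PORT (2152).
      *
      *   struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1234) };
      *   struct rte_flow_item_gtp mask = { .teid = RTE_BE32(0xffffffff) };
      */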
7471
7472 /**
7473  * Add eCPRI item to matcher and to the value.
7474  *
7475  * @param[in] dev
7476  *   The device to configure through.
7477  * @param[in, out] matcher
7478  *   Flow matcher.
7479  * @param[in, out] key
7480  *   Flow matcher value.
7481  * @param[in] item
7482  *   Flow pattern to translate.
7485  */
7486 static void
7487 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7488                              void *key, const struct rte_flow_item *item)
7489 {
7490         struct mlx5_priv *priv = dev->data->dev_private;
7491         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7492         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7493         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7494                                      misc_parameters_4);
7495         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7496         uint32_t *samples;
7497         void *dw_m;
7498         void *dw_v;
7499
7500         if (!ecpri_v)
7501                 return;
7502         if (!ecpri_m)
7503                 ecpri_m = &rte_flow_item_ecpri_mask;
7504         /*
7505          * At most four DW samples are supported in a single matching now.
7506          * Two are used for eCPRI matching:
7507          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
7508          * 2. ID of a message: one or two bytes, mask 0xffff0000 or
7509          *    0xff000000 if any.
7510          */
7511         if (!ecpri_m->hdr.common.u32)
7512                 return;
7513         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7514         /* Need to take the whole DW as the mask to fill the entry. */
7515         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7516                             prog_sample_field_value_0);
7517         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7518                             prog_sample_field_value_0);
7519         /* Already big endian (network order) in the header. */
7520         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7521         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7522         /* Sample#0, used for matching type, offset 0. */
7523         MLX5_SET(fte_match_set_misc4, misc4_m,
7524                  prog_sample_field_id_0, samples[0]);
7525         /* It makes no sense to set the sample ID in the mask field. */
7526         MLX5_SET(fte_match_set_misc4, misc4_v,
7527                  prog_sample_field_id_0, samples[0]);
7528         /*
7529          * Check whether the message body part needs to be matched.
7530          * Wildcard rules matching only the type field should be supported.
7531          */
7532         if (ecpri_m->hdr.dummy[0]) {
7533                 switch (ecpri_v->hdr.common.type) {
7534                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7535                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7536                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7537                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7538                                             prog_sample_field_value_1);
7539                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7540                                             prog_sample_field_value_1);
7541                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7542                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7543                         /* Sample#1, to match message body, offset 4. */
7544                         MLX5_SET(fte_match_set_misc4, misc4_m,
7545                                  prog_sample_field_id_1, samples[1]);
7546                         MLX5_SET(fte_match_set_misc4, misc4_v,
7547                                  prog_sample_field_id_1, samples[1]);
7548                         break;
7549                 default:
7550                         /* Others, do not match any sample ID. */
7551                         break;
7552                 }
7553         }
7554 }
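
     /*
      * Usage sketch: matching only the eCPRI message type (IQ data) and
      * leaving the body wildcarded; the mask keeps just the type byte of
      * the first header dword, in network order as described above.
      *
      *   struct rte_flow_item_ecpri spec = { 0 }, mask = { 0 };
      *
      *   spec.hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
      *   mask.hdr.common.u32 = rte_cpu_to_be_32(0x00ff0000);
      */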
7555
7556 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7557
7558 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7559         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7560                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7561
7562 /**
7563  * Calculate flow matcher enable bitmap.
7564  *
7565  * @param match_criteria
7566  *   Pointer to flow matcher criteria.
7567  *
7568  * @return
7569  *   Bitmap of enabled fields.
7570  */
7571 static uint8_t
7572 flow_dv_matcher_enable(uint32_t *match_criteria)
7573 {
7574         uint8_t match_criteria_enable;
7575
7576         match_criteria_enable =
7577                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7578                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7579         match_criteria_enable |=
7580                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7581                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7582         match_criteria_enable |=
7583                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7584                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7585         match_criteria_enable |=
7586                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7587                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7588         match_criteria_enable |=
7589                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7590                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7591         match_criteria_enable |=
7592                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7593                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7594         return match_criteria_enable;
7595 }
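
     /*
      * Illustrative sketch (assuming the PRM bit layout above): a mask
      * that touches only the outer headers yields a criteria bitmap with
      * just the outer bit set.
      *
      *   uint32_t mc[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
      *
      *   MLX5_SET(fte_match_set_lyr_2_4,
      *            MLX5_ADDR_OF(fte_match_param, mc, outer_headers),
      *            ip_protocol, 0xff);
      *   uint8_t en = flow_dv_matcher_enable(mc);
      *
      * Here "en" equals 1u << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT.
      */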
7596
7598 /**
7599  * Get a flow table.
7600  *
7601  * @param[in, out] dev
7602  *   Pointer to rte_eth_dev structure.
7603  * @param[in] table_id
7604  *   Table id to use.
7605  * @param[in] egress
7606  *   Direction of the table.
7607  * @param[in] transfer
7608  *   E-Switch or NIC flow.
7609  * @param[out] error
7610  *   Pointer to the error structure.
7611  *
7612  * @return
7613  *   Returns the table resource based on the index, NULL in case of failure.
7614  */
7615 static struct mlx5_flow_tbl_resource *
7616 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
7617                          uint32_t table_id, uint8_t egress,
7618                          uint8_t transfer,
7619                          struct rte_flow_error *error)
7620 {
7621         struct mlx5_priv *priv = dev->data->dev_private;
7622         struct mlx5_dev_ctx_shared *sh = priv->sh;
7623         struct mlx5_flow_tbl_resource *tbl;
7624         union mlx5_flow_tbl_key table_key = {
7625                 {
7626                         .table_id = table_id,
7627                         .reserved = 0,
7628                         .domain = !!transfer,
7629                         .direction = !!egress,
7630                 }
7631         };
7632         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
7633                                                          table_key.v64);
7634         struct mlx5_flow_tbl_data_entry *tbl_data;
7635         uint32_t idx = 0;
7636         int ret;
7637         void *domain;
7638
7639         if (pos) {
7640                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
7641                                         entry);
7642                 tbl = &tbl_data->tbl;
7643                 rte_atomic32_inc(&tbl->refcnt);
7644                 return tbl;
7645         }
7646         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7647         if (!tbl_data) {
7648                 rte_flow_error_set(error, ENOMEM,
7649                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7650                                    NULL,
7651                                    "cannot allocate flow table data entry");
7652                 return NULL;
7653         }
7654         tbl_data->idx = idx;
7655         tbl = &tbl_data->tbl;
7656         pos = &tbl_data->entry;
7657         if (transfer)
7658                 domain = sh->fdb_domain;
7659         else if (egress)
7660                 domain = sh->tx_domain;
7661         else
7662                 domain = sh->rx_domain;
7663         ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
7664         if (ret) {
7665                 rte_flow_error_set(error, ENOMEM,
7666                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7667                                    NULL, "cannot create flow table object");
7668                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7669                 return NULL;
7670         }
7671         /*
7672          * No multi-threads for now, but it is still better to initialize
7673          * the reference count before inserting it into the hash list.
7674          */
7675         rte_atomic32_init(&tbl->refcnt);
7676         /* Jump action reference count is initialized here. */
7677         rte_atomic32_init(&tbl_data->jump.refcnt);
7678         pos->key = table_key.v64;
7679         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
7680         if (ret < 0) {
7681                 rte_flow_error_set(error, -ret,
7682                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7683                                    "cannot insert flow table data entry");
7684                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7685                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
                     /* Do not reference the entry just freed above. */
                     return NULL;
7686         }
7687         rte_atomic32_inc(&tbl->refcnt);
7688         return tbl;
7689 }
7690
7691 /**
7692  * Release a flow table.
7693  *
7694  * @param[in] dev
7695  *   Pointer to rte_eth_dev structure.
7696  * @param[in] tbl
7697  *   Table resource to be released.
7698  *
7699  * @return
7700  *   Returns 0 if the table was released, 1 otherwise.
7701  */
7702 static int
7703 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
7704                              struct mlx5_flow_tbl_resource *tbl)
7705 {
7706         struct mlx5_priv *priv = dev->data->dev_private;
7707         struct mlx5_dev_ctx_shared *sh = priv->sh;
7708         struct mlx5_flow_tbl_data_entry *tbl_data =
7709                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7710
7711         if (!tbl)
7712                 return 0;
7713         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
7714                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
7715
7716                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7717                 tbl->obj = NULL;
7718                 /* remove the entry from the hash list and free memory. */
7719                 mlx5_hlist_remove(sh->flow_tbls, pos);
7720                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
7721                                 tbl_data->idx);
7722                 return 0;
7723         }
7724         return 1;
7725 }
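
     /*
      * Usage sketch: get/release are reference counted and must be
      * balanced; a second get with the same table id, direction and
      * domain returns the cached entry.
      *
      *   struct mlx5_flow_tbl_resource *tbl =
      *           flow_dv_tbl_resource_get(dev, 1, 0, 0, &error);
      *
      *   if (tbl) {
      *           ...
      *           flow_dv_tbl_resource_release(dev, tbl);
      *   }
      */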
7726
7727 /**
7728  * Register the flow matcher.
7729  *
7730  * @param[in, out] dev
7731  *   Pointer to rte_eth_dev structure.
7732  * @param[in, out] matcher
7733  *   Pointer to flow matcher.
7734  * @param[in, out] key
7735  *   Pointer to flow table key.
7736  * @param[in, out] dev_flow
7737  *   Pointer to the dev_flow.
7738  * @param[out] error
7739  *   Pointer to the error structure.
7740  *
7741  * @return
7742  *   0 on success, a negative errno value otherwise and rte_errno is set.
7743  */
7744 static int
7745 flow_dv_matcher_register(struct rte_eth_dev *dev,
7746                          struct mlx5_flow_dv_matcher *matcher,
7747                          union mlx5_flow_tbl_key *key,
7748                          struct mlx5_flow *dev_flow,
7749                          struct rte_flow_error *error)
7750 {
7751         struct mlx5_priv *priv = dev->data->dev_private;
7752         struct mlx5_dev_ctx_shared *sh = priv->sh;
7753         struct mlx5_flow_dv_matcher *cache_matcher;
7754         struct mlx5dv_flow_matcher_attr dv_attr = {
7755                 .type = IBV_FLOW_ATTR_NORMAL,
7756                 .match_mask = (void *)&matcher->mask,
7757         };
7758         struct mlx5_flow_tbl_resource *tbl;
7759         struct mlx5_flow_tbl_data_entry *tbl_data;
7760         int ret;
7761
7762         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
7763                                        key->domain, error);
7764         if (!tbl)
7765                 return -rte_errno;      /* No need to refill the error info */
7766         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7767         /* Lookup from cache. */
7768         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
7769                 if (matcher->crc == cache_matcher->crc &&
7770                     matcher->priority == cache_matcher->priority &&
7771                     !memcmp((const void *)matcher->mask.buf,
7772                             (const void *)cache_matcher->mask.buf,
7773                             cache_matcher->mask.size)) {
7774                         DRV_LOG(DEBUG,
7775                                 "%s group %u priority %hd use %s "
7776                                 "matcher %p: refcnt %d++",
7777                                 key->domain ? "FDB" : "NIC", key->table_id,
7778                                 cache_matcher->priority,
7779                                 key->direction ? "tx" : "rx",
7780                                 (void *)cache_matcher,
7781                                 rte_atomic32_read(&cache_matcher->refcnt));
7782                         rte_atomic32_inc(&cache_matcher->refcnt);
7783                         dev_flow->handle->dvh.matcher = cache_matcher;
7784                         /* old matcher should not make the table ref++. */
7785                         flow_dv_tbl_resource_release(dev, tbl);
7786                         return 0;
7787                 }
7788         }
7789         /* Register new matcher. */
7790         cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
7791                                     SOCKET_ID_ANY);
7792         if (!cache_matcher) {
7793                 flow_dv_tbl_resource_release(dev, tbl);
7794                 return rte_flow_error_set(error, ENOMEM,
7795                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7796                                           "cannot allocate matcher memory");
7797         }
7798         *cache_matcher = *matcher;
7799         dv_attr.match_criteria_enable =
7800                 flow_dv_matcher_enable(cache_matcher->mask.buf);
7801         dv_attr.priority = matcher->priority;
7802         if (key->direction)
7803                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
7804         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
7805                                                &cache_matcher->matcher_object);
7806         if (ret) {
7807                 mlx5_free(cache_matcher);
7808 #ifdef HAVE_MLX5DV_DR
7809                 flow_dv_tbl_resource_release(dev, tbl);
7810 #endif
7811                 return rte_flow_error_set(error, ENOMEM,
7812                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7813                                           NULL, "cannot create matcher");
7814         }
7815         /* Save the table information */
7816         cache_matcher->tbl = tbl;
7817         rte_atomic32_init(&cache_matcher->refcnt);
7818         /* only matcher ref++, table ref++ already done above in get API. */
7819         rte_atomic32_inc(&cache_matcher->refcnt);
7820         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
7821         dev_flow->handle->dvh.matcher = cache_matcher;
7822         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
7823                 key->domain ? "FDB" : "NIC", key->table_id,
7824                 cache_matcher->priority,
7825                 key->direction ? "tx" : "rx", (void *)cache_matcher,
7826                 rte_atomic32_read(&cache_matcher->refcnt));
7827         return 0;
7828 }
7829
7830 /**
7831  * Find existing tag resource or create and register a new one.
7832  *
7833  * @param[in, out] dev
7834  *   Pointer to rte_eth_dev structure.
7835  * @param[in] tag_be24
7836  *   Tag value in big endian, then right-shifted by 8 bits.
7837  * @param[in, out] dev_flow
7838  *   Pointer to the dev_flow.
7839  * @param[out] error
7840  *   Pointer to the error structure.
7841  *
7842  * @return
7843  *   0 on success, a negative errno value otherwise and rte_errno is set.
7844  */
7845 static int
7846 flow_dv_tag_resource_register
7847                         (struct rte_eth_dev *dev,
7848                          uint32_t tag_be24,
7849                          struct mlx5_flow *dev_flow,
7850                          struct rte_flow_error *error)
7851 {
7852         struct mlx5_priv *priv = dev->data->dev_private;
7853         struct mlx5_dev_ctx_shared *sh = priv->sh;
7854         struct mlx5_flow_dv_tag_resource *cache_resource;
7855         struct mlx5_hlist_entry *entry;
7856         int ret;
7857
7858         /* Lookup a matching resource from cache. */
7859         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
7860         if (entry) {
7861                 cache_resource = container_of
7862                         (entry, struct mlx5_flow_dv_tag_resource, entry);
7863                 rte_atomic32_inc(&cache_resource->refcnt);
7864                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
7865                 dev_flow->dv.tag_resource = cache_resource;
7866                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
7867                         (void *)cache_resource,
7868                         rte_atomic32_read(&cache_resource->refcnt));
7869                 return 0;
7870         }
7871         /* Register new resource. */
7872         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
7873                                        &dev_flow->handle->dvh.rix_tag);
7874         if (!cache_resource)
7875                 return rte_flow_error_set(error, ENOMEM,
7876                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7877                                           "cannot allocate resource memory");
7878         cache_resource->entry.key = (uint64_t)tag_be24;
7879         ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
7880                                                   &cache_resource->action);
7881         if (ret) {
7882                 /* The entry came from the indexed pool, free it there. */
                     mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                     dev_flow->handle->dvh.rix_tag);
7883                 return rte_flow_error_set(error, ENOMEM,
7884                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7885                                           NULL, "cannot create action");
7886         }
7887         rte_atomic32_init(&cache_resource->refcnt);
7888         rte_atomic32_inc(&cache_resource->refcnt);
7889         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
7890                 mlx5_flow_os_destroy_flow_action(cache_resource->action);
7891                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                     dev_flow->handle->dvh.rix_tag);
7892                 return rte_flow_error_set(error, EEXIST,
7893                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7894                                           NULL, "cannot insert tag");
7895         }
7896         dev_flow->dv.tag_resource = cache_resource;
7897         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
7898                 (void *)cache_resource,
7899                 rte_atomic32_read(&cache_resource->refcnt));
7900         return 0;
7901 }
7902
7903 /**
7904  * Release the tag.
7905  *
7906  * @param dev
7907  *   Pointer to Ethernet device.
7908  * @param tag_idx
7909  *   Tag index.
7910  *
7911  * @return
7912  *   1 while a reference on it exists, 0 when freed.
7913  */
7914 static int
7915 flow_dv_tag_release(struct rte_eth_dev *dev,
7916                     uint32_t tag_idx)
7917 {
7918         struct mlx5_priv *priv = dev->data->dev_private;
7919         struct mlx5_dev_ctx_shared *sh = priv->sh;
7920         struct mlx5_flow_dv_tag_resource *tag;
7921
7922         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7923         if (!tag)
7924                 return 0;
7925         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
7926                 dev->data->port_id, (void *)tag,
7927                 rte_atomic32_read(&tag->refcnt));
7928         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7929                 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
7930                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
7931                 DRV_LOG(DEBUG, "port %u tag %p: removed",
7932                         dev->data->port_id, (void *)tag);
7933                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7934                 return 0;
7935         }
7936         return 1;
7937 }
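
     /*
      * Usage sketch: register/release are balanced by the tag index
      * stored in the flow handle; the action is destroyed only when the
      * last reference is dropped.
      *
      *   if (!flow_dv_tag_resource_register(dev, tag_be24, dev_flow, &err))
      *           ...
      *   flow_dv_tag_release(dev, dev_flow->handle->dvh.rix_tag);
      */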
7938
7939 /**
7940  * Translate port ID action to vport.
7941  *
7942  * @param[in] dev
7943  *   Pointer to rte_eth_dev structure.
7944  * @param[in] action
7945  *   Pointer to the port ID action.
7946  * @param[out] dst_port_id
7947  *   The target port ID.
7948  * @param[out] error
7949  *   Pointer to the error structure.
7950  *
7951  * @return
7952  *   0 on success, a negative errno value otherwise and rte_errno is set.
7953  */
7954 static int
7955 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7956                                  const struct rte_flow_action *action,
7957                                  uint32_t *dst_port_id,
7958                                  struct rte_flow_error *error)
7959 {
7960         uint32_t port;
7961         struct mlx5_priv *priv;
7962         const struct rte_flow_action_port_id *conf =
7963                         (const struct rte_flow_action_port_id *)action->conf;
7964
7965         port = conf->original ? dev->data->port_id : conf->id;
7966         priv = mlx5_port_to_eswitch_info(port, false);
7967         if (!priv)
7968                 return rte_flow_error_set(error, -rte_errno,
7969                                           RTE_FLOW_ERROR_TYPE_ACTION,
7970                                           NULL,
7971                                           "No eswitch info was found for port");
7972 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7973         /*
7974          * This parameter is transferred to
7975          * mlx5dv_dr_action_create_dest_ib_port().
7976          */
7977         *dst_port_id = priv->dev_port;
7978 #else
7979         /*
7980          * Legacy mode, no LAG configuration is supported.
7981          * This parameter is transferred to
7982          * mlx5dv_dr_action_create_dest_vport().
7983          */
7984         *dst_port_id = priv->vport_id;
7985 #endif
7986         return 0;
7987 }
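
     /*
      * Usage sketch: the action configuration this helper resolves;
      * "peer_port" is a placeholder for the DPDK port id of the peer.
      *
      *   struct rte_flow_action_port_id conf = {
      *           .original = 0,
      *           .id = peer_port,
      *   };
      *   struct rte_flow_action action = {
      *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
      *           .conf = &conf,
      *   };
      */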
7988
7989 /**
7990  * Create a counter with aging configuration.
7991  *
7992  * @param[in] dev
7993  *   Pointer to rte_eth_dev structure.
      * @param[in, out] dev_flow
      *   Pointer to the sub flow.
7994  * @param[in] count
7995  *   Pointer to the counter action configuration.
7996  * @param[in] age
7997  *   Pointer to the aging action configuration.
7998  *
7999  * @return
8000  *   Index to flow counter on success, 0 otherwise.
8001  */
8002 static uint32_t
8003 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8004                                 struct mlx5_flow *dev_flow,
8005                                 const struct rte_flow_action_count *count,
8006                                 const struct rte_flow_action_age *age)
8007 {
8008         uint32_t counter;
8009         struct mlx5_age_param *age_param;
8010
8011         counter = flow_dv_counter_alloc(dev,
8012                                 count ? count->shared : 0,
8013                                 count ? count->id : 0,
8014                                 dev_flow->dv.group, !!age);
8015         if (!counter || age == NULL)
8016                 return counter;
8017         age_param = flow_dv_counter_idx_get_age(dev, counter);
8022         age_param->context = age->context ? age->context :
8023                 (void *)(uintptr_t)(dev_flow->flow_idx);
8024         /*
8025          * The counter age accuracy may have a bit of delay. Apply a 3/4
8026          * second bias on the timeout in order to let it age in time.
8027          */
8028         age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
8029         age_param->port_id = dev->data->port_id;
8030         /* Set expire time in units of 0.1 sec. */
8031         age_param->expire = age_param->timeout +
8032                         rte_rdtsc() / (rte_get_tsc_hz() / 10);
8033         rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
8034         return counter;
8035 }
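
     /*
      * Usage sketch: an AGE action with a 10 second timeout. The timeout
      * is kept internally in units of 0.1 sec, so 10 seconds become 100
      * minus the MLX5_AGING_TIME_DELAY bias; a NULL context falls back to
      * the flow index.
      *
      *   struct rte_flow_action_age age = {
      *           .timeout = 10,
      *           .context = NULL,
      *   };
      */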

8036 /**
8037  * Add Tx queue matcher.
8038  *
8039  * @param[in] dev
8040  *   Pointer to the dev struct.
8041  * @param[in, out] matcher
8042  *   Flow matcher.
8043  * @param[in, out] key
8044  *   Flow matcher value.
8045  * @param[in] item
8046  *   Flow pattern to translate.
8049  */
8050 static void
8051 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8052                                 void *matcher, void *key,
8053                                 const struct rte_flow_item *item)
8054 {
8055         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8056         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8057         void *misc_m =
8058                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8059         void *misc_v =
8060                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8061         struct mlx5_txq_ctrl *txq;
8062         uint32_t queue;
8063
8065         queue_m = (const void *)item->mask;
8066         if (!queue_m)
8067                 return;
8068         queue_v = (const void *)item->spec;
8069         if (!queue_v)
8070                 return;
8071         txq = mlx5_txq_get(dev, queue_v->queue);
8072         if (!txq)
8073                 return;
8074         queue = txq->obj->sq->id;
8075         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8076         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8077                  queue & queue_m->queue);
8078         mlx5_txq_release(dev, queue_v->queue);
8079 }
8080
8081 /**
8082  * Set the hash fields according to the @p dev_flow information.
8083  *
8084  * @param[in] dev_flow
8085  *   Pointer to the mlx5_flow.
8086  * @param[in] rss_desc
8087  *   Pointer to the mlx5_flow_rss_desc.
8088  */
8089 static void
8090 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8091                        struct mlx5_flow_rss_desc *rss_desc)
8092 {
8093         uint64_t items = dev_flow->handle->layers;
8094         int rss_inner = 0;
8095         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8096
8097         dev_flow->hash_fields = 0;
8098 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8099         if (rss_desc->level >= 2) {
8100                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8101                 rss_inner = 1;
8102         }
8103 #endif
8104         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8105             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8106                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8107                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8108                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8109                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8110                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8111                         else
8112                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8113                 }
8114         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8115                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8116                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8117                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8118                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8119                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8120                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8121                         else
8122                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8123                 }
8124         }
8125         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8126             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8127                 if (rss_types & ETH_RSS_UDP) {
8128                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8129                                 dev_flow->hash_fields |=
8130                                                 IBV_RX_HASH_SRC_PORT_UDP;
8131                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8132                                 dev_flow->hash_fields |=
8133                                                 IBV_RX_HASH_DST_PORT_UDP;
8134                         else
8135                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8136                 }
8137         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8138                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8139                 if (rss_types & ETH_RSS_TCP) {
8140                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8141                                 dev_flow->hash_fields |=
8142                                                 IBV_RX_HASH_SRC_PORT_TCP;
8143                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8144                                 dev_flow->hash_fields |=
8145                                                 IBV_RX_HASH_DST_PORT_TCP;
8146                         else
8147                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8148                 }
8149         }
8150 }
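
     /*
      * Worked sketch: for a flow whose layers include outer IPv4 and UDP,
      * rss_desc->types == (ETH_RSS_IPV4 | ETH_RSS_UDP |
      * ETH_RSS_L4_DST_ONLY) resolves to:
      *
      *   dev_flow->hash_fields ==
      *           (MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_DST_PORT_UDP)
      */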
8151
8152 /**
8153  * Fill the flow with DV spec. Lock free: the required mutex must
8154  * be acquired by the caller.
8155  *
8156  * @param[in] dev
8157  *   Pointer to rte_eth_dev structure.
8158  * @param[in, out] dev_flow
8159  *   Pointer to the sub flow.
8160  * @param[in] attr
8161  *   Pointer to the flow attributes.
8162  * @param[in] items
8163  *   Pointer to the list of items.
8164  * @param[in] actions
8165  *   Pointer to the list of actions.
8166  * @param[out] error
8167  *   Pointer to the error structure.
8168  *
8169  * @return
8170  *   0 on success, a negative errno value otherwise and rte_errno is set.
8171  */
8172 static int
8173 __flow_dv_translate(struct rte_eth_dev *dev,
8174                     struct mlx5_flow *dev_flow,
8175                     const struct rte_flow_attr *attr,
8176                     const struct rte_flow_item items[],
8177                     const struct rte_flow_action actions[],
8178                     struct rte_flow_error *error)
8179 {
8180         struct mlx5_priv *priv = dev->data->dev_private;
8181         struct mlx5_dev_config *dev_conf = &priv->config;
8182         struct rte_flow *flow = dev_flow->flow;
8183         struct mlx5_flow_handle *handle = dev_flow->handle;
8184         struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
8185                                               priv->rss_desc)
8186                                               [!!priv->flow_nested_idx];
8187         uint64_t item_flags = 0;
8188         uint64_t last_item = 0;
8189         uint64_t action_flags = 0;
8190         uint64_t priority = attr->priority;
8191         struct mlx5_flow_dv_matcher matcher = {
8192                 .mask = {
8193                         .size = sizeof(matcher.mask.buf) -
8194                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
8195                 },
8196         };
8197         int actions_n = 0;
8198         bool actions_end = false;
8199         union {
8200                 struct mlx5_flow_dv_modify_hdr_resource res;
8201                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
8202                             sizeof(struct mlx5_modification_cmd) *
8203                             (MLX5_MAX_MODIFY_NUM + 1)];
8204         } mhdr_dummy;
8205         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
8206         const struct rte_flow_action_count *count = NULL;
8207         const struct rte_flow_action_age *age = NULL;
8208         union flow_dv_attr flow_attr = { .attr = 0 };
8209         uint32_t tag_be;
8210         union mlx5_flow_tbl_key tbl_key;
8211         uint32_t modify_action_position = UINT32_MAX;
8212         void *match_mask = matcher.mask.buf;
8213         void *match_value = dev_flow->dv.value.buf;
8214         uint8_t next_protocol = 0xff;
8215         struct rte_vlan_hdr vlan = { 0 };
8216         uint32_t table;
8217         int ret = 0;
8218
8219         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
8220                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
8221         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
8222                                        !!priv->fdb_def_rule, &table, error);
8223         if (ret)
8224                 return ret;
8225         dev_flow->dv.group = table;
8226         if (attr->transfer)
8227                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
8228         if (priority == MLX5_FLOW_PRIO_RSVD)
8229                 priority = dev_conf->flow_prio - 1;
8230         /* Number of actions must be set to 0 in case of a dirty stack. */
8231         mhdr_res->actions_num = 0;
8232         for (; !actions_end ; actions++) {
8233                 const struct rte_flow_action_queue *queue;
8234                 const struct rte_flow_action_rss *rss;
8235                 const struct rte_flow_action *action = actions;
8236                 const uint8_t *rss_key;
8237                 const struct rte_flow_action_jump *jump_data;
8238                 const struct rte_flow_action_meter *mtr;
8239                 struct mlx5_flow_tbl_resource *tbl;
8240                 uint32_t port_id = 0;
8241                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
8242                 int action_type = actions->type;
8243                 const struct rte_flow_action *found_action = NULL;
8244                 struct mlx5_flow_meter *fm = NULL;
8245
8246                 if (!mlx5_flow_os_action_supported(action_type))
8247                         return rte_flow_error_set(error, ENOTSUP,
8248                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8249                                                   actions,
8250                                                   "action not supported");
8251                 switch (action_type) {
8252                 case RTE_FLOW_ACTION_TYPE_VOID:
8253                         break;
8254                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
8255                         if (flow_dv_translate_action_port_id(dev, action,
8256                                                              &port_id, error))
8257                                 return -rte_errno;
8258                         port_id_resource.port_id = port_id;
8259                         MLX5_ASSERT(!handle->rix_port_id_action);
8260                         if (flow_dv_port_id_action_resource_register
8261                             (dev, &port_id_resource, dev_flow, error))
8262                                 return -rte_errno;
8263                         dev_flow->dv.actions[actions_n++] =
8264                                         dev_flow->dv.port_id_action->action;
8265                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
8266                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
8267                         break;
8268                 case RTE_FLOW_ACTION_TYPE_FLAG:
8269                         action_flags |= MLX5_FLOW_ACTION_FLAG;
8270                         dev_flow->handle->mark = 1;
8271                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8272                                 struct rte_flow_action_mark mark = {
8273                                         .id = MLX5_FLOW_MARK_DEFAULT,
8274                                 };
8275
8276                                 if (flow_dv_convert_action_mark(dev, &mark,
8277                                                                 mhdr_res,
8278                                                                 error))
8279                                         return -rte_errno;
8280                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8281                                 break;
8282                         }
8283                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
8284                         /*
8285                          * Only one FLAG or MARK is supported per device flow
8286                          * right now. So the pointer to the tag resource must be
8287                          * zero before the register process.
8288                          */
8289                         MLX5_ASSERT(!handle->dvh.rix_tag);
8290                         if (flow_dv_tag_resource_register(dev, tag_be,
8291                                                           dev_flow, error))
8292                                 return -rte_errno;
8293                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8294                         dev_flow->dv.actions[actions_n++] =
8295                                         dev_flow->dv.tag_resource->action;
8296                         break;
8297                 case RTE_FLOW_ACTION_TYPE_MARK:
8298                         action_flags |= MLX5_FLOW_ACTION_MARK;
8299                         dev_flow->handle->mark = 1;
8300                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8301                                 const struct rte_flow_action_mark *mark =
8302                                         (const struct rte_flow_action_mark *)
8303                                                 actions->conf;
8304
8305                                 if (flow_dv_convert_action_mark(dev, mark,
8306                                                                 mhdr_res,
8307                                                                 error))
8308                                         return -rte_errno;
8309                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8310                                 break;
8311                         }
8312                         /* Fall-through */
8313                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
8314                         /* Legacy (non-extensive) MARK action. */
8315                         tag_be = mlx5_flow_mark_set
8316                               (((const struct rte_flow_action_mark *)
8317                                (actions->conf))->id);
8318                         MLX5_ASSERT(!handle->dvh.rix_tag);
8319                         if (flow_dv_tag_resource_register(dev, tag_be,
8320                                                           dev_flow, error))
8321                                 return -rte_errno;
8322                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8323                         dev_flow->dv.actions[actions_n++] =
8324                                         dev_flow->dv.tag_resource->action;
8325                         break;
8326                 case RTE_FLOW_ACTION_TYPE_SET_META:
8327                         if (flow_dv_convert_action_set_meta
8328                                 (dev, mhdr_res, attr,
8329                                  (const struct rte_flow_action_set_meta *)
8330                                   actions->conf, error))
8331                                 return -rte_errno;
8332                         action_flags |= MLX5_FLOW_ACTION_SET_META;
8333                         break;
8334                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
8335                         if (flow_dv_convert_action_set_tag
8336                                 (dev, mhdr_res,
8337                                  (const struct rte_flow_action_set_tag *)
8338                                   actions->conf, error))
8339                                 return -rte_errno;
8340                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8341                         break;
8342                 case RTE_FLOW_ACTION_TYPE_DROP:
8343                         action_flags |= MLX5_FLOW_ACTION_DROP;
8344                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
8345                         break;
8346                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8347                         queue = actions->conf;
8348                         rss_desc->queue_num = 1;
8349                         rss_desc->queue[0] = queue->index;
8350                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8351                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8352                         break;
8353                 case RTE_FLOW_ACTION_TYPE_RSS:
8354                         rss = actions->conf;
8355                         memcpy(rss_desc->queue, rss->queue,
8356                                rss->queue_num * sizeof(uint16_t));
8357                         rss_desc->queue_num = rss->queue_num;
8358                         /* NULL RSS key indicates default RSS key. */
8359                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
8360                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
8361                         /*
8362                          * rss->level and rss->types should be set in advance
8363                          * when expanding items for RSS.
8364                          */
8365                         action_flags |= MLX5_FLOW_ACTION_RSS;
8366                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8367                         break;
8368                 case RTE_FLOW_ACTION_TYPE_AGE:
8369                 case RTE_FLOW_ACTION_TYPE_COUNT:
8370                         if (!dev_conf->devx) {
8371                                 return rte_flow_error_set
8372                                               (error, ENOTSUP,
8373                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8374                                                NULL,
8375                                                "count action not supported");
8376                         }
8377                         /* Save information first, will apply later. */
8378                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
8379                                 count = action->conf;
8380                         else
8381                                 age = action->conf;
8382                         action_flags |= MLX5_FLOW_ACTION_COUNT;
8383                         break;
8384                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
8385                         dev_flow->dv.actions[actions_n++] =
8386                                                 priv->sh->pop_vlan_action;
8387                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
8388                         break;
8389                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
8390                         if (!(action_flags &
8391                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
8392                                 flow_dev_get_vlan_info_from_items(items, &vlan);
8393                         vlan.eth_proto = rte_be_to_cpu_16
8394                              ((((const struct rte_flow_action_of_push_vlan *)
8395                                                    actions->conf)->ethertype));
8396                         found_action = mlx5_flow_find_action
8397                                         (actions + 1,
8398                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
8399                         if (found_action)
8400                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8401                         found_action = mlx5_flow_find_action
8402                                         (actions + 1,
8403                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
8404                         if (found_action)
8405                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8406                         if (flow_dv_create_action_push_vlan
8407                                             (dev, attr, &vlan, dev_flow, error))
8408                                 return -rte_errno;
8409                         dev_flow->dv.actions[actions_n++] =
8410                                         dev_flow->dv.push_vlan_res->action;
8411                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
8412                         break;
8413                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
8414                         /* The push VLAN action already handled this one. */
8415                         MLX5_ASSERT(action_flags &
8416                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
8417                         break;
8418                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
8419                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8420                                 break;
8421                         flow_dev_get_vlan_info_from_items(items, &vlan);
8422                         mlx5_update_vlan_vid_pcp(actions, &vlan);
8423                         /* If no VLAN push - this is a modify header action */
8424                         if (flow_dv_convert_action_modify_vlan_vid
8425                                                 (mhdr_res, actions, error))
8426                                 return -rte_errno;
8427                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
8428                         break;
8429                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
8430                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
8431                         if (flow_dv_create_action_l2_encap(dev, actions,
8432                                                            dev_flow,
8433                                                            attr->transfer,
8434                                                            error))
8435                                 return -rte_errno;
8436                         dev_flow->dv.actions[actions_n++] =
8437                                         dev_flow->dv.encap_decap->action;
8438                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8439                         break;
8440                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
8441                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
8442                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
8443                                                            attr->transfer,
8444                                                            error))
8445                                 return -rte_errno;
8446                         dev_flow->dv.actions[actions_n++] =
8447                                         dev_flow->dv.encap_decap->action;
8448                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8449                         break;
8450                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
8451                         /* Handle encap with preceding decap. */
8452                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
8453                                 if (flow_dv_create_action_raw_encap
8454                                         (dev, actions, dev_flow, attr, error))
8455                                         return -rte_errno;
8456                                 dev_flow->dv.actions[actions_n++] =
8457                                         dev_flow->dv.encap_decap->action;
8458                         } else {
8459                                 /* Handle encap without preceding decap. */
8460                                 if (flow_dv_create_action_l2_encap
8461                                     (dev, actions, dev_flow, attr->transfer,
8462                                      error))
8463                                         return -rte_errno;
8464                                 dev_flow->dv.actions[actions_n++] =
8465                                         dev_flow->dv.encap_decap->action;
8466                         }
8467                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8468                         break;
8469                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
8470                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
8471                                 ;
8472                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
8473                                 if (flow_dv_create_action_l2_decap
8474                                     (dev, dev_flow, attr->transfer, error))
8475                                         return -rte_errno;
8476                                 dev_flow->dv.actions[actions_n++] =
8477                                         dev_flow->dv.encap_decap->action;
8478                         }
8479                         /* If decap is followed by encap, handle it at encap. */
8480                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8481                         break;
8482                 case RTE_FLOW_ACTION_TYPE_JUMP:
8483                         jump_data = action->conf;
8484                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
8485                                                        jump_data->group,
8486                                                        !!priv->fdb_def_rule,
8487                                                        &table, error);
8488                         if (ret)
8489                                 return ret;
8490                         tbl = flow_dv_tbl_resource_get(dev, table,
8491                                                        attr->egress,
8492                                                        attr->transfer, error);
8493                         if (!tbl)
8494                                 return rte_flow_error_set
8495                                                 (error, errno,
8496                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8497                                                  NULL,
8498                                                  "cannot create jump action.");
8499                         if (flow_dv_jump_tbl_resource_register
8500                             (dev, tbl, dev_flow, error)) {
8501                                 flow_dv_tbl_resource_release(dev, tbl);
8502                                 return rte_flow_error_set
8503                                                 (error, errno,
8504                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8505                                                  NULL,
8506                                                  "cannot create jump action.");
8507                         }
8508                         dev_flow->dv.actions[actions_n++] =
8509                                         dev_flow->dv.jump->action;
8510                         action_flags |= MLX5_FLOW_ACTION_JUMP;
8511                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
8512                         break;
8513                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
8514                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
8515                         if (flow_dv_convert_action_modify_mac
8516                                         (mhdr_res, actions, error))
8517                                 return -rte_errno;
8518                         action_flags |= actions->type ==
8519                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
8520                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
8521                                         MLX5_FLOW_ACTION_SET_MAC_DST;
8522                         break;
8523                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
8524                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
8525                         if (flow_dv_convert_action_modify_ipv4
8526                                         (mhdr_res, actions, error))
8527                                 return -rte_errno;
8528                         action_flags |= actions->type ==
8529                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
8530                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
8531                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
8532                         break;
8533                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
8534                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
8535                         if (flow_dv_convert_action_modify_ipv6
8536                                         (mhdr_res, actions, error))
8537                                 return -rte_errno;
8538                         action_flags |= actions->type ==
8539                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
8540                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
8541                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
8542                         break;
8543                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
8544                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
8545                         if (flow_dv_convert_action_modify_tp
8546                                         (mhdr_res, actions, items,
8547                                          &flow_attr, dev_flow, !!(action_flags &
8548                                          MLX5_FLOW_ACTION_DECAP), error))
8549                                 return -rte_errno;
8550                         action_flags |= actions->type ==
8551                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
8552                                         MLX5_FLOW_ACTION_SET_TP_SRC :
8553                                         MLX5_FLOW_ACTION_SET_TP_DST;
8554                         break;
8555                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
8556                         if (flow_dv_convert_action_modify_dec_ttl
8557                                         (mhdr_res, items, &flow_attr, dev_flow,
8558                                          !!(action_flags &
8559                                          MLX5_FLOW_ACTION_DECAP), error))
8560                                 return -rte_errno;
8561                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
8562                         break;
8563                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
8564                         if (flow_dv_convert_action_modify_ttl
8565                                         (mhdr_res, actions, items, &flow_attr,
8566                                          dev_flow, !!(action_flags &
8567                                          MLX5_FLOW_ACTION_DECAP), error))
8568                                 return -rte_errno;
8569                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
8570                         break;
8571                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
8572                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
8573                         if (flow_dv_convert_action_modify_tcp_seq
8574                                         (mhdr_res, actions, error))
8575                                 return -rte_errno;
8576                         action_flags |= actions->type ==
8577                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
8578                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
8579                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
8580                         break;
8581
8582                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
8583                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
8584                         if (flow_dv_convert_action_modify_tcp_ack
8585                                         (mhdr_res, actions, error))
8586                                 return -rte_errno;
8587                         action_flags |= actions->type ==
8588                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
8589                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
8590                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
8591                         break;
8592                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
8593                         if (flow_dv_convert_action_set_reg
8594                                         (mhdr_res, actions, error))
8595                                 return -rte_errno;
8596                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8597                         break;
8598                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
8599                         if (flow_dv_convert_action_copy_mreg
8600                                         (dev, mhdr_res, actions, error))
8601                                 return -rte_errno;
8602                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8603                         break;
8604                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
8605                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
8606                         dev_flow->handle->fate_action =
8607                                         MLX5_FLOW_FATE_DEFAULT_MISS;
8608                         break;
8609                 case RTE_FLOW_ACTION_TYPE_METER:
8610                         mtr = actions->conf;
8611                         if (!flow->meter) {
8612                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
8613                                                             attr, error);
8614                                 if (!fm)
8615                                         return rte_flow_error_set(error,
8616                                                 rte_errno,
8617                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8618                                                 NULL,
8619                                                 "meter not found "
8620                                                 "or invalid parameters");
8621                                 flow->meter = fm->idx;
8622                         }
8623                         /* Set the meter action. */
8624                         if (!fm) {
8625                                 fm = mlx5_ipool_get(priv->sh->ipool
8626                                                 [MLX5_IPOOL_MTR], flow->meter);
8627                                 if (!fm)
8628                                         return rte_flow_error_set(error,
8629                                                 rte_errno,
8630                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8631                                                 NULL,
8632                                                 "meter not found "
8633                                                 "or invalid parameters");
8634                         }
8635                         dev_flow->dv.actions[actions_n++] =
8636                                 fm->mfts->meter_action;
8637                         action_flags |= MLX5_FLOW_ACTION_METER;
8638                         break;
8639                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
8640                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
8641                                                               actions, error))
8642                                 return -rte_errno;
8643                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
8644                         break;
8645                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
8646                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
8647                                                               actions, error))
8648                                 return -rte_errno;
8649                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
8650                         break;
8651                 case RTE_FLOW_ACTION_TYPE_END:
8652                         actions_end = true;
8653                         if (mhdr_res->actions_num) {
8654                                 /* Create the modify header action if needed. */
8655                                 if (flow_dv_modify_hdr_resource_register
8656                                         (dev, mhdr_res, dev_flow, error))
8657                                         return -rte_errno;
8658                                 dev_flow->dv.actions[modify_action_position] =
8659                                         handle->dvh.modify_hdr->action;
8660                         }
8661                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
8662                                 flow->counter =
8663                                         flow_dv_translate_create_counter(dev,
8664                                                 dev_flow, count, age);
8665
8666                                 if (!flow->counter)
8667                                         return rte_flow_error_set
8668                                                 (error, rte_errno,
8669                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8670                                                 NULL,
8671                                                 "cannot create counter"
8672                                                 " object.");
8673                                 dev_flow->dv.actions[actions_n++] =
8674                                           (flow_dv_counter_get_by_idx(dev,
8675                                           flow->counter, NULL))->action;
8676                         }
8677                         break;
8678                 default:
8679                         break;
8680                 }
8681                 if (mhdr_res->actions_num &&
8682                     modify_action_position == UINT32_MAX)
8683                         modify_action_position = actions_n++;
8684         }
8685         dev_flow->dv.actions_n = actions_n;
8686         dev_flow->act_flags = action_flags;
8687         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
8688                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
8689                 int item_type = items->type;
8690
8691                 if (!mlx5_flow_os_item_supported(item_type))
8692                         return rte_flow_error_set(error, ENOTSUP,
8693                                                   RTE_FLOW_ERROR_TYPE_ITEM,
8694                                                   NULL, "item not supported");
8695                 switch (item_type) {
8696                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
8697                         flow_dv_translate_item_port_id(dev, match_mask,
8698                                                        match_value, items);
8699                         last_item = MLX5_FLOW_ITEM_PORT_ID;
8700                         break;
8701                 case RTE_FLOW_ITEM_TYPE_ETH:
8702                         flow_dv_translate_item_eth(match_mask, match_value,
8703                                                    items, tunnel,
8704                                                    dev_flow->dv.group);
8705                         matcher.priority = action_flags &
8706                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
8707                                         !dev_flow->external ?
8708                                         MLX5_PRIORITY_MAP_L3 :
8709                                         MLX5_PRIORITY_MAP_L2;
8710                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8711                                              MLX5_FLOW_LAYER_OUTER_L2;
8712                         break;
8713                 case RTE_FLOW_ITEM_TYPE_VLAN:
8714                         flow_dv_translate_item_vlan(dev_flow,
8715                                                     match_mask, match_value,
8716                                                     items, tunnel,
8717                                                     dev_flow->dv.group);
8718                         matcher.priority = MLX5_PRIORITY_MAP_L2;
8719                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
8720                                               MLX5_FLOW_LAYER_INNER_VLAN) :
8721                                              (MLX5_FLOW_LAYER_OUTER_L2 |
8722                                               MLX5_FLOW_LAYER_OUTER_VLAN);
8723                         break;
8724                 case RTE_FLOW_ITEM_TYPE_IPV4:
8725                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8726                                                   &item_flags, &tunnel);
8727                         flow_dv_translate_item_ipv4(match_mask, match_value,
8728                                                     items, item_flags, tunnel,
8729                                                     dev_flow->dv.group);
8730                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8731                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8732                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8733                         if (items->mask != NULL &&
8734                             ((const struct rte_flow_item_ipv4 *)
8735                              items->mask)->hdr.next_proto_id) {
8736                                 next_protocol =
8737                                         ((const struct rte_flow_item_ipv4 *)
8738                                          (items->spec))->hdr.next_proto_id;
8739                                 next_protocol &=
8740                                         ((const struct rte_flow_item_ipv4 *)
8741                                          (items->mask))->hdr.next_proto_id;
8742                         } else {
8743                                 /* Reset for inner layer. */
8744                                 next_protocol = 0xff;
8745                         }
8746                         break;
8747                 case RTE_FLOW_ITEM_TYPE_IPV6:
8748                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8749                                                   &item_flags, &tunnel);
8750                         flow_dv_translate_item_ipv6(match_mask, match_value,
8751                                                     items, item_flags, tunnel,
8752                                                     dev_flow->dv.group);
8753                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8754                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8755                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8756                         if (items->mask != NULL &&
8757                             ((const struct rte_flow_item_ipv6 *)
8758                              items->mask)->hdr.proto) {
8759                                 next_protocol =
8760                                         ((const struct rte_flow_item_ipv6 *)
8761                                          items->spec)->hdr.proto;
8762                                 next_protocol &=
8763                                         ((const struct rte_flow_item_ipv6 *)
8764                                          items->mask)->hdr.proto;
8765                         } else {
8766                                 /* Reset for inner layer. */
8767                                 next_protocol = 0xff;
8768                         }
8769                         break;
8770                 case RTE_FLOW_ITEM_TYPE_TCP:
8771                         flow_dv_translate_item_tcp(match_mask, match_value,
8772                                                    items, tunnel);
8773                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8774                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8775                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
8776                         break;
8777                 case RTE_FLOW_ITEM_TYPE_UDP:
8778                         flow_dv_translate_item_udp(match_mask, match_value,
8779                                                    items, tunnel);
8780                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8781                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8782                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
8783                         break;
8784                 case RTE_FLOW_ITEM_TYPE_GRE:
8785                         flow_dv_translate_item_gre(match_mask, match_value,
8786                                                    items, tunnel);
8787                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8788                         last_item = MLX5_FLOW_LAYER_GRE;
8789                         break;
8790                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8791                         flow_dv_translate_item_gre_key(match_mask,
8792                                                        match_value, items);
8793                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
8794                         break;
8795                 case RTE_FLOW_ITEM_TYPE_NVGRE:
8796                         flow_dv_translate_item_nvgre(match_mask, match_value,
8797                                                      items, tunnel);
8798                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8799                         last_item = MLX5_FLOW_LAYER_GRE;
8800                         break;
8801                 case RTE_FLOW_ITEM_TYPE_VXLAN:
8802                         flow_dv_translate_item_vxlan(match_mask, match_value,
8803                                                      items, tunnel);
8804                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8805                         last_item = MLX5_FLOW_LAYER_VXLAN;
8806                         break;
8807                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8808                         flow_dv_translate_item_vxlan_gpe(match_mask,
8809                                                          match_value, items,
8810                                                          tunnel);
8811                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8812                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8813                         break;
8814                 case RTE_FLOW_ITEM_TYPE_GENEVE:
8815                         flow_dv_translate_item_geneve(match_mask, match_value,
8816                                                       items, tunnel);
8817                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8818                         last_item = MLX5_FLOW_LAYER_GENEVE;
8819                         break;
8820                 case RTE_FLOW_ITEM_TYPE_MPLS:
8821                         flow_dv_translate_item_mpls(match_mask, match_value,
8822                                                     items, last_item, tunnel);
8823                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8824                         last_item = MLX5_FLOW_LAYER_MPLS;
8825                         break;
8826                 case RTE_FLOW_ITEM_TYPE_MARK:
8827                         flow_dv_translate_item_mark(dev, match_mask,
8828                                                     match_value, items);
8829                         last_item = MLX5_FLOW_ITEM_MARK;
8830                         break;
8831                 case RTE_FLOW_ITEM_TYPE_META:
8832                         flow_dv_translate_item_meta(dev, match_mask,
8833                                                     match_value, attr, items);
8834                         last_item = MLX5_FLOW_ITEM_METADATA;
8835                         break;
8836                 case RTE_FLOW_ITEM_TYPE_ICMP:
8837                         flow_dv_translate_item_icmp(match_mask, match_value,
8838                                                     items, tunnel);
8839                         last_item = MLX5_FLOW_LAYER_ICMP;
8840                         break;
8841                 case RTE_FLOW_ITEM_TYPE_ICMP6:
8842                         flow_dv_translate_item_icmp6(match_mask, match_value,
8843                                                       items, tunnel);
8844                         last_item = MLX5_FLOW_LAYER_ICMP6;
8845                         break;
8846                 case RTE_FLOW_ITEM_TYPE_TAG:
8847                         flow_dv_translate_item_tag(dev, match_mask,
8848                                                    match_value, items);
8849                         last_item = MLX5_FLOW_ITEM_TAG;
8850                         break;
8851                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8852                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
8853                                                         match_value, items);
8854                         last_item = MLX5_FLOW_ITEM_TAG;
8855                         break;
8856                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8857                         flow_dv_translate_item_tx_queue(dev, match_mask,
8858                                                         match_value,
8859                                                         items);
8860                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
8861                         break;
8862                 case RTE_FLOW_ITEM_TYPE_GTP:
8863                         flow_dv_translate_item_gtp(match_mask, match_value,
8864                                                    items, tunnel);
8865                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
8866                         last_item = MLX5_FLOW_LAYER_GTP;
8867                         break;
8868                 case RTE_FLOW_ITEM_TYPE_ECPRI:
8869                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
8870                                 /* Create it only the first time it is used. */
8871                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
8872                                 if (ret)
8873                                         return rte_flow_error_set
8874                                                 (error, -ret,
8875                                                 RTE_FLOW_ERROR_TYPE_ITEM,
8876                                                 NULL,
8877                                                 "cannot create eCPRI parser");
8878                         }
8879                         /* Adjust the length matcher and device flow value. */
8880                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
8881                         dev_flow->dv.value.size =
8882                                         MLX5_ST_SZ_BYTES(fte_match_param);
8883                         flow_dv_translate_item_ecpri(dev, match_mask,
8884                                                      match_value, items);
8885                         /* No other protocol should follow the eCPRI layer. */
8886                         last_item = MLX5_FLOW_LAYER_ECPRI;
8887                         break;
8888                 default:
8889                         break;
8890                 }
8891                 item_flags |= last_item;
8892         }
8893         /*
8894          * When E-Switch mode is enabled, we have two cases where we need to
8895          * set the source port manually.
8896          * The first is a NIC steering rule; the second is an E-Switch
8897          * rule where no port_id item was found. In both cases the
8898          * source port is set according to the current port in use.
8899          */
8900         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
8901             (priv->representor || priv->master)) {
8902                 if (flow_dv_translate_item_port_id(dev, match_mask,
8903                                                    match_value, NULL))
8904                         return -rte_errno;
8905         }
8906 #ifdef RTE_LIBRTE_MLX5_DEBUG
8907         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
8908                                               dev_flow->dv.value.buf));
8909 #endif
8910         /*
8911          * Layers may be already initialized from prefix flow if this dev_flow
8912          * is the suffix flow.
8913          */
8914         handle->layers |= item_flags;
8915         if (action_flags & MLX5_FLOW_ACTION_RSS)
8916                 flow_dv_hashfields_set(dev_flow, rss_desc);
8917         /* Register matcher. */
8918         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
8919                                     matcher.mask.size);
8920         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
8921                                                      matcher.priority);
8922         /* The reserved field does not need to be set to 0 here. */
8923         tbl_key.domain = attr->transfer;
8924         tbl_key.direction = attr->egress;
8925         tbl_key.table_id = dev_flow->dv.group;
8926         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
8927                 return -rte_errno;
8928         return 0;
8929 }
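/*
 * Editor's sketch (not part of the driver): the deferred modify-header
 * placement used by the action loop above. The first header-modifying
 * action only reserves a slot in dev_flow->dv.actions[]; one combined
 * modify-header object is registered at RTE_FLOW_ACTION_TYPE_END and
 * patched into that slot:
 *
 *     uint32_t modify_action_position = UINT32_MAX;
 *
 *     for (...each action until actions_end...) {
 *             ...accumulate header edits into mhdr_res...
 *             if (mhdr_res->actions_num &&
 *                 modify_action_position == UINT32_MAX)
 *                     modify_action_position = actions_n++;
 *     }
 *     ...at END, after flow_dv_modify_hdr_resource_register()...
 *     dev_flow->dv.actions[modify_action_position] =
 *                     handle->dvh.modify_hdr->action;
 */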
8930
8931 /**
8932  * Apply the flow to the NIC, lock free
8933  * (mutex should be acquired by the caller).
8934  *
8935  * @param[in] dev
8936  *   Pointer to the Ethernet device structure.
8937  * @param[in, out] flow
8938  *   Pointer to flow structure.
8939  * @param[out] error
8940  *   Pointer to error structure.
8941  *
8942  * @return
8943  *   0 on success, a negative errno value otherwise and rte_errno is set.
8944  */
8945 static int
8946 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
8947                 struct rte_flow_error *error)
8948 {
8949         struct mlx5_flow_dv_workspace *dv;
8950         struct mlx5_flow_handle *dh;
8951         struct mlx5_flow_handle_dv *dv_h;
8952         struct mlx5_flow *dev_flow;
8953         struct mlx5_priv *priv = dev->data->dev_private;
8954         uint32_t handle_idx;
8955         int n;
8956         int err;
8957         int idx;
8958
8959         for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
8960                 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
8961                 dv = &dev_flow->dv;
8962                 dh = dev_flow->handle;
8963                 dv_h = &dh->dvh;
8964                 n = dv->actions_n;
8965                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8966                         if (dv->transfer) {
8967                                 dv->actions[n++] = priv->sh->esw_drop_action;
8968                         } else {
8969                                 struct mlx5_hrxq *drop_hrxq;
8970                                 drop_hrxq = mlx5_drop_action_create(dev);
8971                                 if (!drop_hrxq) {
8972                                         rte_flow_error_set
8973                                                 (error, errno,
8974                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8975                                                  NULL,
8976                                                  "cannot get drop hash queue");
8977                                         goto error;
8978                                 }
8979                                 /*
8980                                  * Drop queues will be released by the dedicated
8981                                  * mlx5_drop_action_destroy() function. Assign
8982                                  * the special index to hrxq to mark that the
8983                                  * queue has been allocated.
8984                                  */
8985                                 dh->rix_hrxq = UINT32_MAX;
8986                                 dv->actions[n++] = drop_hrxq->action;
8987                         }
8988                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8989                         struct mlx5_hrxq *hrxq;
8990                         uint32_t hrxq_idx;
8991                         struct mlx5_flow_rss_desc *rss_desc =
8992                                 &((struct mlx5_flow_rss_desc *)priv->rss_desc)
8993                                 [!!priv->flow_nested_idx];
8994
8995                         MLX5_ASSERT(rss_desc->queue_num);
8996                         hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
8997                                                  MLX5_RSS_HASH_KEY_LEN,
8998                                                  dev_flow->hash_fields,
8999                                                  rss_desc->queue,
9000                                                  rss_desc->queue_num);
9001                         if (!hrxq_idx) {
9002                                 hrxq_idx = mlx5_hrxq_new
9003                                                 (dev, rss_desc->key,
9004                                                  MLX5_RSS_HASH_KEY_LEN,
9005                                                  dev_flow->hash_fields,
9006                                                  rss_desc->queue,
9007                                                  rss_desc->queue_num,
9008                                                  !!(dh->layers &
9009                                                  MLX5_FLOW_LAYER_TUNNEL));
9010                         }
9011                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9012                                               hrxq_idx);
9013                         if (!hrxq) {
9014                                 rte_flow_error_set
9015                                         (error, rte_errno,
9016                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9017                                          "cannot get hash queue");
9018                                 goto error;
9019                         }
9020                         dh->rix_hrxq = hrxq_idx;
9021                         dv->actions[n++] = hrxq->action;
9022                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
9023                         if (flow_dv_default_miss_resource_register
9024                                         (dev, error)) {
9025                                 rte_flow_error_set
9026                                         (error, rte_errno,
9027                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
9028                                          "cannot create default miss resource");
9029                                 goto error_default_miss;
9030                         }
9031                         dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
9032                         dv->actions[n++] = priv->sh->default_miss.action;
9033                 }
9034                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
9035                                                (void *)&dv->value, n,
9036                                                dv->actions, &dh->drv_flow);
9037                 if (err) {
9038                         rte_flow_error_set(error, errno,
9039                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9040                                            NULL,
9041                                            "hardware refuses to create flow");
9042                         goto error;
9043                 }
9044                 if (priv->vmwa_context &&
9045                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
9046                         /*
9047                          * The rule contains the VLAN pattern.
9048                          * For a VF we are going to create a VLAN
9049                          * interface to make the hypervisor set the
9050                          * correct e-Switch vport context.
9051                          */
9052                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
9053                 }
9054         }
9055         return 0;
9056 error:
9057         if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
9058                 flow_dv_default_miss_resource_release(dev);
9059 error_default_miss:
9060         err = rte_errno; /* Save rte_errno before cleanup. */
9061         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
9062                        handle_idx, dh, next) {
9063                 /* hrxq is a union; don't clear it if the flag is not set. */
9064                 if (dh->rix_hrxq) {
9065                         if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
9066                                 mlx5_drop_action_destroy(dev);
9067                                 dh->rix_hrxq = 0;
9068                         } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
9069                                 mlx5_hrxq_release(dev, dh->rix_hrxq);
9070                                 dh->rix_hrxq = 0;
9071                         }
9072                 }
9073                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
9074                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
9075         }
9076         rte_errno = err; /* Restore rte_errno. */
9077         return -rte_errno;
9078 }
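/*
 * Editor's note (sketch, not part of the driver): the error path above
 * uses the errno-preserving cleanup idiom, because the release helpers
 * may themselves overwrite rte_errno:
 *
 *     err = rte_errno;        // save the original failure code
 *     ...release partially applied hrxq/drop/VLAN resources...
 *     rte_errno = err;        // restore it for the caller
 *     return -rte_errno;
 */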
9079
9080 /**
9081  * Release the flow matcher.
9082  *
9083  * @param dev
9084  *   Pointer to Ethernet device.
9085  * @param handle
9086  *   Pointer to mlx5_flow_handle.
9087  *
9088  * @return
9089  *   1 while a reference on it exists, 0 when freed.
9090  */
9091 static int
9092 flow_dv_matcher_release(struct rte_eth_dev *dev,
9093                         struct mlx5_flow_handle *handle)
9094 {
9095         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
9096
9097         MLX5_ASSERT(matcher->matcher_object);
9098         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
9099                 dev->data->port_id, (void *)matcher,
9100                 rte_atomic32_read(&matcher->refcnt));
9101         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
9102                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9103                            (matcher->matcher_object));
9104                 LIST_REMOVE(matcher, next);
9105                 /* Table reference is decremented in the release interface. */
9106                 flow_dv_tbl_resource_release(dev, matcher->tbl);
9107                 mlx5_free(matcher);
9108                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
9109                         dev->data->port_id, (void *)matcher);
9110                 return 0;
9111         }
9112         return 1;
9113 }
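/*
 * Editor's sketch (not part of the driver): the atomic release pattern
 * shared by flow_dv_matcher_release() and the *_resource_release()
 * helpers below; destroy_hw_object() and unlink_and_free() are
 * hypothetical placeholders for the per-resource cleanup:
 *
 *     if (rte_atomic32_dec_and_test(&res->refcnt)) {
 *             claim_zero(destroy_hw_object(res));
 *             unlink_and_free(res);
 *             return 0;       // last reference dropped, object freed
 *     }
 *     return 1;               // other references still exist
 */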
9114
9115 /**
9116  * Release an encap/decap resource.
9117  *
9118  * @param dev
9119  *   Pointer to Ethernet device.
9120  * @param handle
9121  *   Pointer to mlx5_flow_handle.
9122  *
9123  * @return
9124  *   1 while a reference on it exists, 0 when freed.
9125  */
9126 static int
9127 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
9128                                      struct mlx5_flow_handle *handle)
9129 {
9130         struct mlx5_priv *priv = dev->data->dev_private;
9131         uint32_t idx = handle->dvh.rix_encap_decap;
9132         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
9133
9134         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
9135                          idx);
9136         if (!cache_resource)
9137                 return 0;
9138         MLX5_ASSERT(cache_resource->action);
9139         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
9140                 (void *)cache_resource,
9141                 rte_atomic32_read(&cache_resource->refcnt));
9142         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9143                 claim_zero(mlx5_flow_os_destroy_flow_action
9144                                                 (cache_resource->action));
9145                 mlx5_hlist_remove(priv->sh->encaps_decaps,
9146                                   &cache_resource->entry);
9147                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
9148                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
9149                         (void *)cache_resource);
9150                 return 0;
9151         }
9152         return 1;
9153 }
9154
9155 /**
9156  * Release a jump-to-table action resource.
9157  *
9158  * @param dev
9159  *   Pointer to Ethernet device.
9160  * @param handle
9161  *   Pointer to mlx5_flow_handle.
9162  *
9163  * @return
9164  *   1 while a reference on it exists, 0 when freed.
9165  */
9166 static int
9167 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
9168                                   struct mlx5_flow_handle *handle)
9169 {
9170         struct mlx5_priv *priv = dev->data->dev_private;
9171         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
9172         struct mlx5_flow_tbl_data_entry *tbl_data;
9173
9174         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
9175                              handle->rix_jump);
9176         if (!tbl_data)
9177                 return 0;
9178         cache_resource = &tbl_data->jump;
9179         MLX5_ASSERT(cache_resource->action);
9180         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
9181                 (void *)cache_resource,
9182                 rte_atomic32_read(&cache_resource->refcnt));
9183         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9184                 claim_zero(mlx5_flow_os_destroy_flow_action
9185                                                 (cache_resource->action));
9186                 /* Jump action memory is freed inside the table release. */
9187                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
9188                 DRV_LOG(DEBUG, "jump table resource %p: removed",
9189                         (void *)cache_resource);
9190                 return 0;
9191         }
9192         return 1;
9193 }
9194
9195 /**
9196  * Release a default miss resource.
9197  *
9198  * @param dev
9199  *   Pointer to Ethernet device.
9200  * @return
9201  *   1 while a reference on it exists, 0 when freed.
9202  */
9203 static int
9204 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
9205 {
9206         struct mlx5_priv *priv = dev->data->dev_private;
9207         struct mlx5_dev_ctx_shared *sh = priv->sh;
9208         struct mlx5_flow_default_miss_resource *cache_resource =
9209                         &sh->default_miss;
9210
9211         MLX5_ASSERT(cache_resource->action);
9212         DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
9213                         (void *)cache_resource->action,
9214                         rte_atomic32_read(&cache_resource->refcnt));
9215         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9216                 claim_zero(mlx5_glue->destroy_flow_action
9217                                 (cache_resource->action));
9218                 DRV_LOG(DEBUG, "default miss resource %p: removed",
9219                                 (void *)cache_resource->action);
9220                 return 0;
9221         }
9222         return 1;
9223 }
9224
9225 /**
9226  * Release a modify-header resource.
9227  *
9228  * @param dev
9229  *   Pointer to Ethernet device.
9230  * @param handle
9231  *   Pointer to mlx5_flow_handle.
9232  *
9233  * @return
9234  *   1 while a reference on it exists, 0 when freed.
9235  */
9236 static int
9237 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
9238                                     struct mlx5_flow_handle *handle)
9239 {
9240         struct mlx5_priv *priv = dev->data->dev_private;
9241         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
9242                                                         handle->dvh.modify_hdr;
9243
9244         MLX5_ASSERT(cache_resource->action);
9245         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
9246                 (void *)cache_resource,
9247                 rte_atomic32_read(&cache_resource->refcnt));
9248         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9249                 claim_zero(mlx5_flow_os_destroy_flow_action
9250                                                 (cache_resource->action));
9251                 mlx5_hlist_remove(priv->sh->modify_cmds,
9252                                   &cache_resource->entry);
9253                 mlx5_free(cache_resource);
9254                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
9255                         (void *)cache_resource);
9256                 return 0;
9257         }
9258         return 1;
9259 }
9260
9261 /**
9262  * Release port ID action resource.
9263  *
9264  * @param dev
9265  *   Pointer to Ethernet device.
9266  * @param handle
9267  *   Pointer to mlx5_flow_handle.
9268  *
9269  * @return
9270  *   1 while a reference on it exists, 0 when freed.
9271  */
9272 static int
9273 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
9274                                         struct mlx5_flow_handle *handle)
9275 {
9276         struct mlx5_priv *priv = dev->data->dev_private;
9277         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
9278         uint32_t idx = handle->rix_port_id_action;
9279
9280         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9281                                         idx);
9282         if (!cache_resource)
9283                 return 0;
9284         MLX5_ASSERT(cache_resource->action);
9285         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
9286                 (void *)cache_resource,
9287                 rte_atomic32_read(&cache_resource->refcnt));
9288         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9289                 claim_zero(mlx5_flow_os_destroy_flow_action
9290                                                 (cache_resource->action));
9291                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9292                              &priv->sh->port_id_action_list, idx,
9293                              cache_resource, next);
9294                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
9295                 DRV_LOG(DEBUG, "port id action resource %p: removed",
9296                         (void *)cache_resource);
9297                 return 0;
9298         }
9299         return 1;
9300 }
9301
9302 /**
9303  * Release push vlan action resource.
9304  *
9305  * @param dev
9306  *   Pointer to Ethernet device.
9307  * @param handle
9308  *   Pointer to mlx5_flow_handle.
9309  *
9310  * @return
9311  *   1 while a reference on it exists, 0 when freed.
9312  */
9313 static int
9314 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
9315                                           struct mlx5_flow_handle *handle)
9316 {
9317         struct mlx5_priv *priv = dev->data->dev_private;
9318         uint32_t idx = handle->dvh.rix_push_vlan;
9319         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
9320
9321         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9322                                         idx);
9323         if (!cache_resource)
9324                 return 0;
9325         MLX5_ASSERT(cache_resource->action);
9326         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
9327                 (void *)cache_resource,
9328                 rte_atomic32_read(&cache_resource->refcnt));
9329         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9330                 claim_zero(mlx5_flow_os_destroy_flow_action
9331                                                 (cache_resource->action));
9332                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9333                              &priv->sh->push_vlan_action_list, idx,
9334                              cache_resource, next);
9335                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
9336                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
9337                         (void *)cache_resource);
9338                 return 0;
9339         }
9340         return 1;
9341 }
9342
9343 /**
9344  * Release the fate resource.
9345  *
9346  * @param dev
9347  *   Pointer to Ethernet device.
9348  * @param handle
9349  *   Pointer to mlx5_flow_handle.
9350  */
9351 static void
9352 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
9353                                struct mlx5_flow_handle *handle)
9354 {
9355         if (!handle->rix_fate)
9356                 return;
9357         switch (handle->fate_action) {
9358         case MLX5_FLOW_FATE_DROP:
9359                 mlx5_drop_action_destroy(dev);
9360                 break;
9361         case MLX5_FLOW_FATE_QUEUE:
9362                 mlx5_hrxq_release(dev, handle->rix_hrxq);
9363                 break;
9364         case MLX5_FLOW_FATE_JUMP:
9365                 flow_dv_jump_tbl_resource_release(dev, handle);
9366                 break;
9367         case MLX5_FLOW_FATE_PORT_ID:
9368                 flow_dv_port_id_action_resource_release(dev, handle);
9369                 break;
9370         case MLX5_FLOW_FATE_DEFAULT_MISS:
9371                 flow_dv_default_miss_resource_release(dev);
9372                 break;
9373         default:
9374                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
9375                 break;
9376         }
9377         handle->rix_fate = 0;
9378 }
9379
9380 /**
9381  * Remove the flow from the NIC but keep it in memory.
9382  * Lock free (mutex should be acquired by the caller).
9383  *
9384  * @param[in] dev
9385  *   Pointer to Ethernet device.
9386  * @param[in, out] flow
9387  *   Pointer to flow structure.
9388  */
9389 static void
9390 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9391 {
9392         struct mlx5_flow_handle *dh;
9393         uint32_t handle_idx;
9394         struct mlx5_priv *priv = dev->data->dev_private;
9395
9396         if (!flow)
9397                 return;
9398         handle_idx = flow->dev_handles;
9399         while (handle_idx) {
9400                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9401                                     handle_idx);
9402                 if (!dh)
9403                         return;
9404                 if (dh->drv_flow) {
9405                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
9406                         dh->drv_flow = NULL;
9407                 }
9408                 if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
9409                     dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
9410                     dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
9411                         flow_dv_fate_resource_release(dev, dh);
9412                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
9413                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
9414                 handle_idx = dh->next.next;
9415         }
9416 }
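/*
 * Editor's sketch (not part of the driver): device handles are chained
 * by index inside an indexed pool rather than by pointer, with index 0
 * terminating the list, which is what the loop above relies on:
 *
 *     uint32_t idx = flow->dev_handles;
 *
 *     while (idx) {
 *             dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
 *                                 idx);
 *             if (!dh)
 *                     break;
 *             ...use dh...
 *             idx = dh->next.next;
 *     }
 */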
9417
9418 /**
9419  * Remove the flow from the NIC and the memory.
9420  * Lock free, (mutex should be acquired by caller).
9421  * Lock free (mutex should be acquired by the caller).
9422  * @param[in] dev
9423  *   Pointer to the Ethernet device structure.
9424  * @param[in, out] flow
9425  *   Pointer to flow structure.
9426  */
9427 static void
9428 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9429 {
9430         struct mlx5_flow_handle *dev_handle;
9431         struct mlx5_priv *priv = dev->data->dev_private;
9432
9433         if (!flow)
9434                 return;
9435         __flow_dv_remove(dev, flow);
9436         if (flow->counter) {
9437                 flow_dv_counter_release(dev, flow->counter);
9438                 flow->counter = 0;
9439         }
9440         if (flow->meter) {
9441                 struct mlx5_flow_meter *fm;
9442
9443                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
9444                                     flow->meter);
9445                 if (fm)
9446                         mlx5_flow_meter_detach(fm);
9447                 flow->meter = 0;
9448         }
9449         while (flow->dev_handles) {
9450                 uint32_t tmp_idx = flow->dev_handles;
9451
9452                 dev_handle = mlx5_ipool_get(priv->sh->ipool
9453                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
9454                 if (!dev_handle)
9455                         return;
9456                 flow->dev_handles = dev_handle->next.next;
9457                 if (dev_handle->dvh.matcher)
9458                         flow_dv_matcher_release(dev, dev_handle);
9459                 if (dev_handle->dvh.rix_encap_decap)
9460                         flow_dv_encap_decap_resource_release(dev, dev_handle);
9461                 if (dev_handle->dvh.modify_hdr)
9462                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
9463                 if (dev_handle->dvh.rix_push_vlan)
9464                         flow_dv_push_vlan_action_resource_release(dev,
9465                                                                   dev_handle);
9466                 if (dev_handle->dvh.rix_tag)
9467                         flow_dv_tag_release(dev,
9468                                             dev_handle->dvh.rix_tag);
9469                 flow_dv_fate_resource_release(dev, dev_handle);
9470                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9471                            tmp_idx);
9472         }
9473 }
9474
9475 /**
9476  * Query a DV flow rule for its statistics via DevX.
9477  *
9478  * @param[in] dev
9479  *   Pointer to Ethernet device.
9480  * @param[in] flow
9481  *   Pointer to the sub flow.
9482  * @param[out] data
9483  *   Data retrieved by the query.
9484  * @param[out] error
9485  *   Perform verbose error reporting if not NULL.
9486  *
9487  * @return
9488  *   0 on success, a negative errno value otherwise and rte_errno is set.
9489  */
9490 static int
9491 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
9492                     void *data, struct rte_flow_error *error)
9493 {
9494         struct mlx5_priv *priv = dev->data->dev_private;
9495         struct rte_flow_query_count *qc = data;
9496
9497         if (!priv->config.devx)
9498                 return rte_flow_error_set(error, ENOTSUP,
9499                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9500                                           NULL,
9501                                           "counters are not supported");
9502         if (flow->counter) {
9503                 uint64_t pkts, bytes;
9504                 struct mlx5_flow_counter *cnt;
9505
9506                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
9507                                                  NULL);
9508                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
9509                                                &bytes);
9510
9511                 if (err)
9512                         return rte_flow_error_set(error, -err,
9513                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9514                                         NULL, "cannot read counters");
9515                 qc->hits_set = 1;
9516                 qc->bytes_set = 1;
9517                 qc->hits = pkts - cnt->hits;
9518                 qc->bytes = bytes - cnt->bytes;
9519                 if (qc->reset) {
9520                         cnt->hits = pkts;
9521                         cnt->bytes = bytes;
9522                 }
9523                 return 0;
9524         }
9525         return rte_flow_error_set(error, EINVAL,
9526                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9527                                   NULL,
9528                                   "counters are not available");
9529 }
9530
9531 /**
9532  * Query a flow.
9533  *
9534  * @see rte_flow_query()
9535  * @see rte_flow_ops
9536  */
9537 static int
9538 flow_dv_query(struct rte_eth_dev *dev,
9539               struct rte_flow *flow __rte_unused,
9540               const struct rte_flow_action *actions __rte_unused,
9541               void *data __rte_unused,
9542               struct rte_flow_error *error __rte_unused)
9543 {
9544         int ret = -EINVAL;
9545
9546         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
9547                 switch (actions->type) {
9548                 case RTE_FLOW_ACTION_TYPE_VOID:
9549                         break;
9550                 case RTE_FLOW_ACTION_TYPE_COUNT:
9551                         ret = flow_dv_query_count(dev, flow, data, error);
9552                         break;
9553                 default:
9554                         return rte_flow_error_set(error, ENOTSUP,
9555                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9556                                                   actions,
9557                                                   "action not supported");
9558                 }
9559         }
9560         return ret;
9561 }
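/*
 * Editor's sketch (not part of the driver): how an application reaches
 * flow_dv_query_count() through the generic rte_flow API; "port_id" and
 * "flow" are assumed to exist. Counters are reported as deltas since
 * the last reset (see qc->hits = pkts - cnt->hits above):
 *
 *     struct rte_flow_query_count qc = { .reset = 1 };
 *     const struct rte_flow_action qa[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *
 *     if (rte_flow_query(port_id, flow, qa, &qc, &err) == 0 &&
 *         qc.hits_set)
 *             printf("hits: %" PRIu64 "\n", qc.hits);
 */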
9562
9563 /**
9564  * Destroy the meter table set.
9565  * Lock free (mutex should be acquired by the caller).
9566  *
9567  * @param[in] dev
9568  *   Pointer to Ethernet device.
9569  * @param[in] tbl
9570  *   Pointer to the meter table set.
9571  *
9572  * @return
9573  *   Always 0.
9574  */
9575 static int
9576 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
9577                         struct mlx5_meter_domains_infos *tbl)
9578 {
9579         struct mlx5_priv *priv = dev->data->dev_private;
9580         struct mlx5_meter_domains_infos *mtd =
9581                                 (struct mlx5_meter_domains_infos *)tbl;
9582
9583         if (!mtd || !priv->config.dv_flow_en)
9584                 return 0;
9585         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
9586                 claim_zero(mlx5_flow_os_destroy_flow
9587                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
9588         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
9589                 claim_zero(mlx5_flow_os_destroy_flow
9590                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
9591         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
9592                 claim_zero(mlx5_flow_os_destroy_flow
9593                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
9594         if (mtd->egress.color_matcher)
9595                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9596                            (mtd->egress.color_matcher));
9597         if (mtd->egress.any_matcher)
9598                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9599                            (mtd->egress.any_matcher));
9600         if (mtd->egress.tbl)
9601                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
9602         if (mtd->egress.sfx_tbl)
9603                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
9604         if (mtd->ingress.color_matcher)
9605                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9606                            (mtd->ingress.color_matcher));
9607         if (mtd->ingress.any_matcher)
9608                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9609                            (mtd->ingress.any_matcher));
9610         if (mtd->ingress.tbl)
9611                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
9612         if (mtd->ingress.sfx_tbl)
9613                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
9614         if (mtd->transfer.color_matcher)
9615                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9616                            (mtd->transfer.color_matcher));
9617         if (mtd->transfer.any_matcher)
9618                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9619                            (mtd->transfer.any_matcher));
9620         if (mtd->transfer.tbl)
9621                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
9622         if (mtd->transfer.sfx_tbl)
9623                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
9624         if (mtd->drop_actn)
9625                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
9626         mlx5_free(mtd);
9627         return 0;
9628 }
9629
9630 /* Number of meter flow actions: count and jump, or count and drop. */
9631 #define METER_ACTIONS 2
9632
9633 /**
9634  * Create the meter and suffix tables for the specified domain.
9635  *
9636  * @param[in] dev
9637  *   Pointer to Ethernet device.
9638  * @param[in,out] mtb
9639  *   Pointer to DV meter table set.
9640  * @param[in] egress
9641  *   Egress table attribute, set for the egress domain.
9642  * @param[in] transfer
9643  *   Transfer table attribute, set for the FDB (transfer) domain.
9644  * @param[in] color_reg_c_idx
9645  *   Reg C index for color match.
9646  *
9647  * @return
9648  *   0 on success, -1 otherwise and rte_errno is set.
9649  */
9650 static int
9651 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
9652                            struct mlx5_meter_domains_infos *mtb,
9653                            uint8_t egress, uint8_t transfer,
9654                            uint32_t color_reg_c_idx)
9655 {
9656         struct mlx5_priv *priv = dev->data->dev_private;
9657         struct mlx5_dev_ctx_shared *sh = priv->sh;
9658         struct mlx5_flow_dv_match_params mask = {
9659                 .size = sizeof(mask.buf),
9660         };
9661         struct mlx5_flow_dv_match_params value = {
9662                 .size = sizeof(value.buf),
9663         };
9664         struct mlx5dv_flow_matcher_attr dv_attr = {
9665                 .type = IBV_FLOW_ATTR_NORMAL,
9666                 .priority = 0,
9667                 .match_criteria_enable = 0,
9668                 .match_mask = (void *)&mask,
9669         };
9670         void *actions[METER_ACTIONS];
9671         struct mlx5_meter_domain_info *dtb;
9672         struct rte_flow_error error;
9673         int i = 0;
9674         int ret;
9675
9676         if (transfer)
9677                 dtb = &mtb->transfer;
9678         else if (egress)
9679                 dtb = &mtb->egress;
9680         else
9681                 dtb = &mtb->ingress;
9682         /* Create the meter table with METER level. */
9683         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
9684                                             egress, transfer, &error);
9685         if (!dtb->tbl) {
9686                 DRV_LOG(ERR, "Failed to create meter policer table.");
9687                 return -1;
9688         }
9689         /* Create the meter suffix table with SUFFIX level. */
9690         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
9691                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
9692                                             egress, transfer, &error);
9693         if (!dtb->sfx_tbl) {
9694                 DRV_LOG(ERR, "Failed to create meter suffix table.");
9695                 return -1;
9696         }
9697         /* Create the matchers: Any and Color. */
9698         dv_attr.priority = 3;
9699         dv_attr.match_criteria_enable = 0;
9700         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9701                                                &dtb->any_matcher);
9702         if (ret) {
9703                 DRV_LOG(ERR, "Failed to create meter"
9704                              " policer default matcher.");
9705                 goto error_exit;
9706         }
9707         dv_attr.priority = 0;
9708         dv_attr.match_criteria_enable =
9709                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9710         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
9711                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
9712         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9713                                                &dtb->color_matcher);
9714         if (ret) {
9715                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
9716                 goto error_exit;
9717         }
9718         if (mtb->count_actns[RTE_MTR_DROPPED])
9719                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
9720         actions[i++] = mtb->drop_actn;
9721         /* Default rule: lowest priority, match any, actions: drop. */
9722         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
9723                                        actions,
9724                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
9725         if (ret) {
9726                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
9727                 goto error_exit;
9728         }
9729         return 0;
9730 error_exit:
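        /* Partially created tables and matchers are freed by the caller
         * via flow_dv_destroy_mtr_tbl(). */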
9731         return -1;
9732 }
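
/*
 * Sketch of the layout built above, one instance per domain (ingress,
 * egress and, when dv_esw_en is set, transfer):
 *
 *   meter table (MLX5_FLOW_TABLE_LEVEL_METER)
 *     color matcher, priority 0: matches the color in REG_C; the
 *         per-color policer rules are added later by
 *         flow_dv_create_policer_forward_rule().
 *     any matcher, priority 3: catch-all -> [counter +] drop
 *         (the default rule created above).
 *   suffix table (MLX5_FLOW_TABLE_LEVEL_SUFFIX)
 *     jump target for packets the policer lets through.
 */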
9733
9734 /**
9735  * Create the needed meter and suffix tables.
9736  * Lock free (the mutex should be acquired by the caller).
9737  *
9738  * @param[in] dev
9739  *   Pointer to Ethernet device.
9740  * @param[in] fm
9741  *   Pointer to the flow meter.
9742  *
9743  * @return
9744  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
9745  */
9746 static struct mlx5_meter_domains_infos *
9747 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
9748                        const struct mlx5_flow_meter *fm)
9749 {
9750         struct mlx5_priv *priv = dev->data->dev_private;
9751         struct mlx5_meter_domains_infos *mtb;
9752         int ret;
9753         int i;
9754
9755         if (!priv->mtr_en) {
9756                 rte_errno = ENOTSUP;
9757                 return NULL;
9758         }
9759         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
9760         if (!mtb) {
9761                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
9762                 return NULL;
9763         }
9764         /* Create meter count actions. */
9765         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
9766                 struct mlx5_flow_counter *cnt;
9767                 if (!fm->policer_stats.cnt[i])
9768                         continue;
9769                 cnt = flow_dv_counter_get_by_idx(dev,
9770                       fm->policer_stats.cnt[i], NULL);
9771                 mtb->count_actns[i] = cnt->action;
9772         }
9773         /* Create drop action. */
9774         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
9775         if (ret) {
9776                 DRV_LOG(ERR, "Failed to create drop action.");
9777                 goto error_exit;
9778         }
9779         /* Egress meter table. */
9780         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
9781         if (ret) {
9782                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
9783                 goto error_exit;
9784         }
9785         /* Ingress meter table. */
9786         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
9787         if (ret) {
9788                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
9789                 goto error_exit;
9790         }
9791         /* FDB meter table. */
9792         if (priv->config.dv_esw_en) {
9793                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
9794                                                  priv->mtr_color_reg);
9795                 if (ret) {
9796                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
9797                         goto error_exit;
9798                 }
9799         }
9800         return mtb;
9801 error_exit:
9802         flow_dv_destroy_mtr_tbl(dev, mtb);
9803         return NULL;
9804 }
9805
9806 /**
9807  * Destroy the policer rules of a domain.
9808  *
9809  * @param[in] dt
9810  *   Pointer to the meter domain table info.
9811  */
9812 static void
9813 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
9814 {
9815         int i;
9816
9817         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9818                 if (dt->policer_rules[i]) {
9819                         claim_zero(mlx5_flow_os_destroy_flow
9820                                    (dt->policer_rules[i]));
9821                         dt->policer_rules[i] = NULL;
9822                 }
9823         }
9824         if (dt->jump_actn) {
9825                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
9826                 dt->jump_actn = NULL;
9827         }
9828 }
9829
9830 /**
9831  * Destroy policer rules.
9832  *
9833  * @param[in] dev
9834  *   Pointer to Ethernet device.
9835  * @param[in] fm
9836  *   Pointer to flow meter structure.
9837  * @param[in] attr
9838  *   Pointer to flow attributes.
9839  *
9840  * @return
9841  *   Always 0.
9842  */
9843 static int
9844 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
9845                               const struct mlx5_flow_meter *fm,
9846                               const struct rte_flow_attr *attr)
9847 {
9848         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
9849
9850         if (!mtb)
9851                 return 0;
9852         if (attr->egress)
9853                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
9854         if (attr->ingress)
9855                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
9856         if (attr->transfer)
9857                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
9858         return 0;
9859 }
9860
9861 /**
9862  * Create the meter policer rules for the specified domain.
9863  *
9864  * @param[in] fm
9865  *   Pointer to flow meter structure.
9866  * @param[in] dtb
9867  *   Pointer to the meter domain table info.
9868  * @param[in] mtr_reg_c
9869  *   Color match REG_C.
9870  *
9871  * @return
9872  *   0 on success, -1 otherwise.
9873  */
9874 static int
9875 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
9876                                     struct mlx5_meter_domain_info *dtb,
9877                                     uint8_t mtr_reg_c)
9878 {
9879         struct mlx5_flow_dv_match_params matcher = {
9880                 .size = sizeof(matcher.buf),
9881         };
9882         struct mlx5_flow_dv_match_params value = {
9883                 .size = sizeof(value.buf),
9884         };
9885         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9886         void *actions[METER_ACTIONS];
9887         int i;
9888         int ret = 0;
9889
9890         /* Create jump action. */
9891         if (!dtb->jump_actn)
9892                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
9893                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
9894         if (ret) {
9895                 DRV_LOG(ERR, "Failed to create policer jump action.");
9896                 goto error;
9897         }
9898         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9899                 int j = 0;
9900
9901                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
9902                                        rte_col_2_mlx5_col(i), UINT8_MAX);
9903                 if (mtb->count_actns[i])
9904                         actions[j++] = mtb->count_actns[i];
9905                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
9906                         actions[j++] = mtb->drop_actn;
9907                 else
9908                         actions[j++] = dtb->jump_actn;
9909                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
9910                                                (void *)&value, j, actions,
9911                                                &dtb->policer_rules[i]);
9912                 if (ret) {
9913                         DRV_LOG(ERR, "Failed to create policer rule.");
9914                         goto error;
9915                 }
9916         }
9917         return 0;
9918 error:
9919         rte_errno = errno;
9920         return -1;
9921 }
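
/*
 * A worked example of the color loop above, assuming a meter configured
 * with GREEN/YELLOW passing and RED set to MTR_POLICER_ACTION_DROP:
 *
 *   GREEN rule:  REG_C == rte_col_2_mlx5_col(GREEN),
 *                actions: [green counter,] jump to the suffix table
 *   YELLOW rule: REG_C == rte_col_2_mlx5_col(YELLOW),
 *                actions: [yellow counter,] jump to the suffix table
 *   RED rule:    REG_C == rte_col_2_mlx5_col(RED),
 *                actions: [red counter,] drop
 *
 * Packets matching none of these fall through to the lowest-priority
 * default drop rule installed by flow_dv_prepare_mtr_tables().
 */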
9922
9923 /**
9924  * Create policer rules.
9925  *
9926  * @param[in] dev
9927  *   Pointer to Ethernet device.
9928  * @param[in] fm
9929  *   Pointer to flow meter structure.
9930  * @param[in] attr
9931  *   Pointer to flow attributes.
9932  *
9933  * @return
9934  *   0 on success, -1 otherwise.
9935  */
9936 static int
9937 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
9938                              struct mlx5_flow_meter *fm,
9939                              const struct rte_flow_attr *attr)
9940 {
9941         struct mlx5_priv *priv = dev->data->dev_private;
9942         struct mlx5_meter_domains_infos *mtb = fm->mfts;
9943         int ret;
9944
9945         if (attr->egress) {
9946                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
9947                                                 priv->mtr_color_reg);
9948                 if (ret) {
9949                         DRV_LOG(ERR, "Failed to create egress policer.");
9950                         goto error;
9951                 }
9952         }
9953         if (attr->ingress) {
9954                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
9955                                                 priv->mtr_color_reg);
9956                 if (ret) {
9957                         DRV_LOG(ERR, "Failed to create ingress policer.");
9958                         goto error;
9959                 }
9960         }
9961         if (attr->transfer) {
9962                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
9963                                                 priv->mtr_color_reg);
9964                 if (ret) {
9965                         DRV_LOG(ERR, "Failed to create transfer policer.");
9966                         goto error;
9967                 }
9968         }
9969         return 0;
9970 error:
9971         flow_dv_destroy_policer_rules(dev, fm, attr);
9972         return -1;
9973 }
9974
9975 /**
9976  * Query a DevX counter.
9977  *
9978  * @param[in] dev
9979  *   Pointer to the Ethernet device structure.
9980  * @param[in] counter
9981  *   Index to the flow counter.
9982  * @param[in] clear
9983  *   Set to clear the counter statistics.
9984  * @param[out] pkts
9985  *   The statistics value of packets.
9986  * @param[out] bytes
9987  *   The statistics value of bytes.
9988  *
9989  * @return
9990  *   0 on success, -1 otherwise.
9991  */
9992 static int
9993 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
9994                       uint64_t *pkts, uint64_t *bytes)
9995 {
9996         struct mlx5_priv *priv = dev->data->dev_private;
9997         struct mlx5_flow_counter *cnt;
9998         uint64_t inn_pkts, inn_bytes;
9999         int ret;
10000
10001         if (!priv->config.devx)
10002                 return -1;
10003
10004         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
10005         if (ret)
10006                 return -1;
10007         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
10008         *pkts = inn_pkts - cnt->hits;
10009         *bytes = inn_bytes - cnt->bytes;
10010         if (clear) {
10011                 cnt->hits = inn_pkts;
10012                 cnt->bytes = inn_bytes;
10013         }
10014         return 0;
10015 }
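
/*
 * Minimal PMD-internal usage sketch (assumes a valid counter index,
 * e.g. one returned by flow_dv_counter_allocate() below):
 *
 *   uint64_t pkts, bytes;
 *
 *   if (flow_dv_counter_query(dev, counter, false, &pkts, &bytes) == 0)
 *           DRV_LOG(DEBUG, "delta since last clear: %" PRIu64
 *                   " packets, %" PRIu64 " bytes", pkts, bytes);
 */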
10016
10017 /**
10018  * Get aged-out flows.
10019  *
10020  * @param[in] dev
10021  *   Pointer to the Ethernet device structure.
10022  * @param[in] context
10023  *   The address of an array of pointers to the aged-out flow contexts.
10024  * @param[in] nb_contexts
10025  *   The length of the context array.
10026  * @param[out] error
10027  *   Perform verbose error reporting if not NULL. Initialized in case of
10028  *   error only.
10029  *
10030  * @return
10031  *   The number of contexts returned on success, otherwise a negative
10032  *   errno value. If nb_contexts is 0, the total number of aged-out
10033  *   flows is returned. If nb_contexts is not 0, the number of aged-out
10034  *   flows reported in the context array is returned.
10036  */
10037 static int
10038 flow_get_aged_flows(struct rte_eth_dev *dev,
10039                     void **context,
10040                     uint32_t nb_contexts,
10041                     struct rte_flow_error *error)
10042 {
10043         struct mlx5_priv *priv = dev->data->dev_private;
10044         struct mlx5_age_info *age_info;
10045         struct mlx5_age_param *age_param;
10046         struct mlx5_flow_counter *counter;
10047         int nb_flows = 0;
10048
10049         if (nb_contexts && !context)
10050                 return rte_flow_error_set(error, EINVAL,
10051                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10052                                           NULL,
10053                                           "context must not be NULL"
10054                                           " when nb_contexts is not zero");
10055         age_info = GET_PORT_AGE_INFO(priv);
10056         rte_spinlock_lock(&age_info->aged_sl);
10057         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
10058                 nb_flows++;
10059                 if (nb_contexts) {
10060                         age_param = MLX5_CNT_TO_AGE(counter);
10061                         context[nb_flows - 1] = age_param->context;
10062                         if (!(--nb_contexts))
10063                                 break;
10064                 }
10065         }
10066         rte_spinlock_unlock(&age_info->aged_sl);
10067         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
10068         return nb_flows;
10069 }
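
/*
 * Application-side sketch (illustrative, not driver code): after an
 * RTE_ETH_EVENT_FLOW_AGED event, drain the aged-out contexts. Each
 * context is the pointer the application set in
 * struct rte_flow_action_age; handle_aged_flow() is a hypothetical
 * application callback.
 *
 *   void *contexts[64];
 *   struct rte_flow_error err;
 *   int i, n;
 *
 *   n = rte_flow_get_aged_flows(port_id, contexts, 64, &err);
 *   for (i = 0; i < n; i++)
 *           handle_aged_flow(contexts[i]);
 */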
10070
10071 /*
10072  * Mutex-protected thunk to lock-free __flow_dv_translate().
10073  */
10074 static int
10075 flow_dv_translate(struct rte_eth_dev *dev,
10076                   struct mlx5_flow *dev_flow,
10077                   const struct rte_flow_attr *attr,
10078                   const struct rte_flow_item items[],
10079                   const struct rte_flow_action actions[],
10080                   struct rte_flow_error *error)
10081 {
10082         int ret;
10083
10084         flow_dv_shared_lock(dev);
10085         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
10086         flow_dv_shared_unlock(dev);
10087         return ret;
10088 }
10089
10090 /*
10091  * Mutex-protected thunk to lock-free __flow_dv_apply().
10092  */
10093 static int
10094 flow_dv_apply(struct rte_eth_dev *dev,
10095               struct rte_flow *flow,
10096               struct rte_flow_error *error)
10097 {
10098         int ret;
10099
10100         flow_dv_shared_lock(dev);
10101         ret = __flow_dv_apply(dev, flow, error);
10102         flow_dv_shared_unlock(dev);
10103         return ret;
10104 }
10105
10106 /*
10107  * Mutex-protected thunk to lock-free __flow_dv_remove().
10108  */
10109 static void
10110 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
10111 {
10112         flow_dv_shared_lock(dev);
10113         __flow_dv_remove(dev, flow);
10114         flow_dv_shared_unlock(dev);
10115 }
10116
10117 /*
10118  * Mutex-protected thunk to lock-free __flow_dv_destroy().
10119  */
10120 static void
10121 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
10122 {
10123         flow_dv_shared_lock(dev);
10124         __flow_dv_destroy(dev, flow);
10125         flow_dv_shared_unlock(dev);
10126 }
10127
10128 /*
10129  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
10130  */
10131 static uint32_t
10132 flow_dv_counter_allocate(struct rte_eth_dev *dev)
10133 {
10134         uint32_t cnt;
10135
10136         flow_dv_shared_lock(dev);
10137         cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
10138         flow_dv_shared_unlock(dev);
10139         return cnt;
10140 }
10141
10142 /*
10143  * Mutex-protected thunk to lock-free flow_dv_counter_release().
10144  */
10145 static void
10146 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
10147 {
10148         flow_dv_shared_lock(dev);
10149         flow_dv_counter_release(dev, cnt);
10150         flow_dv_shared_unlock(dev);
10151 }
10152
10153 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
10154         .validate = flow_dv_validate,
10155         .prepare = flow_dv_prepare,
10156         .translate = flow_dv_translate,
10157         .apply = flow_dv_apply,
10158         .remove = flow_dv_remove,
10159         .destroy = flow_dv_destroy,
10160         .query = flow_dv_query,
10161         .create_mtr_tbls = flow_dv_create_mtr_tbl,
10162         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
10163         .create_policer_rules = flow_dv_create_policer_rules,
10164         .destroy_policer_rules = flow_dv_destroy_policer_rules,
10165         .counter_alloc = flow_dv_counter_allocate,
10166         .counter_free = flow_dv_counter_free,
10167         .counter_query = flow_dv_counter_query,
10168         .get_aged_flows = flow_get_aged_flows,
10169 };
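
/*
 * These callbacks are not invoked directly: the generic layer in
 * mlx5_flow.c picks a driver-ops table per flow type and dispatches
 * through it, roughly as follows (a sketch; see flow_get_drv_ops()
 * in mlx5_flow.c for the real dispatch):
 *
 *   fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *   ret = fops->translate(dev, dev_flow, attr, items, actions, error);
 */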
10170
10171 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
10172