net/mlx5: fix group value of sample suffix flow
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
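/* In the 802.1Q TCI, PCP occupies bits 15-13, DEI bit 12 and VID bits 11-0. */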
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

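/*
 * Flow attributes detected from the flow items. The anonymous bit-field
 * struct overlays the 'attr' word, so a single write of 'attr = 0' clears
 * all layer flags at once (used when a tunnel decap restarts layer
 * detection).
 */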
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                      uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layers except in tunnel mode.
 * For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * layer flags from the prefix flow, since the suffix flow may not
         * carry the user-defined items once the flow has been split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
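                        /*
                         * After a tunnel decap the inner headers become
                         * the ones to modify, so reset the attributes and
                         * let the following inner items set them again.
                         */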
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size is in bits here, not bytes (the VID field is 12 bits wide). */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
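        /*
         * E.g. setting PCP to 5 on TCI 0x1001: vlan_tci = 5 << 13 = 0xa000,
         * so the TCI becomes (0x1001 & ~0xe000) | 0xa000 = 0xb001.
         */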
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
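 *
 *   For example, data = {0x12, 0x34, 0x56} with size = 3 yields
 *   0x123456 on both little- and big-endian hosts.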
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET and MLX5_MODIFICATION_TYPE_ADD
 *     this specifies the destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY this specifies the source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
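 *
 *   For example, a SET on the IPv4 TTL field (one byte at offset 8 in
 *   modify_ipv4, mask 0xff) yields a single command with bit offset 0
 *   and length 8.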
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should also be presented in big-endian format.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
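                /* E.g. mask = 0x00ffff00 gives off_b = 8 and size_b = 16. */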
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
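                /*
                 * With MLX5_MODIFICATION_TYPE_ADD below, adding 0xFF to the
                 * 8-bit TTL decrements it by one (modulo 256), the same
                 * trick used for TCP SEQ/ACK further down.
                 */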
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate subtracting X from Y with the increment
                 * operation, add UINT32_MAX X times to Y: each addition
                 * of UINT32_MAX decrements Y by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
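        /* E.g. X = 1: (uint32_t)(1 * UINT32_MAX) = 0xffffffff == -1 mod 2^32. */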
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * As with TCP SEQ above, subtract X by adding UINT32_MAX
                 * X times: each addition of UINT32_MAX decrements the
                 * value by 1 (modulo 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
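                /*
                 * Example: if dv_regc0_mask == 0x00ff0000, only that window
                 * of reg_c[0] is usable, so the mask and offset computed
                 * below are restricted to those 8 bits.
                 */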
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores the endianness because
                         * there is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

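                /*
                 * Swap to host order, shift the mark and its mask into the
                 * reg_c[0] window, then swap back to the big-endian layout
                 * that flow_dv_convert_modify_action() expects.
                 */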
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * In the datapath code there are no endianness
         * conversions for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset of IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte-aligned, starting
         * from bit 0 through 5, to be compatible with IPv4. So there is
         * no need to shift the bits in the IPv6 case; rdma-core requires
         * a byte-aligned value.
         */
1312         ipv6.hdr.vtc_flow = conf->dscp;
1313         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1314         item.spec = &ipv6;
1315         item.mask = &ipv6_mask;
1316         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1317                                              MLX5_MODIFICATION_TYPE_SET, error);
1318 }
1319
1320 /**
1321  * Validate MARK item.
1322  *
1323  * @param[in] dev
1324  *   Pointer to the rte_eth_dev structure.
1325  * @param[in] item
1326  *   Item specification.
1327  * @param[in] attr
1328  *   Attributes of flow that includes this item.
1329  * @param[out] error
1330  *   Pointer to error structure.
1331  *
1332  * @return
1333  *   0 on success, a negative errno value otherwise and rte_errno is set.
1334  */
1335 static int
1336 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1337                            const struct rte_flow_item *item,
1338                            const struct rte_flow_attr *attr __rte_unused,
1339                            struct rte_flow_error *error)
1340 {
1341         struct mlx5_priv *priv = dev->data->dev_private;
1342         struct mlx5_dev_config *config = &priv->config;
1343         const struct rte_flow_item_mark *spec = item->spec;
1344         const struct rte_flow_item_mark *mask = item->mask;
1345         const struct rte_flow_item_mark nic_mask = {
1346                 .id = priv->sh->dv_mark_mask,
1347         };
1348         int ret;
1349
1350         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1351                 return rte_flow_error_set(error, ENOTSUP,
1352                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1353                                           "extended metadata feature"
1354                                           " isn't enabled");
1355         if (!mlx5_flow_ext_mreg_supported(dev))
1356                 return rte_flow_error_set(error, ENOTSUP,
1357                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1358                                           "extended metadata register"
1359                                           " isn't supported");
1360         if (!nic_mask.id)
1361                 return rte_flow_error_set(error, ENOTSUP,
1362                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1363                                           "extended metadata register"
1364                                           " isn't available");
1365         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1366         if (ret < 0)
1367                 return ret;
1368         if (!spec)
1369                 return rte_flow_error_set(error, EINVAL,
1370                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1371                                           item->spec,
1372                                           "data cannot be empty");
1373         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1374                 return rte_flow_error_set(error, EINVAL,
1375                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1376                                           &spec->id,
1377                                           "mark id exceeds the limit");
1378         if (!mask)
1379                 mask = &nic_mask;
1380         if (!mask->id)
1381                 return rte_flow_error_set(error, EINVAL,
1382                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1383                                         "mask cannot be zero");
1384
1385         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1386                                         (const uint8_t *)&nic_mask,
1387                                         sizeof(struct rte_flow_item_mark),
1388                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1389         if (ret < 0)
1390                 return ret;
1391         return 0;
1392 }
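
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a MARK item as an application would build it for rte_flow_create().
 * A NULL mask falls back to the nic_mask derived from dv_mark_mask, and
 * the spec id must stay below MLX5_FLOW_MARK_MAX masked by it.
 */
static const struct rte_flow_item_mark example_mark_spec = { .id = 42 };
static const struct rte_flow_item example_mark_item = {
	.type = RTE_FLOW_ITEM_TYPE_MARK,
	.spec = &example_mark_spec,
	.mask = NULL, /* defaults to the device nic_mask above */
};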
1393
1394 /**
1395  * Validate META item.
1396  *
1397  * @param[in] dev
1398  *   Pointer to the rte_eth_dev structure.
1399  * @param[in] item
1400  *   Item specification.
1401  * @param[in] attr
1402  *   Attributes of flow that includes this item.
1403  * @param[out] error
1404  *   Pointer to error structure.
1405  *
1406  * @return
1407  *   0 on success, a negative errno value otherwise and rte_errno is set.
1408  */
1409 static int
1410 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1411                            const struct rte_flow_item *item,
1412                            const struct rte_flow_attr *attr,
1413                            struct rte_flow_error *error)
1414 {
1415         struct mlx5_priv *priv = dev->data->dev_private;
1416         struct mlx5_dev_config *config = &priv->config;
1417         const struct rte_flow_item_meta *spec = item->spec;
1418         const struct rte_flow_item_meta *mask = item->mask;
1419         struct rte_flow_item_meta nic_mask = {
1420                 .data = UINT32_MAX
1421         };
1422         int reg;
1423         int ret;
1424
1425         if (!spec)
1426                 return rte_flow_error_set(error, EINVAL,
1427                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1428                                           item->spec,
1429                                           "data cannot be empty");
1430         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1431                 if (!mlx5_flow_ext_mreg_supported(dev))
1432                         return rte_flow_error_set(error, ENOTSUP,
1433                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1434                                           "extended metadata register"
1435                                           " isn't supported");
1436                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1437                 if (reg < 0)
1438                         return reg;
1439                 if (reg == REG_B)
1440                         return rte_flow_error_set(error, ENOTSUP,
1441                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1442                                           "match on reg_b "
1443                                           "isn't supported");
1444                 if (reg != REG_A)
1445                         nic_mask.data = priv->sh->dv_meta_mask;
1446         } else if (attr->transfer) {
1447                 return rte_flow_error_set(error, ENOTSUP,
1448                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1449                                         "extended metadata feature "
1450                                         "should be enabled when "
1451                                         "meta item is requested "
1452                                         "with e-switch mode ");
1453         }
1454         if (!mask)
1455                 mask = &rte_flow_item_meta_mask;
1456         if (!mask->data)
1457                 return rte_flow_error_set(error, EINVAL,
1458                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1459                                         "mask cannot be zero");
1460
1461         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1462                                         (const uint8_t *)&nic_mask,
1463                                         sizeof(struct rte_flow_item_meta),
1464                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1465         return ret;
1466 }
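
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a META item matching the full 32-bit metadata. With dv_xmeta_en in a
 * non-legacy mode the match is placed on a metadata register (REG_A or
 * a reg_c), and matching on REG_B is rejected above.
 */
static const struct rte_flow_item_meta example_meta_spec = {
	.data = 0xcafe,
};
static const struct rte_flow_item example_meta_item = {
	.type = RTE_FLOW_ITEM_TYPE_META,
	.spec = &example_meta_spec,
	.mask = &rte_flow_item_meta_mask, /* full 'data' mask */
};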
1467
1468 /**
1469  * Validate TAG item.
1470  *
1471  * @param[in] dev
1472  *   Pointer to the rte_eth_dev structure.
1473  * @param[in] item
1474  *   Item specification.
1475  * @param[in] attr
1476  *   Attributes of flow that includes this item.
1477  * @param[out] error
1478  *   Pointer to error structure.
1479  *
1480  * @return
1481  *   0 on success, a negative errno value otherwise and rte_errno is set.
1482  */
1483 static int
1484 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1485                           const struct rte_flow_item *item,
1486                           const struct rte_flow_attr *attr __rte_unused,
1487                           struct rte_flow_error *error)
1488 {
1489         const struct rte_flow_item_tag *spec = item->spec;
1490         const struct rte_flow_item_tag *mask = item->mask;
1491         const struct rte_flow_item_tag nic_mask = {
1492                 .data = RTE_BE32(UINT32_MAX),
1493                 .index = 0xff,
1494         };
1495         int ret;
1496
1497         if (!mlx5_flow_ext_mreg_supported(dev))
1498                 return rte_flow_error_set(error, ENOTSUP,
1499                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1500                                           "extensive metadata register"
1501                                           " isn't supported");
1502         if (!spec)
1503                 return rte_flow_error_set(error, EINVAL,
1504                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1505                                           item->spec,
1506                                           "data cannot be empty");
1507         if (!mask)
1508                 mask = &rte_flow_item_tag_mask;
1509         if (!mask->data)
1510                 return rte_flow_error_set(error, EINVAL,
1511                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1512                                         "mask cannot be zero");
1513
1514         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1515                                         (const uint8_t *)&nic_mask,
1516                                         sizeof(struct rte_flow_item_tag),
1517                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1518         if (ret < 0)
1519                 return ret;
1520         if (mask->index != 0xff)
1521                 return rte_flow_error_set(error, EINVAL,
1522                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1523                                           "partial mask for tag index"
1524                                           " is not supported");
1525         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1526         if (ret < 0)
1527                 return ret;
1528         MLX5_ASSERT(ret != REG_NON);
1529         return 0;
1530 }
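
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a TAG item; the index mask must be exactly 0xff since partial index
 * masks are rejected above, and the index is translated to a metadata
 * register via mlx5_flow_get_reg_id().
 */
static const struct rte_flow_item_tag example_tag_spec = {
	.data = 0x1234,
	.index = 0,
};
static const struct rte_flow_item example_tag_item = {
	.type = RTE_FLOW_ITEM_TYPE_TAG,
	.spec = &example_tag_spec,
	.mask = &rte_flow_item_tag_mask, /* data = UINT32_MAX, index = 0xff */
};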
1531
1532 /**
1533  * Validate vport (port_id) item.
1534  *
1535  * @param[in] dev
1536  *   Pointer to the rte_eth_dev structure.
1537  * @param[in] item
1538  *   Item specification.
1539  * @param[in] attr
1540  *   Attributes of flow that includes this item.
1541  * @param[in] item_flags
1542  *   Bit-fields that holds the items detected until now.
1543  * @param[out] error
1544  *   Pointer to error structure.
1545  *
1546  * @return
1547  *   0 on success, a negative errno value otherwise and rte_errno is set.
1548  */
1549 static int
1550 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1551                               const struct rte_flow_item *item,
1552                               const struct rte_flow_attr *attr,
1553                               uint64_t item_flags,
1554                               struct rte_flow_error *error)
1555 {
1556         const struct rte_flow_item_port_id *spec = item->spec;
1557         const struct rte_flow_item_port_id *mask = item->mask;
1558         const struct rte_flow_item_port_id switch_mask = {
1559                         .id = 0xffffffff,
1560         };
1561         struct mlx5_priv *esw_priv;
1562         struct mlx5_priv *dev_priv;
1563         int ret;
1564
1565         if (!attr->transfer)
1566                 return rte_flow_error_set(error, EINVAL,
1567                                           RTE_FLOW_ERROR_TYPE_ITEM,
1568                                           NULL,
1569                                           "match on port id is valid only"
1570                                           " when transfer flag is enabled");
1571         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1572                 return rte_flow_error_set(error, ENOTSUP,
1573                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1574                                           "multiple source ports are not"
1575                                           " supported");
1576         if (!mask)
1577                 mask = &switch_mask;
1578         if (mask->id != 0xffffffff)
1579                 return rte_flow_error_set(error, ENOTSUP,
1580                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1581                                            mask,
1582                                            "no support for partial mask on"
1583                                            " \"id\" field");
1584         ret = mlx5_flow_item_acceptable
1585                                 (item, (const uint8_t *)mask,
1586                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1587                                  sizeof(struct rte_flow_item_port_id),
1588                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1589         if (ret)
1590                 return ret;
1591         if (!spec)
1592                 return 0;
1593         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1594         if (!esw_priv)
1595                 return rte_flow_error_set(error, rte_errno,
1596                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1597                                           "failed to obtain E-Switch info for"
1598                                           " port");
1599         dev_priv = mlx5_dev_to_eswitch_info(dev);
1600         if (!dev_priv)
1601                 return rte_flow_error_set(error, rte_errno,
1602                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1603                                           NULL,
1604                                           "failed to obtain E-Switch info");
1605         if (esw_priv->domain_id != dev_priv->domain_id)
1606                 return rte_flow_error_set(error, EINVAL,
1607                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1608                                           "cannot match on a port from a"
1609                                           " different E-Switch");
1610         return 0;
1611 }
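
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * PORT_ID matching is only valid on transfer (E-Switch) flows, so the
 * flow attributes must set the transfer bit, and the spec id must name
 * a port on the same E-Switch domain as the target device.
 */
static const struct rte_flow_attr example_transfer_attr = {
	.transfer = 1,
};
static const struct rte_flow_item_port_id example_port_id_spec = {
	.id = 1, /* DPDK ethdev port id of the source port */
};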
1612
1613 /**
1614  * Validate VLAN item.
1615  *
1616  * @param[in] item
1617  *   Item specification.
1618  * @param[in] item_flags
1619  *   Bit-fields that holds the items detected until now.
1620  * @param[in] dev
1621  *   Ethernet device flow is being created on.
1622  * @param[out] error
1623  *   Pointer to error structure.
1624  *
1625  * @return
1626  *   0 on success, a negative errno value otherwise and rte_errno is set.
1627  */
1628 static int
1629 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1630                            uint64_t item_flags,
1631                            struct rte_eth_dev *dev,
1632                            struct rte_flow_error *error)
1633 {
1634         const struct rte_flow_item_vlan *mask = item->mask;
1635         const struct rte_flow_item_vlan nic_mask = {
1636                 .tci = RTE_BE16(UINT16_MAX),
1637                 .inner_type = RTE_BE16(UINT16_MAX),
1638                 .has_more_vlan = 1,
1639         };
1640         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1641         int ret;
1642         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1643                                         MLX5_FLOW_LAYER_INNER_L4) :
1644                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1645                                         MLX5_FLOW_LAYER_OUTER_L4);
1646         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1647                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1648
1649         if (item_flags & vlanm)
1650                 return rte_flow_error_set(error, EINVAL,
1651                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1652                                           "multiple VLAN layers not supported");
1653         else if ((item_flags & l34m) != 0)
1654                 return rte_flow_error_set(error, EINVAL,
1655                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1656                                           "VLAN cannot follow L3/L4 layer");
1657         if (!mask)
1658                 mask = &rte_flow_item_vlan_mask;
1659         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1660                                         (const uint8_t *)&nic_mask,
1661                                         sizeof(struct rte_flow_item_vlan),
1662                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1663         if (ret)
1664                 return ret;
1665         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1666                 struct mlx5_priv *priv = dev->data->dev_private;
1667
1668                 if (priv->vmwa_context) {
1669                         /*
1670                          * Non-NULL context means we have a virtual machine
1671                          * and SR-IOV enabled, we have to create VLAN interface
1672                          * to make hypervisor to setup E-Switch vport
1673                          * context correctly. We avoid creating the multiple
1674                          * VLAN interfaces, so we cannot support VLAN tag mask.
1675                          */
1676                         return rte_flow_error_set(error, EINVAL,
1677                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1678                                                   item,
1679                                                   "VLAN tag mask is not"
1680                                                   " supported in virtual"
1681                                                   " environment");
1682                 }
1683         }
1684         return 0;
1685 }
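
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a VLAN item matching VID 100 only. Keeping the TCI mask at exactly
 * 0x0fff (the VID bits) avoids the rejection above in SR-IOV/VM setups
 * where a vmwa context is present.
 */
static const struct rte_flow_item_vlan example_vlan_spec = {
	.tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan example_vlan_mask = {
	.tci = RTE_BE16(0x0fff),
};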
1686
1687 /*
1688  * GTP flags are contained in 1 byte of the format:
1689  * -------------------------------------------
1690  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1691  * |-----------------------------------------|
1692  * | value | Version | PT | Res | E | S | PN |
1693  * -------------------------------------------
1694  *
1695  * Matching is supported only for GTP flags E, S, PN.
1696  */
1697 #define MLX5_GTP_FLAGS_MASK     0x07
1698
1699 /**
1700  * Validate GTP item.
1701  *
1702  * @param[in] dev
1703  *   Pointer to the rte_eth_dev structure.
1704  * @param[in] item
1705  *   Item specification.
1706  * @param[in] item_flags
1707  *   Bit-fields that holds the items detected until now.
1708  * @param[out] error
1709  *   Pointer to error structure.
1710  *
1711  * @return
1712  *   0 on success, a negative errno value otherwise and rte_errno is set.
1713  */
1714 static int
1715 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1716                           const struct rte_flow_item *item,
1717                           uint64_t item_flags,
1718                           struct rte_flow_error *error)
1719 {
1720         struct mlx5_priv *priv = dev->data->dev_private;
1721         const struct rte_flow_item_gtp *spec = item->spec;
1722         const struct rte_flow_item_gtp *mask = item->mask;
1723         const struct rte_flow_item_gtp nic_mask = {
1724                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1725                 .msg_type = 0xff,
1726                 .teid = RTE_BE32(0xffffffff),
1727         };
1728
1729         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1730                 return rte_flow_error_set(error, ENOTSUP,
1731                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1732                                           "GTP support is not enabled");
1733         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1734                 return rte_flow_error_set(error, ENOTSUP,
1735                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1736                                           "multiple tunnel layers not"
1737                                           " supported");
1738         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1739                 return rte_flow_error_set(error, EINVAL,
1740                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1741                                           "no outer UDP layer found");
1742         if (!mask)
1743                 mask = &rte_flow_item_gtp_mask;
1744         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1745                 return rte_flow_error_set(error, ENOTSUP,
1746                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1747                                           "Match is supported for GTP"
1748                                           " flags only");
1749         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1750                                          (const uint8_t *)&nic_mask,
1751                                          sizeof(struct rte_flow_item_gtp),
1752                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1753 }
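
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a GTP item matching on TEID over an outer UDP layer. Flag matching is
 * limited to the E, S and PN bits (MLX5_GTP_FLAGS_MASK == 0x07).
 */
static const struct rte_flow_item_gtp example_gtp_spec = {
	.teid = RTE_BE32(0x12345678),
	.v_pt_rsv_flags = 0x01, /* PN bit only, within the 0x07 limit */
};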
1754
1755 /**
1756  * Validate IPV4 item.
1757  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1758  * add specific validation of fragment_offset field,
1759  *
1760  * @param[in] item
1761  *   Item specification.
1762  * @param[in] item_flags
1763  *   Bit-fields that holds the items detected until now.
1764  * @param[out] error
1765  *   Pointer to error structure.
1766  *
1767  * @return
1768  *   0 on success, a negative errno value otherwise and rte_errno is set.
1769  */
1770 static int
1771 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1772                            uint64_t item_flags,
1773                            uint64_t last_item,
1774                            uint16_t ether_type,
1775                            struct rte_flow_error *error)
1776 {
1777         int ret;
1778         const struct rte_flow_item_ipv4 *spec = item->spec;
1779         const struct rte_flow_item_ipv4 *last = item->last;
1780         const struct rte_flow_item_ipv4 *mask = item->mask;
1781         rte_be16_t fragment_offset_spec = 0;
1782         rte_be16_t fragment_offset_last = 0;
1783         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1784                 .hdr = {
1785                         .src_addr = RTE_BE32(0xffffffff),
1786                         .dst_addr = RTE_BE32(0xffffffff),
1787                         .type_of_service = 0xff,
1788                         .fragment_offset = RTE_BE16(0xffff),
1789                         .next_proto_id = 0xff,
1790                         .time_to_live = 0xff,
1791                 },
1792         };
1793
1794         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1795                                            ether_type, &nic_ipv4_mask,
1796                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1797         if (ret < 0)
1798                 return ret;
1799         if (spec && mask)
1800                 fragment_offset_spec = spec->hdr.fragment_offset &
1801                                        mask->hdr.fragment_offset;
1802         if (!fragment_offset_spec)
1803                 return 0;
1804         /*
1805          * spec and mask are valid, enforce using full mask to make sure the
1806          * complete value is used correctly.
1807          */
1808         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1809                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1810                 return rte_flow_error_set(error, EINVAL,
1811                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1812                                           item, "must use full mask for"
1813                                           " fragment_offset");
1814         /*
1815          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1816          * indicating this is 1st fragment of fragmented packet.
1817          * This is not yet supported in MLX5, return appropriate error message.
1818          */
1819         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1820                 return rte_flow_error_set(error, ENOTSUP,
1821                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1822                                           "match on first fragment not "
1823                                           "supported");
1824         if (fragment_offset_spec && !last)
1825                 return rte_flow_error_set(error, ENOTSUP,
1826                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1827                                           "specified value not supported");
1828         /* spec and last are valid, validate the specified range. */
1829         fragment_offset_last = last->hdr.fragment_offset &
1830                                mask->hdr.fragment_offset;
1831         /*
1832          * Match on fragment_offset spec 0x2001 and last 0x3fff
1833          * means MF is 1 and frag-offset is > 0.
1834          * This packet is fragment 2nd and onward, excluding last.
1835          * This is not yet supported in MLX5, return appropriate
1836          * error message.
1837          */
1838         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1839             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1840                 return rte_flow_error_set(error, ENOTSUP,
1841                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1842                                           last, "match on following "
1843                                           "fragments not supported");
1844         /*
1845          * Match on fragment_offset spec 0x0001 and last 0x1fff
1846          * means MF is 0 and frag-offset is > 0.
1847          * This packet is last fragment of fragmented packet.
1848          * This is not yet supported in MLX5, return appropriate
1849          * error message.
1850          */
1851         if (fragment_offset_spec == RTE_BE16(1) &&
1852             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1853                 return rte_flow_error_set(error, ENOTSUP,
1854                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1855                                           last, "match on last "
1856                                           "fragment not supported");
1857         /*
1858          * Match on fragment_offset spec 0x0001 and last 0x3fff
1859          * means MF and/or frag-offset is not 0.
1860          * This is a fragmented packet.
1861          * Other range values are invalid and rejected.
1862          */
1863         if (!(fragment_offset_spec == RTE_BE16(1) &&
1864               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1865                 return rte_flow_error_set(error, ENOTSUP,
1866                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1867                                           "specified range not supported");
1868         return 0;
1869 }
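
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * the only accepted fragment_offset range, spec 0x0001 with last 0x3fff
 * under the full 0x3fff mask, which matches any fragmented packet (MF
 * set and/or a non-zero fragment offset).
 */
static const struct rte_flow_item_ipv4 example_ipv4_frag_spec = {
	.hdr.fragment_offset = RTE_BE16(1),
};
static const struct rte_flow_item_ipv4 example_ipv4_frag_last = {
	.hdr.fragment_offset = RTE_BE16(0x3fff),
};
static const struct rte_flow_item_ipv4 example_ipv4_frag_mask = {
	.hdr.fragment_offset = RTE_BE16(0x3fff),
};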
1870
1871 /**
1872  * Validate IPV6 fragment extension item.
1873  *
1874  * @param[in] item
1875  *   Item specification.
1876  * @param[in] item_flags
1877  *   Bit-fields that holds the items detected until now.
1878  * @param[out] error
1879  *   Pointer to error structure.
1880  *
1881  * @return
1882  *   0 on success, a negative errno value otherwise and rte_errno is set.
1883  */
1884 static int
1885 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1886                                     uint64_t item_flags,
1887                                     struct rte_flow_error *error)
1888 {
1889         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1890         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1891         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1892         rte_be16_t frag_data_spec = 0;
1893         rte_be16_t frag_data_last = 0;
1894         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1895         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1896                                       MLX5_FLOW_LAYER_OUTER_L4;
1897         int ret = 0;
1898         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1899                 .hdr = {
1900                         .next_header = 0xff,
1901                         .frag_data = RTE_BE16(0xffff),
1902                 },
1903         };
1904
1905         if (item_flags & l4m)
1906                 return rte_flow_error_set(error, EINVAL,
1907                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1908                                           "ipv6 fragment extension item cannot "
1909                                           "follow L4 item.");
1910         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1911             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1912                 return rte_flow_error_set(error, EINVAL,
1913                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1914                                           "ipv6 fragment extension item must "
1915                                           "follow ipv6 item");
1916         if (spec && mask)
1917                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1918         if (!frag_data_spec)
1919                 return 0;
1920         /*
1921          * spec and mask are valid, enforce using full mask to make sure the
1922          * complete value is used correctly.
1923          */
1924         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1925                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1926                 return rte_flow_error_set(error, EINVAL,
1927                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1928                                           item, "must use full mask for"
1929                                           " frag_data");
1930         /*
1931          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
1932          * This is 1st fragment of fragmented packet.
1933          */
1934         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1935                 return rte_flow_error_set(error, ENOTSUP,
1936                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1937                                           "match on first fragment not "
1938                                           "supported");
1939         if (frag_data_spec && !last)
1940                 return rte_flow_error_set(error, EINVAL,
1941                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1942                                           "specified value not supported");
1943         ret = mlx5_flow_item_acceptable
1944                                 (item, (const uint8_t *)mask,
1945                                  (const uint8_t *)&nic_mask,
1946                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
1947                                  MLX5_ITEM_RANGE_ACCEPTED, error);
1948         if (ret)
1949                 return ret;
1950         /* spec and last are valid, validate the specified range. */
1951         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1952         /*
1953          * Match on frag_data spec 0x0009 and last 0xfff9
1954          * means M is 1 and frag-offset is > 0.
1955          * This packet is fragment 2nd and onward, excluding last.
1956          * This is not yet supported in MLX5, return appropriate
1957          * error message.
1958          */
1959         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1960                                        RTE_IPV6_EHDR_MF_MASK) &&
1961             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1962                 return rte_flow_error_set(error, ENOTSUP,
1963                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1964                                           last, "match on following "
1965                                           "fragments not supported");
1966         /*
1967          * Match on frag_data spec 0x0008 and last 0xfff8
1968          * means M is 0 and frag-offset is > 0.
1969          * This packet is last fragment of fragmented packet.
1970          * This is not yet supported in MLX5, return appropriate
1971          * error message.
1972          */
1973         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
1974             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
1975                 return rte_flow_error_set(error, ENOTSUP,
1976                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1977                                           last, "match on last "
1978                                           "fragment not supported");
1979         /* Other range values are invalid and rejected. */
1980         return rte_flow_error_set(error, EINVAL,
1981                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1982                                   "specified range not supported");
1983 }
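
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * in this version every spec/last range on frag_data ends in rejection,
 * so the supported way to match fragmented IPv6 traffic is the mere
 * presence of the fragment extension header, i.e. an item whose
 * frag_data spec is zero (or absent).
 */
static const struct rte_flow_item example_ipv6_frag_ext_item = {
	.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
	.spec = NULL, /* presence match only */
};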
1984
1985 /**
1986  * Validate the pop VLAN action.
1987  *
1988  * @param[in] dev
1989  *   Pointer to the rte_eth_dev structure.
1990  * @param[in] action_flags
1991  *   Holds the actions detected until now.
1992  * @param[in] action
1993  *   Pointer to the pop vlan action.
1994  * @param[in] item_flags
1995  *   The items found in this flow rule.
1996  * @param[in] attr
1997  *   Pointer to flow attributes.
1998  * @param[out] error
1999  *   Pointer to error structure.
2000  *
2001  * @return
2002  *   0 on success, a negative errno value otherwise and rte_errno is set.
2003  */
2004 static int
2005 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2006                                  uint64_t action_flags,
2007                                  const struct rte_flow_action *action,
2008                                  uint64_t item_flags,
2009                                  const struct rte_flow_attr *attr,
2010                                  struct rte_flow_error *error)
2011 {
2012         const struct mlx5_priv *priv = dev->data->dev_private;
2013
2016         if (!priv->sh->pop_vlan_action)
2017                 return rte_flow_error_set(error, ENOTSUP,
2018                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2019                                           NULL,
2020                                           "pop vlan action is not supported");
2021         if (attr->egress)
2022                 return rte_flow_error_set(error, ENOTSUP,
2023                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2024                                           NULL,
2025                                           "pop vlan action not supported for "
2026                                           "egress");
2027         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2028                 return rte_flow_error_set(error, ENOTSUP,
2029                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2030                                           "no support for multiple VLAN "
2031                                           "actions");
2032         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2033         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2034             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2035                 return rte_flow_error_set(error, ENOTSUP,
2036                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2037                                           NULL,
2038                                           "cannot pop vlan after decap without "
2039                                           "match on inner vlan in the flow");
2040         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2041         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2042             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2043                 return rte_flow_error_set(error, ENOTSUP,
2044                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2045                                           NULL,
2046                                           "cannot pop vlan without a "
2047                                           "match on (outer) vlan in the flow");
2048         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2049                 return rte_flow_error_set(error, EINVAL,
2050                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2051                                           "wrong action order, port_id should "
2052                                           "be after pop VLAN action");
2053         if (!attr->transfer && priv->representor)
2054                 return rte_flow_error_set(error, ENOTSUP,
2055                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2056                                           "pop vlan action for VF representor "
2057                                           "not supported on NIC table");
2058         return 0;
2059 }
2060
2061 /**
2062  * Get VLAN default info from vlan match info.
2063  *
2064  * @param[in] items
2065  *   The list of item specifications.
2066  * @param[out] vlan
2067  *   Pointer to the VLAN info to fill.
2068  */
2072 static void
2073 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2074                                   struct rte_vlan_hdr *vlan)
2075 {
2076         const struct rte_flow_item_vlan nic_mask = {
2077                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2078                                 MLX5DV_FLOW_VLAN_VID_MASK),
2079                 .inner_type = RTE_BE16(0xffff),
2080         };
2081
2082         if (items == NULL)
2083                 return;
2084         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2085                 int type = items->type;
2086
2087                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2088                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2089                         break;
2090         }
2091         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2092                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2093                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2094
2095                 /* If VLAN item in pattern doesn't contain data, return here. */
2096                 if (!vlan_v)
2097                         return;
2098                 if (!vlan_m)
2099                         vlan_m = &nic_mask;
2100                 /* Only full match values are accepted. */
2101                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2102                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2103                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2104                         vlan->vlan_tci |=
2105                                 rte_be_to_cpu_16(vlan_v->tci &
2106                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2107                 }
2108                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2109                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2110                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2111                         vlan->vlan_tci |=
2112                                 rte_be_to_cpu_16(vlan_v->tci &
2113                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2114                 }
2115                 if (vlan_m->inner_type == nic_mask.inner_type)
2116                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2117                                                            vlan_m->inner_type);
2118         }
2119 }
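
/*
 * Illustrative sketch (editor addition): how a host-endian TCI value
 * decomposes with the masks used above, PCP in bits 13-15 and VID in
 * bits 0-11.
 */
static inline void
example_tci_decompose(uint16_t tci, uint8_t *pcp, uint16_t *vid)
{
	*pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >> MLX5DV_FLOW_VLAN_PCP_SHIFT;
	*vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;
}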
2120
2121 /**
2122  * Validate the push VLAN action.
2123  *
2124  * @param[in] dev
2125  *   Pointer to the rte_eth_dev structure.
2126  * @param[in] action_flags
2127  *   Holds the actions detected until now.
2128  * @param[in] vlan_m
2129  *   VLAN match mask from the VLAN item of the pattern, or NULL.
2130  * @param[in] action
2131  *   Pointer to the action structure.
2132  * @param[in] attr
2133  *   Pointer to flow attributes
2134  * @param[out] error
2135  *   Pointer to error structure.
2136  *
2137  * @return
2138  *   0 on success, a negative errno value otherwise and rte_errno is set.
2139  */
2140 static int
2141 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2142                                   uint64_t action_flags,
2143                                   const struct rte_flow_item_vlan *vlan_m,
2144                                   const struct rte_flow_action *action,
2145                                   const struct rte_flow_attr *attr,
2146                                   struct rte_flow_error *error)
2147 {
2148         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2149         const struct mlx5_priv *priv = dev->data->dev_private;
2150
2151         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2152             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2153                 return rte_flow_error_set(error, EINVAL,
2154                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2155                                           "invalid vlan ethertype");
2156         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2157                 return rte_flow_error_set(error, EINVAL,
2158                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2159                                           "wrong action order, port_id should "
2160                                           "be after push VLAN");
2161         if (!attr->transfer && priv->representor)
2162                 return rte_flow_error_set(error, ENOTSUP,
2163                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2164                                           "push vlan action for VF representor "
2165                                           "not supported on NIC table");
2166         if (vlan_m &&
2167             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2168             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2169                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2170             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2171             !(mlx5_flow_find_action
2172                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2173                 return rte_flow_error_set(error, EINVAL,
2174                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2175                                           "not full match mask on VLAN PCP and "
2176                                           "there is no of_set_vlan_pcp action, "
2177                                           "push VLAN action cannot figure out "
2178                                           "PCP value");
2179         if (vlan_m &&
2180             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2181             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2182                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2183             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2184             !(mlx5_flow_find_action
2185                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2186                 return rte_flow_error_set(error, EINVAL,
2187                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2188                                           "not full match mask on VLAN VID and "
2189                                           "there is no of_set_vlan_vid action, "
2190                                           "push VLAN action cannot figure out "
2191                                           "VID value");
2193         return 0;
2194 }
2195
2196 /**
2197  * Validate the set VLAN PCP.
2198  *
2199  * @param[in] action_flags
2200  *   Holds the actions detected until now.
2201  * @param[in] actions
2202  *   Pointer to the list of actions remaining in the flow rule.
2203  * @param[out] error
2204  *   Pointer to error structure.
2205  *
2206  * @return
2207  *   0 on success, a negative errno value otherwise and rte_errno is set.
2208  */
2209 static int
2210 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2211                                      const struct rte_flow_action actions[],
2212                                      struct rte_flow_error *error)
2213 {
2214         const struct rte_flow_action *action = actions;
2215         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2216
2217         if (conf->vlan_pcp > 7)
2218                 return rte_flow_error_set(error, EINVAL,
2219                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2220                                           "VLAN PCP value is too big");
2221         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2222                 return rte_flow_error_set(error, ENOTSUP,
2223                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2224                                           "set VLAN PCP action must follow "
2225                                           "the push VLAN action");
2226         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2227                 return rte_flow_error_set(error, ENOTSUP,
2228                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2229                                           "Multiple VLAN PCP modifications are "
2230                                           "not supported");
2231         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2232                 return rte_flow_error_set(error, EINVAL,
2233                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2234                                           "wrong action order, port_id should "
2235                                           "be after set VLAN PCP");
2236         return 0;
2237 }
2238
2239 /**
2240  * Validate the set VLAN VID.
2241  *
2242  * @param[in] item_flags
2243  *   Holds the items detected in this rule.
2244  * @param[in] action_flags
2245  *   Holds the actions detected until now.
2246  * @param[in] actions
2247  *   Pointer to the list of actions remaining in the flow rule.
2248  * @param[out] error
2249  *   Pointer to error structure.
2250  *
2251  * @return
2252  *   0 on success, a negative errno value otherwise and rte_errno is set.
2253  */
2254 static int
2255 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2256                                      uint64_t action_flags,
2257                                      const struct rte_flow_action actions[],
2258                                      struct rte_flow_error *error)
2259 {
2260         const struct rte_flow_action *action = actions;
2261         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2262
2263         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2264                 return rte_flow_error_set(error, EINVAL,
2265                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2266                                           "VLAN VID value is too big");
2267         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2268             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2269                 return rte_flow_error_set(error, ENOTSUP,
2270                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2271                                           "set VLAN VID action must follow push"
2272                                           " VLAN action or match on VLAN item");
2273         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2274                 return rte_flow_error_set(error, ENOTSUP,
2275                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2276                                           "Multiple VLAN VID modifications are "
2277                                           "not supported");
2278         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2279                 return rte_flow_error_set(error, EINVAL,
2280                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2281                                           "wrong action order, port_id should "
2282                                           "be after set VLAN VID");
2283         return 0;
2284 }
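
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * an action ordering that satisfies the three validators above, push
 * VLAN first, then the PCP/VID modifications, with port_id (if any)
 * strictly last.
 */
static const struct rte_flow_action_of_push_vlan example_push_vlan = {
	.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
static const struct rte_flow_action_of_set_vlan_pcp example_set_pcp = {
	.vlan_pcp = 3, /* must be <= 7 */
};
static const struct rte_flow_action_of_set_vlan_vid example_set_vid = {
	.vlan_vid = RTE_BE16(100), /* must be <= 0xFFE */
};
static const struct rte_flow_action example_vlan_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &example_push_vlan },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &example_set_pcp },
	{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &example_set_vid },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};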
2285
2286 /**
2287  * Validate the FLAG action.
2288  *
2289  * @param[in] dev
2290  *   Pointer to the rte_eth_dev structure.
2291  * @param[in] action_flags
2292  *   Holds the actions detected until now.
2293  * @param[in] attr
2294  *   Pointer to flow attributes
2295  * @param[out] error
2296  *   Pointer to error structure.
2297  *
2298  * @return
2299  *   0 on success, a negative errno value otherwise and rte_errno is set.
2300  */
2301 static int
2302 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2303                              uint64_t action_flags,
2304                              const struct rte_flow_attr *attr,
2305                              struct rte_flow_error *error)
2306 {
2307         struct mlx5_priv *priv = dev->data->dev_private;
2308         struct mlx5_dev_config *config = &priv->config;
2309         int ret;
2310
2311         /* Fall back if no extended metadata register support. */
2312         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2313                 return mlx5_flow_validate_action_flag(action_flags, attr,
2314                                                       error);
2315         /* Extensive metadata mode requires registers. */
2316         if (!mlx5_flow_ext_mreg_supported(dev))
2317                 return rte_flow_error_set(error, ENOTSUP,
2318                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2319                                           "no metadata registers "
2320                                           "to support flag action");
2321         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2322                 return rte_flow_error_set(error, ENOTSUP,
2323                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2324                                           "extended metadata register"
2325                                           " isn't available");
2326         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2327         if (ret < 0)
2328                 return ret;
2329         MLX5_ASSERT(ret > 0);
2330         if (action_flags & MLX5_FLOW_ACTION_MARK)
2331                 return rte_flow_error_set(error, EINVAL,
2332                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2333                                           "can't mark and flag in same flow");
2334         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2335                 return rte_flow_error_set(error, EINVAL,
2336                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2337                                           "can't have 2 flag"
2338                                           " actions in same flow");
2339         return 0;
2340 }
2341
2342 /**
2343  * Validate MARK action.
2344  *
2345  * @param[in] dev
2346  *   Pointer to the rte_eth_dev structure.
2347  * @param[in] action
2348  *   Pointer to action.
2349  * @param[in] action_flags
2350  *   Holds the actions detected until now.
2351  * @param[in] attr
2352  *   Pointer to flow attributes
2353  * @param[out] error
2354  *   Pointer to error structure.
2355  *
2356  * @return
2357  *   0 on success, a negative errno value otherwise and rte_errno is set.
2358  */
2359 static int
2360 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2361                              const struct rte_flow_action *action,
2362                              uint64_t action_flags,
2363                              const struct rte_flow_attr *attr,
2364                              struct rte_flow_error *error)
2365 {
2366         struct mlx5_priv *priv = dev->data->dev_private;
2367         struct mlx5_dev_config *config = &priv->config;
2368         const struct rte_flow_action_mark *mark = action->conf;
2369         int ret;
2370
2371         /* Fall back if no extended metadata register support. */
2372         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2373                 return mlx5_flow_validate_action_mark(action, action_flags,
2374                                                       attr, error);
2375         /* Extensive metadata mode requires registers. */
2376         if (!mlx5_flow_ext_mreg_supported(dev))
2377                 return rte_flow_error_set(error, ENOTSUP,
2378                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2379                                           "no metadata registers "
2380                                           "to support mark action");
2381         if (!priv->sh->dv_mark_mask)
2382                 return rte_flow_error_set(error, ENOTSUP,
2383                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2384                                           "extended metadata register"
2385                                           " isn't available");
2386         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2387         if (ret < 0)
2388                 return ret;
2389         MLX5_ASSERT(ret > 0);
2390         if (!mark)
2391                 return rte_flow_error_set(error, EINVAL,
2392                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2393                                           "configuration cannot be null");
2394         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2395                 return rte_flow_error_set(error, EINVAL,
2396                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2397                                           &mark->id,
2398                                           "mark id exceeds the limit");
2399         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2400                 return rte_flow_error_set(error, EINVAL,
2401                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2402                                           "can't flag and mark in same flow");
2403         if (action_flags & MLX5_FLOW_ACTION_MARK)
2404                 return rte_flow_error_set(error, EINVAL,
2405                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2406                                           "can't have 2 mark actions in same"
2407                                           " flow");
2408         return 0;
2409 }
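
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a MARK action; FLAG and MARK are mutually exclusive in one flow, and
 * the id is bounded by MLX5_FLOW_MARK_MAX masked with dv_mark_mask.
 */
static const struct rte_flow_action_mark example_mark_conf = { .id = 42 };
static const struct rte_flow_action example_mark_action = {
	.type = RTE_FLOW_ACTION_TYPE_MARK,
	.conf = &example_mark_conf,
};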
2410
2411 /**
2412  * Validate SET_META action.
2413  *
2414  * @param[in] dev
2415  *   Pointer to the rte_eth_dev structure.
2416  * @param[in] action
2417  *   Pointer to the action structure.
2418  * @param[in] action_flags
2419  *   Holds the actions detected until now.
2420  * @param[in] attr
2421  *   Pointer to flow attributes
2422  * @param[out] error
2423  *   Pointer to error structure.
2424  *
2425  * @return
2426  *   0 on success, a negative errno value otherwise and rte_errno is set.
2427  */
2428 static int
2429 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2430                                  const struct rte_flow_action *action,
2431                                  uint64_t action_flags __rte_unused,
2432                                  const struct rte_flow_attr *attr,
2433                                  struct rte_flow_error *error)
2434 {
2435         const struct rte_flow_action_set_meta *conf;
2436         uint32_t nic_mask = UINT32_MAX;
2437         int reg;
2438
2439         if (!mlx5_flow_ext_mreg_supported(dev))
2440                 return rte_flow_error_set(error, ENOTSUP,
2441                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2442                                           "extended metadata register"
2443                                           " isn't supported");
2444         reg = flow_dv_get_metadata_reg(dev, attr, error);
2445         if (reg < 0)
2446                 return reg;
2447         if (reg != REG_A && reg != REG_B) {
2448                 struct mlx5_priv *priv = dev->data->dev_private;
2449
2450                 nic_mask = priv->sh->dv_meta_mask;
2451         }
2452         if (!(action->conf))
2453                 return rte_flow_error_set(error, EINVAL,
2454                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2455                                           "configuration cannot be null");
2456         conf = (const struct rte_flow_action_set_meta *)action->conf;
2457         if (!conf->mask)
2458                 return rte_flow_error_set(error, EINVAL,
2459                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2460                                           "zero mask doesn't have any effect");
2461         if (conf->mask & ~nic_mask)
2462                 return rte_flow_error_set(error, EINVAL,
2463                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2464                                           "meta data must be within reg C0");
2465         return 0;
2466 }
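
/*
 * Illustrative usage sketch (editor addition, hypothetical identifiers):
 * a SET_META action with an explicit non-zero mask; when the metadata
 * lands in a reg_c register the mask must also fit within dv_meta_mask.
 */
static const struct rte_flow_action_set_meta example_set_meta = {
	.data = 0xa5a5,
	.mask = 0xffff, /* a zero mask is rejected above */
};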
2467
2468 /**
2469  * Validate SET_TAG action.
2470  *
2471  * @param[in] dev
2472  *   Pointer to the rte_eth_dev structure.
2473  * @param[in] action
2474  *   Pointer to the action structure.
2475  * @param[in] action_flags
2476  *   Holds the actions detected until now.
2477  * @param[in] attr
2478  *   Pointer to flow attributes
2479  * @param[out] error
2480  *   Pointer to error structure.
2481  *
2482  * @return
2483  *   0 on success, a negative errno value otherwise and rte_errno is set.
2484  */
2485 static int
2486 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2487                                 const struct rte_flow_action *action,
2488                                 uint64_t action_flags,
2489                                 const struct rte_flow_attr *attr,
2490                                 struct rte_flow_error *error)
2491 {
2492         const struct rte_flow_action_set_tag *conf;
2493         const uint64_t terminal_action_flags =
2494                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2495                 MLX5_FLOW_ACTION_RSS;
2496         int ret;
2497
2498         if (!mlx5_flow_ext_mreg_supported(dev))
2499                 return rte_flow_error_set(error, ENOTSUP,
2500                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2501                                           "extensive metadata register"
2502                                           " isn't supported");
2503         if (!(action->conf))
2504                 return rte_flow_error_set(error, EINVAL,
2505                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2506                                           "configuration cannot be null");
2507         conf = (const struct rte_flow_action_set_tag *)action->conf;
2508         if (!conf->mask)
2509                 return rte_flow_error_set(error, EINVAL,
2510                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2511                                           "zero mask doesn't have any effect");
2512         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2513         if (ret < 0)
2514                 return ret;
2515         if (!attr->transfer && attr->ingress &&
2516             (action_flags & terminal_action_flags))
2517                 return rte_flow_error_set(error, EINVAL,
2518                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2519                                           "set_tag has no effect"
2520                                           " with terminal actions");
2521         return 0;
2522 }
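
/*
 * Illustrative sketch only (hypothetical values): a SET_TAG
 * configuration accepted by the validation above. The index is
 * translated to a physical register by mlx5_flow_get_reg_id(), and a
 * zero mask is rejected because it would have no effect.
 */
static const struct rte_flow_action_set_tag
sketch_set_tag_conf __rte_unused = {
	.data = 0x1234,		/* Tag value to write. */
	.mask = UINT32_MAX,	/* Must be non-zero. */
	.index = 0,		/* Application TAG array index. */
};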
2523
2524 /**
2525  * Validate count action.
2526  *
2527  * @param[in] dev
2528  *   Pointer to rte_eth_dev structure.
2529  * @param[out] error
2530  *   Pointer to error structure.
2531  *
2532  * @return
2533  *   0 on success, a negative errno value otherwise and rte_errno is set.
2534  */
2535 static int
2536 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2537                               struct rte_flow_error *error)
2538 {
2539         struct mlx5_priv *priv = dev->data->dev_private;
2540
2541         if (!priv->config.devx)
2542                 goto notsup_err;
2543 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2544         return 0;
2545 #endif
2546 notsup_err:
2547         return rte_flow_error_set
2548                       (error, ENOTSUP,
2549                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2550                        NULL,
2551                        "count action not supported");
2552 }
2553
2554 /**
2555  * Validate the L2 encap action.
2556  *
2557  * @param[in] dev
2558  *   Pointer to the rte_eth_dev structure.
2559  * @param[in] action_flags
2560  *   Holds the actions detected until now.
2561  * @param[in] action
2562  *   Pointer to the action structure.
2563  * @param[in] attr
2564  *   Pointer to flow attributes.
2565  * @param[out] error
2566  *   Pointer to error structure.
2567  *
2568  * @return
2569  *   0 on success, a negative errno value otherwise and rte_errno is set.
2570  */
2571 static int
2572 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2573                                  uint64_t action_flags,
2574                                  const struct rte_flow_action *action,
2575                                  const struct rte_flow_attr *attr,
2576                                  struct rte_flow_error *error)
2577 {
2578         const struct mlx5_priv *priv = dev->data->dev_private;
2579
2580         if (!(action->conf))
2581                 return rte_flow_error_set(error, EINVAL,
2582                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2583                                           "configuration cannot be null");
2584         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2585                 return rte_flow_error_set(error, EINVAL,
2586                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2587                                           "can only have a single encap action "
2588                                           "in a flow");
2589         if (!attr->transfer && priv->representor)
2590                 return rte_flow_error_set(error, ENOTSUP,
2591                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2592                                           "encap action for VF representor "
2593                                           "not supported on NIC table");
2594         return 0;
2595 }
2596
2597 /**
2598  * Validate a decap action.
2599  *
2600  * @param[in] dev
2601  *   Pointer to the rte_eth_dev structure.
2602  * @param[in] action_flags
2603  *   Holds the actions detected until now.
2604  * @param[in] attr
2605  *   Pointer to flow attributes
2606  * @param[out] error
2607  *   Pointer to error structure.
2608  *
2609  * @return
2610  *   0 on success, a negative errno value otherwise and rte_errno is set.
2611  */
2612 static int
2613 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2614                               uint64_t action_flags,
2615                               const struct rte_flow_attr *attr,
2616                               struct rte_flow_error *error)
2617 {
2618         const struct mlx5_priv *priv = dev->data->dev_private;
2619
2620         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2621             !priv->config.decap_en)
2622                 return rte_flow_error_set(error, ENOTSUP,
2623                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2624                                           "decap is not enabled");
2625         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2626                 return rte_flow_error_set(error, ENOTSUP,
2627                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2628                                           action_flags &
2629                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2630                                           "have a single decap action" : "decap "
2631                                           "after encap is not supported");
2632         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2633                 return rte_flow_error_set(error, EINVAL,
2634                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2635                                           "can't have decap action after"
2636                                           " modify action");
2637         if (attr->egress)
2638                 return rte_flow_error_set(error, ENOTSUP,
2639                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2640                                           NULL,
2641                                           "decap action not supported for "
2642                                           "egress");
2643         if (!attr->transfer && priv->representor)
2644                 return rte_flow_error_set(error, ENOTSUP,
2645                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2646                                           "decap action for VF representor "
2647                                           "not supported on NIC table");
2648         return 0;
2649 }
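
/*
 * Illustrative sketch only: an action order that satisfies the
 * constraints above. A decap must not follow an encap or a
 * modify-header action within the same flow. The conf pointers are
 * omitted for brevity, so this array is for illustration only.
 */
static const struct rte_flow_action
sketch_decap_action_order[] __rte_unused = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },	/* Decap first. */
	{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC },	/* Then modify. */
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },		/* Then fate. */
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};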
2650
2651 const struct rte_flow_action_raw_decap empty_decap = { .data = NULL, .size = 0 };
2652
2653 /**
2654  * Validate the raw encap and decap actions.
2655  *
2656  * @param[in] dev
2657  *   Pointer to the rte_eth_dev structure.
2658  * @param[in] decap
2659  *   Pointer to the decap action.
2660  * @param[in] encap
2661  *   Pointer to the encap action.
2662  * @param[in] attr
2663  *   Pointer to flow attributes
2664  * @param[in, out] action_flags
2665  *   Holds the actions detected until now.
2666  * @param[out] actions_n
2667  *   Pointer to the actions counter.
2668  * @param[out] error
2669  *   Pointer to error structure.
2670  *
2671  * @return
2672  *   0 on success, a negative errno value otherwise and rte_errno is set.
2673  */
2674 static int
2675 flow_dv_validate_action_raw_encap_decap
2676         (struct rte_eth_dev *dev,
2677          const struct rte_flow_action_raw_decap *decap,
2678          const struct rte_flow_action_raw_encap *encap,
2679          const struct rte_flow_attr *attr, uint64_t *action_flags,
2680          int *actions_n, struct rte_flow_error *error)
2681 {
2682         const struct mlx5_priv *priv = dev->data->dev_private;
2683         int ret;
2684
2685         if (encap && (!encap->size || !encap->data))
2686                 return rte_flow_error_set(error, EINVAL,
2687                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2688                                           "raw encap data cannot be empty");
2689         if (decap && encap) {
2690                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2691                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2692                         /* L3 encap. */
2693                         decap = NULL;
2694                 else if (encap->size <=
2695                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2696                            decap->size >
2697                            MLX5_ENCAPSULATION_DECISION_SIZE)
2698                         /* L3 decap. */
2699                         encap = NULL;
2700                 else if (encap->size >
2701                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2702                            decap->size >
2703                            MLX5_ENCAPSULATION_DECISION_SIZE)
2704                         /* 2 L2 actions: encap and decap. */
2705                         ;
2706                 else
2707                         return rte_flow_error_set(error,
2708                                 ENOTSUP,
2709                                 RTE_FLOW_ERROR_TYPE_ACTION,
2710                                 NULL, "unsupported combination: "
2711                                 "both raw decap and raw encap "
2712                                 "sizes are too small");
2713         }
2714         if (decap) {
2715                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2716                                                     error);
2717                 if (ret < 0)
2718                         return ret;
2719                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2720                 ++(*actions_n);
2721         }
2722         if (encap) {
2723                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2724                         return rte_flow_error_set(error, ENOTSUP,
2725                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2726                                                   NULL,
2727                                                   "too small raw encap size");
2728                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2729                         return rte_flow_error_set(error, EINVAL,
2730                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2731                                                   NULL,
2732                                                   "more than one encap action");
2733                 if (!attr->transfer && priv->representor)
2734                         return rte_flow_error_set
2735                                         (error, ENOTSUP,
2736                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2737                                          "encap action for VF representor "
2738                                          "not supported on NIC table");
2739                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2740                 ++(*actions_n);
2741         }
2742         return 0;
2743 }
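
/*
 * Summary of the size-based classification above, assuming
 * MLX5_ENCAPSULATION_DECISION_SIZE is the threshold separating bare
 * L2 headers from full tunnel headers:
 *
 *   decap <= threshold && encap >  threshold  ->  L3 encap only
 *   encap <= threshold && decap >  threshold  ->  L3 decap only
 *   encap >  threshold && decap >  threshold  ->  L2 decap + L2 encap
 *   both  <= threshold                        ->  rejected (ENOTSUP)
 */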
2744
2745 /**
2746  * Match encap_decap resource.
2747  *
2748  * @param list
2749  *   Pointer to the hash list.
2750  * @param entry
2751  *   Pointer to the existing resource entry object.
2752  * @param key
2753  *   Key of the new entry.
2754  * @param cb_ctx
2755  *   Pointer to the new encap_decap resource.
2756  *
2757  * @return
2758  *   0 on matching, non-zero otherwise.
2759  */
2760 int
2761 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2762                              struct mlx5_hlist_entry *entry,
2763                              uint64_t key __rte_unused, void *cb_ctx)
2764 {
2765         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2766         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2767         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2768
2769         cache_resource = container_of(entry,
2770                                       struct mlx5_flow_dv_encap_decap_resource,
2771                                       entry);
2772         if (resource->entry.key == cache_resource->entry.key &&
2773             resource->reformat_type == cache_resource->reformat_type &&
2774             resource->ft_type == cache_resource->ft_type &&
2775             resource->flags == cache_resource->flags &&
2776             resource->size == cache_resource->size &&
2777             !memcmp((const void *)resource->buf,
2778                     (const void *)cache_resource->buf,
2779                     resource->size))
2780                 return 0;
2781         return -1;
2782 }
2783
2784 /**
2785  * Allocate encap_decap resource.
2786  *
2787  * @param list
2788  *   Pointer to the hash list.
2789  * @param key
2790  *   Key of the new entry.
2791  * @param cb_ctx
2792  *   Pointer to the new encap_decap resource.
2793  *
2794  * @return
2795  *   Pointer to the created entry on success, NULL otherwise and rte_errno is set.
2796  */
2797 struct mlx5_hlist_entry *
2798 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2799                               uint64_t key __rte_unused,
2800                               void *cb_ctx)
2801 {
2802         struct mlx5_dev_ctx_shared *sh = list->ctx;
2803         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2804         struct mlx5dv_dr_domain *domain;
2805         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2806         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2807         uint32_t idx;
2808         int ret;
2809
2810         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2811                 domain = sh->fdb_domain;
2812         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2813                 domain = sh->rx_domain;
2814         else
2815                 domain = sh->tx_domain;
2816         /* Register new encap/decap resource. */
2817         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2818                                        &idx);
2819         if (!cache_resource) {
2820                 rte_flow_error_set(ctx->error, ENOMEM,
2821                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2822                                    "cannot allocate resource memory");
2823                 return NULL;
2824         }
2825         *cache_resource = *resource;
2826         cache_resource->idx = idx;
2827         ret = mlx5_flow_os_create_flow_action_packet_reformat
2828                                         (sh->ctx, domain, cache_resource,
2829                                          &cache_resource->action);
2830         if (ret) {
2831                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2832                 rte_flow_error_set(ctx->error, ENOMEM,
2833                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2834                                    NULL, "cannot create action");
2835                 return NULL;
2836         }
2837
2838         return &cache_resource->entry;
2839 }
2840
2841 /**
2842  * Find existing encap/decap resource or create and register a new one.
2843  *
2844  * @param[in, out] dev
2845  *   Pointer to rte_eth_dev structure.
2846  * @param[in, out] resource
2847  *   Pointer to encap/decap resource.
2848  * @param[in, out] dev_flow
2849  *   Pointer to the dev_flow.
2850  * @param[out] error
2851  *   Pointer to error structure.
2852  *
2853  * @return
2854  *   0 on success, a negative errno value otherwise and rte_errno is set.
2855  */
2856 static int
2857 flow_dv_encap_decap_resource_register
2858                         (struct rte_eth_dev *dev,
2859                          struct mlx5_flow_dv_encap_decap_resource *resource,
2860                          struct mlx5_flow *dev_flow,
2861                          struct rte_flow_error *error)
2862 {
2863         struct mlx5_priv *priv = dev->data->dev_private;
2864         struct mlx5_dev_ctx_shared *sh = priv->sh;
2865         struct mlx5_hlist_entry *entry;
2866         union mlx5_flow_encap_decap_key encap_decap_key = {
2867                 {
2868                         .ft_type = resource->ft_type,
2869                         .refmt_type = resource->reformat_type,
2870                         .buf_size = resource->size,
2871                         .table_level = !!dev_flow->dv.group,
2872                         .cksum = 0,
2873                 }
2874         };
2875         struct mlx5_flow_cb_ctx ctx = {
2876                 .error = error,
2877                 .data = resource,
2878         };
2879
2880         resource->flags = dev_flow->dv.group ? 0 : 1;
2881         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2882                                                 resource->size, 0);
2883         resource->entry.key = encap_decap_key.v64;
2884         entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2885                                     &ctx);
2886         if (!entry)
2887                 return -rte_errno;
2888         resource = container_of(entry, typeof(*resource), entry);
2889         dev_flow->dv.encap_decap = resource;
2890         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2891         return 0;
2892 }
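
/*
 * Illustrative sketch only: how the 64-bit hash key registered above
 * is assembled. The metadata bits (table type, reformat type, buffer
 * size, table level) are packed together with a raw checksum of the
 * reformat buffer, so flow_dv_encap_decap_match_cb() only has to
 * memcmp() candidates whose keys already collide.
 */
static inline uint64_t __rte_unused
sketch_encap_decap_key(const struct mlx5_flow_dv_encap_decap_resource *res,
		       uint32_t group)
{
	union mlx5_flow_encap_decap_key key = {
		{
			.ft_type = res->ft_type,
			.refmt_type = res->reformat_type,
			.buf_size = res->size,
			.table_level = !!group,
			.cksum = 0,
		}
	};

	key.cksum = __rte_raw_cksum(res->buf, res->size, 0);
	return key.v64;
}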
2893
2894 /**
2895  * Find existing table jump resource or create and register a new one.
2896  *
2897  * @param[in, out] dev
2898  *   Pointer to rte_eth_dev structure.
2899  * @param[in, out] tbl
2900  *   Pointer to flow table resource.
2901  * @param[in, out] dev_flow
2902  *   Pointer to the dev_flow.
2903  * @param[out] error
2904  *   Pointer to error structure.
2905  *
2906  * @return
2907  *   0 on success, a negative errno value otherwise and rte_errno is set.
2908  */
2909 static int
2910 flow_dv_jump_tbl_resource_register
2911                         (struct rte_eth_dev *dev __rte_unused,
2912                          struct mlx5_flow_tbl_resource *tbl,
2913                          struct mlx5_flow *dev_flow,
2914                          struct rte_flow_error *error __rte_unused)
2915 {
2916         struct mlx5_flow_tbl_data_entry *tbl_data =
2917                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2918
2919         MLX5_ASSERT(tbl);
2920         MLX5_ASSERT(tbl_data->jump.action);
2921         dev_flow->handle->rix_jump = tbl_data->idx;
2922         dev_flow->dv.jump = &tbl_data->jump;
2923         return 0;
2924 }
2925
2926 int
2927 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2928                          struct mlx5_cache_entry *entry, void *cb_ctx)
2929 {
2930         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2931         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2932         struct mlx5_flow_dv_port_id_action_resource *res =
2933                         container_of(entry, typeof(*res), entry);
2934
2935         return ref->port_id != res->port_id;
2936 }
2937
2938 struct mlx5_cache_entry *
2939 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2940                           struct mlx5_cache_entry *entry __rte_unused,
2941                           void *cb_ctx)
2942 {
2943         struct mlx5_dev_ctx_shared *sh = list->ctx;
2944         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2945         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2946         struct mlx5_flow_dv_port_id_action_resource *cache;
2947         uint32_t idx;
2948         int ret;
2949
2950         /* Register new port id action resource. */
2951         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2952         if (!cache) {
2953                 rte_flow_error_set(ctx->error, ENOMEM,
2954                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2955                                    "cannot allocate port_id action cache memory");
2956                 return NULL;
2957         }
2958         *cache = *ref;
2959         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2960                                                         ref->port_id,
2961                                                         &cache->action);
2962         if (ret) {
2963                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
2964                 rte_flow_error_set(ctx->error, ENOMEM,
2965                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2966                                    "cannot create action");
2967                 return NULL;
2968         }
2969         return &cache->entry;
2970 }
2971
2972 /**
2973  * Find existing port ID action resource or create and register a new one.
2974  *
2975  * @param[in, out] dev
2976  *   Pointer to rte_eth_dev structure.
2977  * @param[in, out] resource
2978  *   Pointer to port ID action resource.
2979  * @param[in, out] dev_flow
2980  *   Pointer to the dev_flow.
2981  * @param[out] error
2982  *   Pointer to error structure.
2983  *
2984  * @return
2985  *   0 on success, a negative errno value otherwise and rte_errno is set.
2986  */
2987 static int
2988 flow_dv_port_id_action_resource_register
2989                         (struct rte_eth_dev *dev,
2990                          struct mlx5_flow_dv_port_id_action_resource *resource,
2991                          struct mlx5_flow *dev_flow,
2992                          struct rte_flow_error *error)
2993 {
2994         struct mlx5_priv *priv = dev->data->dev_private;
2995         struct mlx5_cache_entry *entry;
2996         struct mlx5_flow_dv_port_id_action_resource *cache;
2997         struct mlx5_flow_cb_ctx ctx = {
2998                 .error = error,
2999                 .data = resource,
3000         };
3001
3002         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3003         if (!entry)
3004                 return -rte_errno;
3005         cache = container_of(entry, typeof(*cache), entry);
3006         dev_flow->dv.port_id_action = cache;
3007         dev_flow->handle->rix_port_id_action = cache->idx;
3008         return 0;
3009 }
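
/*
 * Reading of the cache-list pattern used here and below, not a
 * normative description: mlx5_cache_register() presumably looks up an
 * existing entry with the match callback (0 meaning "equal") and, on
 * a miss, calls the create callback with the same mlx5_flow_cb_ctx,
 * so the returned entry can be shared between flows.
 */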
3010
3011 int
3012 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3013                          struct mlx5_cache_entry *entry, void *cb_ctx)
3014 {
3015         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3016         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3017         struct mlx5_flow_dv_push_vlan_action_resource *res =
3018                         container_of(entry, typeof(*res), entry);
3019
3020         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3021 }
3022
3023 struct mlx5_cache_entry *
3024 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3025                           struct mlx5_cache_entry *entry __rte_unused,
3026                           void *cb_ctx)
3027 {
3028         struct mlx5_dev_ctx_shared *sh = list->ctx;
3029         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3030         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3031         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3032         struct mlx5dv_dr_domain *domain;
3033         uint32_t idx;
3034         int ret;
3035
3036         /* Register new push VLAN action resource. */
3037         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3038         if (!cache) {
3039                 rte_flow_error_set(ctx->error, ENOMEM,
3040                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3041                                    "cannot allocate push_vlan action cache memory");
3042                 return NULL;
3043         }
3044         *cache = *ref;
3045         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3046                 domain = sh->fdb_domain;
3047         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3048                 domain = sh->rx_domain;
3049         else
3050                 domain = sh->tx_domain;
3051         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3052                                                         &cache->action);
3053         if (ret) {
3054                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3055                 rte_flow_error_set(ctx->error, ENOMEM,
3056                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3057                                    "cannot create push vlan action");
3058                 return NULL;
3059         }
3060         return &cache->entry;
3061 }
3062
3063 /**
3064  * Find existing push vlan resource or create and register a new one.
3065  *
3066  * @param[in, out] dev
3067  *   Pointer to rte_eth_dev structure.
3068  * @param[in, out] resource
3069  *   Pointer to push VLAN action resource.
3070  * @param[in, out] dev_flow
3071  *   Pointer to the dev_flow.
3072  * @param[out] error
3073  *   Pointer to error structure.
3074  *
3075  * @return
3076  *   0 on success, a negative errno value otherwise and rte_errno is set.
3077  */
3078 static int
3079 flow_dv_push_vlan_action_resource_register
3080                        (struct rte_eth_dev *dev,
3081                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3082                         struct mlx5_flow *dev_flow,
3083                         struct rte_flow_error *error)
3084 {
3085         struct mlx5_priv *priv = dev->data->dev_private;
3086         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3087         struct mlx5_cache_entry *entry;
3088         struct mlx5_flow_cb_ctx ctx = {
3089                 .error = error,
3090                 .data = resource,
3091         };
3092
3093         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3094         if (!entry)
3095                 return -rte_errno;
3096         cache = container_of(entry, typeof(*cache), entry);
3097
3098         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3099         dev_flow->dv.push_vlan_res = cache;
3100         return 0;
3101 }
3102
3103 /**
3104  * Get the header length of the specified rte_flow_item_type.
3105  *
3106  * @param[in] item_type
3107  *   Tested rte_flow_item_type.
3108  *
3109  * @return
3110  *   Header length of the item type in bytes, 0 if void or irrelevant.
3111  */
3112 static size_t
3113 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3114 {
3115         size_t retval;
3116
3117         switch (item_type) {
3118         case RTE_FLOW_ITEM_TYPE_ETH:
3119                 retval = sizeof(struct rte_ether_hdr);
3120                 break;
3121         case RTE_FLOW_ITEM_TYPE_VLAN:
3122                 retval = sizeof(struct rte_vlan_hdr);
3123                 break;
3124         case RTE_FLOW_ITEM_TYPE_IPV4:
3125                 retval = sizeof(struct rte_ipv4_hdr);
3126                 break;
3127         case RTE_FLOW_ITEM_TYPE_IPV6:
3128                 retval = sizeof(struct rte_ipv6_hdr);
3129                 break;
3130         case RTE_FLOW_ITEM_TYPE_UDP:
3131                 retval = sizeof(struct rte_udp_hdr);
3132                 break;
3133         case RTE_FLOW_ITEM_TYPE_TCP:
3134                 retval = sizeof(struct rte_tcp_hdr);
3135                 break;
3136         case RTE_FLOW_ITEM_TYPE_VXLAN:
3137         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3138                 retval = sizeof(struct rte_vxlan_hdr);
3139                 break;
3140         case RTE_FLOW_ITEM_TYPE_GRE:
3141         case RTE_FLOW_ITEM_TYPE_NVGRE:
3142                 retval = sizeof(struct rte_gre_hdr);
3143                 break;
3144         case RTE_FLOW_ITEM_TYPE_MPLS:
3145                 retval = sizeof(struct rte_mpls_hdr);
3146                 break;
3147         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3148         default:
3149                 retval = 0;
3150                 break;
3151         }
3152         return retval;
3153 }
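
/*
 * Illustrative sketch only: the total raw buffer size of a classic
 * VXLAN encapsulation computed with the helper above; this is the
 * value flow_dv_convert_encap_data() accumulates and checks against
 * MLX5_ENCAP_MAX_LEN.
 */
static inline size_t __rte_unused
sketch_vxlan_encap_hdr_len(void)
{
	/* ETH(14) + IPv4(20) + UDP(8) + VXLAN(8) = 50 bytes. */
	return flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_ETH) +
	       flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_IPV4) +
	       flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_UDP) +
	       flow_dv_get_item_hdr_len(RTE_FLOW_ITEM_TYPE_VXLAN);
}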
3154
3155 #define MLX5_ENCAP_IPV4_VERSION         0x40
3156 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3157 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3158 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3159 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3160 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3161 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3162
3163 /**
3164  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3165  *
3166  * @param[in] items
3167  *   Pointer to rte_flow_item objects list.
3168  * @param[out] buf
3169  *   Pointer to the output buffer.
3170  * @param[out] size
3171  *   Pointer to the output buffer size.
3172  * @param[out] error
3173  *   Pointer to the error structure.
3174  *
3175  * @return
3176  *   0 on success, a negative errno value otherwise and rte_errno is set.
3177  */
3178 static int
3179 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3180                            size_t *size, struct rte_flow_error *error)
3181 {
3182         struct rte_ether_hdr *eth = NULL;
3183         struct rte_vlan_hdr *vlan = NULL;
3184         struct rte_ipv4_hdr *ipv4 = NULL;
3185         struct rte_ipv6_hdr *ipv6 = NULL;
3186         struct rte_udp_hdr *udp = NULL;
3187         struct rte_vxlan_hdr *vxlan = NULL;
3188         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3189         struct rte_gre_hdr *gre = NULL;
3190         size_t len;
3191         size_t temp_size = 0;
3192
3193         if (!items)
3194                 return rte_flow_error_set(error, EINVAL,
3195                                           RTE_FLOW_ERROR_TYPE_ACTION,
3196                                           NULL, "invalid empty data");
3197         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3198                 len = flow_dv_get_item_hdr_len(items->type);
3199                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3200                         return rte_flow_error_set(error, EINVAL,
3201                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3202                                                   (void *)items->type,
3203                                                   "items total size is too big"
3204                                                   " for encap action");
3205                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3206                 switch (items->type) {
3207                 case RTE_FLOW_ITEM_TYPE_ETH:
3208                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3209                         break;
3210                 case RTE_FLOW_ITEM_TYPE_VLAN:
3211                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3212                         if (!eth)
3213                                 return rte_flow_error_set(error, EINVAL,
3214                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3215                                                 (void *)items->type,
3216                                                 "eth header not found");
3217                         if (!eth->ether_type)
3218                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3219                         break;
3220                 case RTE_FLOW_ITEM_TYPE_IPV4:
3221                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3222                         if (!vlan && !eth)
3223                                 return rte_flow_error_set(error, EINVAL,
3224                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3225                                                 (void *)items->type,
3226                                                 "neither eth nor vlan"
3227                                                 " header found");
3228                         if (vlan && !vlan->eth_proto)
3229                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3230                         else if (eth && !eth->ether_type)
3231                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3232                         if (!ipv4->version_ihl)
3233                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3234                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3235                         if (!ipv4->time_to_live)
3236                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3237                         break;
3238                 case RTE_FLOW_ITEM_TYPE_IPV6:
3239                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3240                         if (!vlan && !eth)
3241                                 return rte_flow_error_set(error, EINVAL,
3242                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3243                                                 (void *)items->type,
3244                                                 "neither eth nor vlan"
3245                                                 " header found");
3246                         if (vlan && !vlan->eth_proto)
3247                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3248                         else if (eth && !eth->ether_type)
3249                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3250                         if (!ipv6->vtc_flow)
3251                                 ipv6->vtc_flow =
3252                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3253                         if (!ipv6->hop_limits)
3254                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3255                         break;
3256                 case RTE_FLOW_ITEM_TYPE_UDP:
3257                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3258                         if (!ipv4 && !ipv6)
3259                                 return rte_flow_error_set(error, EINVAL,
3260                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3261                                                 (void *)items->type,
3262                                                 "ip header not found");
3263                         if (ipv4 && !ipv4->next_proto_id)
3264                                 ipv4->next_proto_id = IPPROTO_UDP;
3265                         else if (ipv6 && !ipv6->proto)
3266                                 ipv6->proto = IPPROTO_UDP;
3267                         break;
3268                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3269                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3270                         if (!udp)
3271                                 return rte_flow_error_set(error, EINVAL,
3272                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3273                                                 (void *)items->type,
3274                                                 "udp header not found");
3275                         if (!udp->dst_port)
3276                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3277                         if (!vxlan->vx_flags)
3278                                 vxlan->vx_flags =
3279                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3280                         break;
3281                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3282                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3283                         if (!udp)
3284                                 return rte_flow_error_set(error, EINVAL,
3285                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3286                                                 (void *)items->type,
3287                                                 "udp header not found");
3288                         if (!vxlan_gpe->proto)
3289                                 return rte_flow_error_set(error, EINVAL,
3290                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3291                                                 (void *)items->type,
3292                                                 "next protocol not found");
3293                         if (!udp->dst_port)
3294                                 udp->dst_port =
3295                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3296                         if (!vxlan_gpe->vx_flags)
3297                                 vxlan_gpe->vx_flags =
3298                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3299                         break;
3300                 case RTE_FLOW_ITEM_TYPE_GRE:
3301                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3302                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3303                         if (!gre->proto)
3304                                 return rte_flow_error_set(error, EINVAL,
3305                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3306                                                 (void *)items->type,
3307                                                 "next protocol not found");
3308                         if (!ipv4 && !ipv6)
3309                                 return rte_flow_error_set(error, EINVAL,
3310                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3311                                                 (void *)items->type,
3312                                                 "ip header not found");
3313                         if (ipv4 && !ipv4->next_proto_id)
3314                                 ipv4->next_proto_id = IPPROTO_GRE;
3315                         else if (ipv6 && !ipv6->proto)
3316                                 ipv6->proto = IPPROTO_GRE;
3317                         break;
3318                 case RTE_FLOW_ITEM_TYPE_VOID:
3319                         break;
3320                 default:
3321                         return rte_flow_error_set(error, EINVAL,
3322                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3323                                                   (void *)items->type,
3324                                                   "unsupported item type");
3325                         break;
3326                 }
3327                 temp_size += len;
3328         }
3329         *size = temp_size;
3330         return 0;
3331 }
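
/*
 * Illustrative sketch only (hypothetical addresses and VNI): the kind
 * of item list flow_dv_convert_encap_data() consumes. The fields left
 * at zero (EtherType, IP protocol, UDP destination port, VXLAN flags)
 * are filled in with the MLX5_ENCAP_* defaults above.
 */
static const struct rte_flow_item_eth sketch_encap_eth __rte_unused = {
	.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	.src.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};
static const struct rte_flow_item_ipv4 sketch_encap_ipv4 __rte_unused = {
	.hdr.src_addr = RTE_BE32(0xc0a80001),	/* 192.168.0.1 */
	.hdr.dst_addr = RTE_BE32(0xc0a80002),	/* 192.168.0.2 */
};
static const struct rte_flow_item_udp sketch_encap_udp __rte_unused = {
	.hdr.src_port = RTE_BE16(12345),
};
static const struct rte_flow_item_vxlan sketch_encap_vxlan __rte_unused = {
	.vni = { 0x00, 0x00, 0x2a },	/* VNI 42. */
};
static const struct rte_flow_item
sketch_vxlan_encap_items[] __rte_unused = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &sketch_encap_eth },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &sketch_encap_ipv4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &sketch_encap_udp },
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &sketch_encap_vxlan },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};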
3332
3333 static int
3334 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3335 {
3336         struct rte_ether_hdr *eth = NULL;
3337         struct rte_vlan_hdr *vlan = NULL;
3338         struct rte_ipv6_hdr *ipv6 = NULL;
3339         struct rte_udp_hdr *udp = NULL;
3340         char *next_hdr;
3341         uint16_t proto;
3342
3343         eth = (struct rte_ether_hdr *)data;
3344         next_hdr = (char *)(eth + 1);
3345         proto = RTE_BE16(eth->ether_type);
3346
3347         /* VLAN skipping */
3348         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3349                 vlan = (struct rte_vlan_hdr *)next_hdr;
3350                 proto = RTE_BE16(vlan->eth_proto);
3351                 next_hdr += sizeof(struct rte_vlan_hdr);
3352         }
3353
3354         /* HW calculates the IPv4 checksum, no need to proceed. */
3355         if (proto == RTE_ETHER_TYPE_IPV4)
3356                 return 0;
3357
3358         /* Non IPv4/IPv6 header, not supported. */
3359         if (proto != RTE_ETHER_TYPE_IPV6) {
3360                 return rte_flow_error_set(error, ENOTSUP,
3361                                           RTE_FLOW_ERROR_TYPE_ACTION,
3362                                           NULL, "Cannot offload non IPv4/IPv6");
3363         }
3364
3365         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3366
3367         /* Ignore non-UDP. */
3368         if (ipv6->proto != IPPROTO_UDP)
3369                 return 0;
3370
3371         udp = (struct rte_udp_hdr *)(ipv6 + 1);
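	/*
	 * Presumably relying on RFC 6935, which permits a zero UDP
	 * checksum for tunnels over IPv6: clear the outer checksum
	 * rather than recomputing it for the encapsulation header.
	 */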
3372         udp->dgram_cksum = 0;
3373
3374         return 0;
3375 }
3376
3377 /**
3378  * Convert L2 encap action to DV specification.
3379  *
3380  * @param[in] dev
3381  *   Pointer to rte_eth_dev structure.
3382  * @param[in] action
3383  *   Pointer to action structure.
3384  * @param[in, out] dev_flow
3385  *   Pointer to the mlx5_flow.
3386  * @param[in] transfer
3387  *   Mark if the flow is E-Switch flow.
3388  * @param[out] error
3389  *   Pointer to the error structure.
3390  *
3391  * @return
3392  *   0 on success, a negative errno value otherwise and rte_errno is set.
3393  */
3394 static int
3395 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3396                                const struct rte_flow_action *action,
3397                                struct mlx5_flow *dev_flow,
3398                                uint8_t transfer,
3399                                struct rte_flow_error *error)
3400 {
3401         const struct rte_flow_item *encap_data;
3402         const struct rte_flow_action_raw_encap *raw_encap_data;
3403         struct mlx5_flow_dv_encap_decap_resource res = {
3404                 .reformat_type =
3405                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3406                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3407                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3408         };
3409
3410         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3411                 raw_encap_data =
3412                         (const struct rte_flow_action_raw_encap *)action->conf;
3413                 res.size = raw_encap_data->size;
3414                 memcpy(res.buf, raw_encap_data->data, res.size);
3415         } else {
3416                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3417                         encap_data =
3418                                 ((const struct rte_flow_action_vxlan_encap *)
3419                                                 action->conf)->definition;
3420                 else
3421                         encap_data =
3422                                 ((const struct rte_flow_action_nvgre_encap *)
3423                                                 action->conf)->definition;
3424                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3425                                                &res.size, error))
3426                         return -rte_errno;
3427         }
3428         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3429                 return -rte_errno;
3430         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3431                 return rte_flow_error_set(error, EINVAL,
3432                                           RTE_FLOW_ERROR_TYPE_ACTION,
3433                                           NULL, "can't create L2 encap action");
3434         return 0;
3435 }
3436
3437 /**
3438  * Convert L2 decap action to DV specification.
3439  *
3440  * @param[in] dev
3441  *   Pointer to rte_eth_dev structure.
3442  * @param[in, out] dev_flow
3443  *   Pointer to the mlx5_flow.
3444  * @param[in] transfer
3445  *   Mark if the flow is E-Switch flow.
3446  * @param[out] error
3447  *   Pointer to the error structure.
3448  *
3449  * @return
3450  *   0 on success, a negative errno value otherwise and rte_errno is set.
3451  */
3452 static int
3453 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3454                                struct mlx5_flow *dev_flow,
3455                                uint8_t transfer,
3456                                struct rte_flow_error *error)
3457 {
3458         struct mlx5_flow_dv_encap_decap_resource res = {
3459                 .size = 0,
3460                 .reformat_type =
3461                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3462                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3463                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3464         };
3465
3466         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3467                 return rte_flow_error_set(error, EINVAL,
3468                                           RTE_FLOW_ERROR_TYPE_ACTION,
3469                                           NULL, "can't create L2 decap action");
3470         return 0;
3471 }
3472
3473 /**
3474  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3475  *
3476  * @param[in] dev
3477  *   Pointer to rte_eth_dev structure.
3478  * @param[in] action
3479  *   Pointer to action structure.
3480  * @param[in, out] dev_flow
3481  *   Pointer to the mlx5_flow.
3482  * @param[in] attr
3483  *   Pointer to the flow attributes.
3484  * @param[out] error
3485  *   Pointer to the error structure.
3486  *
3487  * @return
3488  *   0 on success, a negative errno value otherwise and rte_errno is set.
3489  */
3490 static int
3491 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3492                                 const struct rte_flow_action *action,
3493                                 struct mlx5_flow *dev_flow,
3494                                 const struct rte_flow_attr *attr,
3495                                 struct rte_flow_error *error)
3496 {
3497         const struct rte_flow_action_raw_encap *encap_data;
3498         struct mlx5_flow_dv_encap_decap_resource res;
3499
3500         memset(&res, 0, sizeof(res));
3501         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3502         res.size = encap_data->size;
3503         memcpy(res.buf, encap_data->data, res.size);
3504         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3505                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3506                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3507         if (attr->transfer)
3508                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3509         else
3510                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3511                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3512         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3513                 return rte_flow_error_set(error, EINVAL,
3514                                           RTE_FLOW_ERROR_TYPE_ACTION,
3515                                           NULL, "can't create encap action");
3516         return 0;
3517 }
3518
3519 /**
3520  * Create action push VLAN.
3521  *
3522  * @param[in] dev
3523  *   Pointer to rte_eth_dev structure.
3524  * @param[in] attr
3525  *   Pointer to the flow attributes.
3526  * @param[in] vlan
3527  *   Pointer to the vlan to push to the Ethernet header.
3528  * @param[in, out] dev_flow
3529  *   Pointer to the mlx5_flow.
3530  * @param[out] error
3531  *   Pointer to the error structure.
3532  *
3533  * @return
3534  *   0 on success, a negative errno value otherwise and rte_errno is set.
3535  */
3536 static int
3537 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3538                                 const struct rte_flow_attr *attr,
3539                                 const struct rte_vlan_hdr *vlan,
3540                                 struct mlx5_flow *dev_flow,
3541                                 struct rte_flow_error *error)
3542 {
3543         struct mlx5_flow_dv_push_vlan_action_resource res;
3544
3545         memset(&res, 0, sizeof(res));
3546         res.vlan_tag =
3547                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3548                                  vlan->vlan_tci);
3549         if (attr->transfer)
3550                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3551         else
3552                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3553                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3554         return flow_dv_push_vlan_action_resource_register
3555                                             (dev, &res, dev_flow, error);
3556 }
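
/*
 * Worked example for the tag composition above (hypothetical values):
 * TPID 0x8100, PCP 5 and VID 100 give TCI = (5 << 13) | 100 = 0xa064,
 * so vlan_tag = (0x8100 << 16) | 0xa064 = 0x8100a064 in CPU order
 * before the big-endian conversion.
 */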
3557
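/*
 * Presumably a process-wide flag recording that an E-Switch mirroring
 * (FDB sample) flow exists; it is consulted by the modify-header
 * validations below. Being a plain static, it is shared by all ports.
 */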
3558 static int fdb_mirror;
3559
3560 /**
3561  * Validate the modify-header actions.
3562  *
3563  * @param[in] action_flags
3564  *   Holds the actions detected until now.
3565  * @param[in] action
3566  *   Pointer to the modify action.
3567  * @param[out] error
3568  *   Pointer to error structure.
3569  *
3570  * @return
3571  *   0 on success, a negative errno value otherwise and rte_errno is set.
3572  */
3573 static int
3574 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3575                                    const struct rte_flow_action *action,
3576                                    struct rte_flow_error *error)
3577 {
3578         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3579                 return rte_flow_error_set(error, EINVAL,
3580                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3581                                           NULL, "action configuration not set");
3582         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3583                 return rte_flow_error_set(error, EINVAL,
3584                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3585                                           "can't have encap action before"
3586                                           " modify action");
3587         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3588                 return rte_flow_error_set(error, EINVAL,
3589                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3590                                           "can't support sample action before"
3591                                           " modify action for E-Switch"
3592                                           " mirroring");
3593         return 0;
3594 }
3595
3596 /**
3597  * Validate the modify-header MAC address actions.
3598  *
3599  * @param[in] action_flags
3600  *   Holds the actions detected until now.
3601  * @param[in] action
3602  *   Pointer to the modify action.
3603  * @param[in] item_flags
3604  *   Holds the items detected.
3605  * @param[out] error
3606  *   Pointer to error structure.
3607  *
3608  * @return
3609  *   0 on success, a negative errno value otherwise and rte_errno is set.
3610  */
3611 static int
3612 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3613                                    const struct rte_flow_action *action,
3614                                    const uint64_t item_flags,
3615                                    struct rte_flow_error *error)
3616 {
3617         int ret = 0;
3618
3619         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3620         if (!ret) {
3621                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3622                         return rte_flow_error_set(error, EINVAL,
3623                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3624                                                   NULL,
3625                                                   "no L2 item in pattern");
3626         }
3627         return ret;
3628 }
3629
3630 /**
3631  * Validate the modify-header IPv4 address actions.
3632  *
3633  * @param[in] action_flags
3634  *   Holds the actions detected until now.
3635  * @param[in] action
3636  *   Pointer to the modify action.
3637  * @param[in] item_flags
3638  *   Holds the items detected.
3639  * @param[out] error
3640  *   Pointer to error structure.
3641  *
3642  * @return
3643  *   0 on success, a negative errno value otherwise and rte_errno is set.
3644  */
3645 static int
3646 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3647                                     const struct rte_flow_action *action,
3648                                     const uint64_t item_flags,
3649                                     struct rte_flow_error *error)
3650 {
3651         int ret = 0;
3652         uint64_t layer;
3653
3654         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3655         if (!ret) {
3656                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3657                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3658                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3659                 if (!(item_flags & layer))
3660                         return rte_flow_error_set(error, EINVAL,
3661                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3662                                                   NULL,
3663                                                   "no ipv4 item in pattern");
3664         }
3665         return ret;
3666 }
3667
3668 /**
3669  * Validate the modify-header IPv6 address actions.
3670  *
3671  * @param[in] action_flags
3672  *   Holds the actions detected until now.
3673  * @param[in] action
3674  *   Pointer to the modify action.
3675  * @param[in] item_flags
3676  *   Holds the items detected.
3677  * @param[out] error
3678  *   Pointer to error structure.
3679  *
3680  * @return
3681  *   0 on success, a negative errno value otherwise and rte_errno is set.
3682  */
3683 static int
3684 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3685                                     const struct rte_flow_action *action,
3686                                     const uint64_t item_flags,
3687                                     struct rte_flow_error *error)
3688 {
3689         int ret = 0;
3690         uint64_t layer;
3691
3692         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3693         if (!ret) {
3694                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3695                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3696                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3697                 if (!(item_flags & layer))
3698                         return rte_flow_error_set(error, EINVAL,
3699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3700                                                   NULL,
3701                                                   "no ipv6 item in pattern");
3702         }
3703         return ret;
3704 }
3705
3706 /**
3707  * Validate the modify-header TP actions.
3708  *
3709  * @param[in] action_flags
3710  *   Holds the actions detected until now.
3711  * @param[in] action
3712  *   Pointer to the modify action.
3713  * @param[in] item_flags
3714  *   Holds the items detected.
3715  * @param[out] error
3716  *   Pointer to error structure.
3717  *
3718  * @return
3719  *   0 on success, a negative errno value otherwise and rte_errno is set.
3720  */
3721 static int
3722 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3723                                   const struct rte_flow_action *action,
3724                                   const uint64_t item_flags,
3725                                   struct rte_flow_error *error)
3726 {
3727         int ret = 0;
3728         uint64_t layer;
3729
3730         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3731         if (!ret) {
3732                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3733                                  MLX5_FLOW_LAYER_INNER_L4 :
3734                                  MLX5_FLOW_LAYER_OUTER_L4;
3735                 if (!(item_flags & layer))
3736                         return rte_flow_error_set(error, EINVAL,
3737                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3738                                                   NULL, "no transport layer "
3739                                                   "in pattern");
3740         }
3741         return ret;
3742 }
3743
3744 /**
3745  * Validate the modify-header actions of increment/decrement
3746  * TCP sequence number.
3747  *
3748  * @param[in] action_flags
3749  *   Holds the actions detected until now.
3750  * @param[in] action
3751  *   Pointer to the modify action.
3752  * @param[in] item_flags
3753  *   Holds the items detected.
3754  * @param[out] error
3755  *   Pointer to error structure.
3756  *
3757  * @return
3758  *   0 on success, a negative errno value otherwise and rte_errno is set.
3759  */
3760 static int
3761 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3762                                        const struct rte_flow_action *action,
3763                                        const uint64_t item_flags,
3764                                        struct rte_flow_error *error)
3765 {
3766         int ret = 0;
3767         uint64_t layer;
3768
3769         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3770         if (!ret) {
3771                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3772                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3773                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3774                 if (!(item_flags & layer))
3775                         return rte_flow_error_set(error, EINVAL,
3776                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3777                                                   NULL, "no TCP item in"
3778                                                   " pattern");
3779                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3780                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3781                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3782                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3783                         return rte_flow_error_set(error, EINVAL,
3784                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3785                                                   NULL,
3786                                                   "cannot decrease and increase"
3787                                                   " TCP sequence number"
3788                                                   " at the same time");
3789         }
3790         return ret;
3791 }
3792
3793 /**
3794  * Validate the modify-header actions of increment/decrement
3795  * TCP Acknowledgment number.
3796  *
3797  * @param[in] action_flags
3798  *   Holds the actions detected until now.
3799  * @param[in] action
3800  *   Pointer to the modify action.
3801  * @param[in] item_flags
3802  *   Holds the items detected.
3803  * @param[out] error
3804  *   Pointer to error structure.
3805  *
3806  * @return
3807  *   0 on success, a negative errno value otherwise and rte_errno is set.
3808  */
3809 static int
3810 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3811                                        const struct rte_flow_action *action,
3812                                        const uint64_t item_flags,
3813                                        struct rte_flow_error *error)
3814 {
3815         int ret = 0;
3816         uint64_t layer;
3817
3818         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3819         if (!ret) {
3820                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3821                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3822                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3823                 if (!(item_flags & layer))
3824                         return rte_flow_error_set(error, EINVAL,
3825                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3826                                                   NULL, "no TCP item in"
3827                                                   " pattern");
3828                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3829                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3830                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3831                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3832                         return rte_flow_error_set(error, EINVAL,
3833                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3834                                                   NULL,
3835                                                   "cannot decrease and increase"
3836                                                   " TCP acknowledgment number"
3837                                                   " at the same time");
3838         }
3839         return ret;
3840 }
3841
3842 /**
3843  * Validate the modify-header TTL actions.
3844  *
3845  * @param[in] action_flags
3846  *   Holds the actions detected until now.
3847  * @param[in] action
3848  *   Pointer to the modify action.
3849  * @param[in] item_flags
3850  *   Holds the items detected.
3851  * @param[out] error
3852  *   Pointer to error structure.
3853  *
3854  * @return
3855  *   0 on success, a negative errno value otherwise and rte_errno is set.
3856  */
3857 static int
3858 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3859                                    const struct rte_flow_action *action,
3860                                    const uint64_t item_flags,
3861                                    struct rte_flow_error *error)
3862 {
3863         int ret = 0;
3864         uint64_t layer;
3865
3866         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3867         if (!ret) {
3868                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3869                                  MLX5_FLOW_LAYER_INNER_L3 :
3870                                  MLX5_FLOW_LAYER_OUTER_L3;
3871                 if (!(item_flags & layer))
3872                         return rte_flow_error_set(error, EINVAL,
3873                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3874                                                   NULL,
3875                                                   "no IP protocol in pattern");
3876         }
3877         return ret;
3878 }
3879
3880 /**
3881  * Validate jump action.
3882  *
3883  *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, or NULL.
3884  *   Pointer to the jump action.
3885  * @param[in] action_flags
3886  *   Holds the actions detected until now.
3887  * @param[in] attributes
3888  *   Pointer to flow attributes
3889  * @param[in] external
3890  *   Action belongs to flow rule created by request external to PMD.
3891  * @param[out] error
3892  *   Pointer to error structure.
3893  *
3894  * @return
3895  *   0 on success, a negative errno value otherwise and rte_errno is set.
3896  */
3897 static int
3898 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3899                              const struct mlx5_flow_tunnel *tunnel,
3900                              const struct rte_flow_action *action,
3901                              uint64_t action_flags,
3902                              const struct rte_flow_attr *attributes,
3903                              bool external, struct rte_flow_error *error)
3904 {
3905         uint32_t target_group, table;
3906         int ret = 0;
3907         struct flow_grp_info grp_info = {
3908                 .external = !!external,
3909                 .transfer = !!attributes->transfer,
3910                 .fdb_def_rule = 1,
3911                 .std_tbl_fix = 0
3912         };
3913         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3914                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3915                 return rte_flow_error_set(error, EINVAL,
3916                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3917                                           "can't have 2 fate actions in"
3918                                           " the same flow");
3919         if (action_flags & MLX5_FLOW_ACTION_METER)
3920                 return rte_flow_error_set(error, ENOTSUP,
3921                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3922                                           "jump with meter not supported");
3923         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3924                 return rte_flow_error_set(error, EINVAL,
3925                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3926                                           "E-Switch mirroring can't support"
3927                                           " Sample and jump actions in the"
3928                                           " same flow");
3929         if (!action->conf)
3930                 return rte_flow_error_set(error, EINVAL,
3931                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3932                                           NULL, "action configuration not set");
3933         target_group =
3934                 ((const struct rte_flow_action_jump *)action->conf)->group;
3935         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3936                                        grp_info, error);
3937         if (ret)
3938                 return ret;
3939         if (attributes->group == target_group &&
3940             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3941                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3942                 return rte_flow_error_set(error, EINVAL,
3943                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3944                                           "target group must be other than"
3945                                           " the current flow group");
3946         return 0;
3947 }
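/*
 * Illustrative sketch, not part of the driver: a minimal application-side
 * JUMP action shaped to pass the validation above. The attribute and group
 * values are assumptions for the example; the hard requirement checked
 * above is that the target group differs from the flow's own group unless
 * tunnel offload set/match actions are present.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static const struct rte_flow_attr example_jump_attr = {
        .group = 0,     /* The flow itself resides in group 0... */
        .ingress = 1,
};
static const struct rte_flow_action_jump example_jump_conf = {
        .group = 1,     /* ...and jumps to a different group. */
};
static const struct rte_flow_action example_jump_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &example_jump_conf },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */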
3948
3949 /**
3950  * Validate the port_id action.
3951  *
3952  * @param[in] dev
3953  *   Pointer to rte_eth_dev structure.
3954  * @param[in] action_flags
3955  *   Bit-fields that holds the actions detected until now.
3956  * @param[in] action
3957  *   Port_id RTE action structure.
3958  * @param[in] attr
3959  *   Attributes of flow that includes this action.
3960  * @param[out] error
3961  *   Pointer to error structure.
3962  *
3963  * @return
3964  *   0 on success, a negative errno value otherwise and rte_errno is set.
3965  */
3966 static int
3967 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3968                                 uint64_t action_flags,
3969                                 const struct rte_flow_action *action,
3970                                 const struct rte_flow_attr *attr,
3971                                 struct rte_flow_error *error)
3972 {
3973         const struct rte_flow_action_port_id *port_id;
3974         struct mlx5_priv *act_priv;
3975         struct mlx5_priv *dev_priv;
3976         uint16_t port;
3977
3978         if (!attr->transfer)
3979                 return rte_flow_error_set(error, ENOTSUP,
3980                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3981                                           NULL,
3982                                           "port id action is valid in transfer"
3983                                           " mode only");
3984         if (!action || !action->conf)
3985                 return rte_flow_error_set(error, ENOTSUP,
3986                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3987                                           NULL,
3988                                           "port id action parameters must be"
3989                                           " specified");
3990         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3991                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3992                 return rte_flow_error_set(error, EINVAL,
3993                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3994                                           "can have only one fate action in"
3995                                           " a flow");
3996         dev_priv = mlx5_dev_to_eswitch_info(dev);
3997         if (!dev_priv)
3998                 return rte_flow_error_set(error, rte_errno,
3999                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4000                                           NULL,
4001                                           "failed to obtain E-Switch info");
4002         port_id = action->conf;
4003         port = port_id->original ? dev->data->port_id : port_id->id;
4004         act_priv = mlx5_port_to_eswitch_info(port, false);
4005         if (!act_priv)
4006                 return rte_flow_error_set
4007                                 (error, rte_errno,
4008                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4009                                  "failed to obtain E-Switch port id for port");
4010         if (act_priv->domain_id != dev_priv->domain_id)
4011                 return rte_flow_error_set
4012                                 (error, EINVAL,
4013                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4014                                  "port does not belong to"
4015                                  " E-Switch being configured");
4016         return 0;
4017 }
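/*
 * Illustrative sketch, not part of the driver: a PORT_ID action shaped as
 * the validation above expects - a transfer rule whose destination port
 * belongs to the same E-Switch domain. The destination port number is an
 * assumption for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static const struct rte_flow_attr example_port_id_attr = {
        .transfer = 1,  /* port_id is valid in transfer mode only. */
};
static const struct rte_flow_action_port_id example_port_id_conf = {
        .original = 0,  /* Use the explicit id below, not the rule's port. */
        .id = 1,        /* Destination DPDK port id, assumed. */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */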
4018
4019 /**
4020  * Get the maximum number of modify header actions.
4021  *
4022  * @param dev
4023  *   Pointer to rte_eth_dev structure.
4024  * @param flags
4025  *   Flag bits used to check whether this is the root table level.
4026  *
4027  * @return
4028  *   Max number of modify header actions the device can support.
4029  */
4030 static inline unsigned int
4031 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4032                               uint64_t flags)
4033 {
4034         /*
4035          * There's no way to directly query the max capacity from the FW.
4036          * The maximal value on the root table is assumed to be supported.
4037          */
4038         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4039                 return MLX5_MAX_MODIFY_NUM;
4040         else
4041                 return MLX5_ROOT_TBL_MODIFY_NUM;
4042 }
4043
4044 /**
4045  * Validate the meter action.
4046  *
4047  * @param[in] dev
4048  *   Pointer to rte_eth_dev structure.
4049  * @param[in] action_flags
4050  *   Bit-fields that holds the actions detected until now.
4051  * @param[in] action
4052  *   Pointer to the meter action.
4053  * @param[in] attr
4054  *   Attributes of flow that includes this action.
4055  * @param[out] error
4056  *   Pointer to error structure.
4057  *
4058  * @return
4059  *   0 on success, a negative errno value otherwise and rte_errno is set.
4060  */
4061 static int
4062 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4063                                 uint64_t action_flags,
4064                                 const struct rte_flow_action *action,
4065                                 const struct rte_flow_attr *attr,
4066                                 struct rte_flow_error *error)
4067 {
4068         struct mlx5_priv *priv = dev->data->dev_private;
4069         const struct rte_flow_action_meter *am = action->conf;
4070         struct mlx5_flow_meter *fm;
4071
4072         if (!am)
4073                 return rte_flow_error_set(error, EINVAL,
4074                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4075                                           "meter action conf is NULL");
4076
4077         if (action_flags & MLX5_FLOW_ACTION_METER)
4078                 return rte_flow_error_set(error, ENOTSUP,
4079                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4080                                           "meter chaining not supported");
4081         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4082                 return rte_flow_error_set(error, ENOTSUP,
4083                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4084                                           "meter with jump not supported");
4085         if (!priv->mtr_en)
4086                 return rte_flow_error_set(error, ENOTSUP,
4087                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4088                                           NULL,
4089                                           "meter action not supported");
4090         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4091         if (!fm)
4092                 return rte_flow_error_set(error, EINVAL,
4093                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4094                                           "Meter not found");
4095         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4096               (!fm->ingress && !attr->ingress && attr->egress) ||
4097               (!fm->egress && !attr->egress && attr->ingress))))
4098                 return rte_flow_error_set(error, EINVAL,
4099                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4100                                           "Flow attributes are either invalid "
4101                                           "or have a conflict with current "
4102                                           "meter attributes");
4103         return 0;
4104 }
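/*
 * Illustrative sketch, not part of the driver: a METER action referencing
 * a meter created beforehand through the rte_mtr API; the meter id is an
 * assumption for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static const struct rte_flow_action_meter example_meter_conf = {
        .mtr_id = 1,    /* Meter id from rte_mtr_create(), assumed. */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */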
4105
4106 /**
4107  * Validate the age action.
4108  *
4109  * @param[in] action_flags
4110  *   Holds the actions detected until now.
4111  * @param[in] action
4112  *   Pointer to the age action.
4113  * @param[in] dev
4114  *   Pointer to the Ethernet device structure.
4115  * @param[out] error
4116  *   Pointer to error structure.
4117  *
4118  * @return
4119  *   0 on success, a negative errno value otherwise and rte_errno is set.
4120  */
4121 static int
4122 flow_dv_validate_action_age(uint64_t action_flags,
4123                             const struct rte_flow_action *action,
4124                             struct rte_eth_dev *dev,
4125                             struct rte_flow_error *error)
4126 {
4127         struct mlx5_priv *priv = dev->data->dev_private;
4128         const struct rte_flow_action_age *age = action->conf;
4129
4130         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4131             !priv->sh->aso_age_mng))
4132                 return rte_flow_error_set(error, ENOTSUP,
4133                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4134                                           NULL,
4135                                           "age action not supported");
4136         if (!(action->conf))
4137                 return rte_flow_error_set(error, EINVAL,
4138                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4139                                           "configuration cannot be null");
4140         if (!(age->timeout))
4141                 return rte_flow_error_set(error, EINVAL,
4142                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4143                                           "invalid timeout value 0");
4144         if (action_flags & MLX5_FLOW_ACTION_AGE)
4145                 return rte_flow_error_set(error, EINVAL,
4146                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4147                                           "duplicate age actions set");
4148         return 0;
4149 }
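/*
 * Illustrative sketch, not part of the driver: an AGE action that passes
 * the checks above - non-zero timeout and at most one AGE action per
 * flow. The timeout value is an assumption for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static const struct rte_flow_action_age example_age_conf = {
        .timeout = 10,          /* Seconds; zero is rejected above. */
        .context = NULL,        /* Optional user context reported on aging. */
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */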
4150
4151 /**
4152  * Validate the modify-header IPv4 DSCP actions.
4153  *
4154  * @param[in] action_flags
4155  *   Holds the actions detected until now.
4156  * @param[in] action
4157  *   Pointer to the modify action.
4158  * @param[in] item_flags
4159  *   Holds the items detected.
4160  * @param[out] error
4161  *   Pointer to error structure.
4162  *
4163  * @return
4164  *   0 on success, a negative errno value otherwise and rte_errno is set.
4165  */
4166 static int
4167 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4168                                          const struct rte_flow_action *action,
4169                                          const uint64_t item_flags,
4170                                          struct rte_flow_error *error)
4171 {
4172         int ret = 0;
4173
4174         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4175         if (!ret) {
4176                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4177                         return rte_flow_error_set(error, EINVAL,
4178                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4179                                                   NULL,
4180                                                   "no ipv4 item in pattern");
4181         }
4182         return ret;
4183 }
4184
4185 /**
4186  * Validate the modify-header IPv6 DSCP actions.
4187  *
4188  * @param[in] action_flags
4189  *   Holds the actions detected until now.
4190  * @param[in] action
4191  *   Pointer to the modify action.
4192  * @param[in] item_flags
4193  *   Holds the items detected.
4194  * @param[out] error
4195  *   Pointer to error structure.
4196  *
4197  * @return
4198  *   0 on success, a negative errno value otherwise and rte_errno is set.
4199  */
4200 static int
4201 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4202                                          const struct rte_flow_action *action,
4203                                          const uint64_t item_flags,
4204                                          struct rte_flow_error *error)
4205 {
4206         int ret = 0;
4207
4208         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4209         if (!ret) {
4210                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4211                         return rte_flow_error_set(error, EINVAL,
4212                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4213                                                   NULL,
4214                                                   "no ipv6 item in pattern");
4215         }
4216         return ret;
4217 }
4218
4219 /**
4220  * Match modify-header resource.
4221  *
4222  * @param list
4223  *   Pointer to the hash list.
4224  * @param entry
4225  *   Pointer to exist resource entry object.
4226  * @param key
4227  *   Key of the new entry.
4228  * @param ctx
4229  *   Pointer to new modify-header resource.
4230  *
4231  * @return
4232  *   0 on matching, non-zero otherwise.
4233  */
4234 int
4235 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4236                         struct mlx5_hlist_entry *entry,
4237                         uint64_t key __rte_unused, void *cb_ctx)
4238 {
4239         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4240         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4241         struct mlx5_flow_dv_modify_hdr_resource *resource =
4242                         container_of(entry, typeof(*resource), entry);
4243         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4244
4245         key_len += ref->actions_num * sizeof(ref->actions[0]);
4246         return ref->actions_num != resource->actions_num ||
4247                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4248 }
4249
4250 struct mlx5_hlist_entry *
4251 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4252                          void *cb_ctx)
4253 {
4254         struct mlx5_dev_ctx_shared *sh = list->ctx;
4255         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4256         struct mlx5dv_dr_domain *ns;
4257         struct mlx5_flow_dv_modify_hdr_resource *entry;
4258         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4259         int ret;
4260         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4261         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4262
4263         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4264                             SOCKET_ID_ANY);
4265         if (!entry) {
4266                 rte_flow_error_set(ctx->error, ENOMEM,
4267                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4268                                    "cannot allocate resource memory");
4269                 return NULL;
4270         }
4271         rte_memcpy(&entry->ft_type,
4272                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4273                    key_len + data_len);
4274         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4275                 ns = sh->fdb_domain;
4276         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4277                 ns = sh->tx_domain;
4278         else
4279                 ns = sh->rx_domain;
4280         ret = mlx5_flow_os_create_flow_action_modify_header
4281                                         (sh->ctx, ns, entry,
4282                                          data_len, &entry->action);
4283         if (ret) {
4284                 mlx5_free(entry);
4285                 rte_flow_error_set(ctx->error, ENOMEM,
4286                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4287                                    NULL, "cannot create modification action");
4288                 return NULL;
4289         }
4290         return &entry->entry;
4291 }
4292
4293 /**
4294  * Validate the sample action.
4295  *
4296  * @param[in] action_flags
4297  *   Holds the actions detected until now.
4298  * @param[in] action
4299  *   Pointer to the sample action.
4300  * @param[in] dev
4301  *   Pointer to the Ethernet device structure.
4302  * @param[in] attr
4303  *   Attributes of flow that includes this action.
4304  * @param[out] error
4305  *   Pointer to error structure.
4306  *
4307  * @return
4308  *   0 on success, a negative errno value otherwise and rte_errno is set.
4309  */
4310 static int
4311 flow_dv_validate_action_sample(uint64_t action_flags,
4312                                const struct rte_flow_action *action,
4313                                struct rte_eth_dev *dev,
4314                                const struct rte_flow_attr *attr,
4315                                struct rte_flow_error *error)
4316 {
4317         struct mlx5_priv *priv = dev->data->dev_private;
4318         struct mlx5_dev_config *dev_conf = &priv->config;
4319         const struct rte_flow_action_sample *sample = action->conf;
4320         const struct rte_flow_action *act;
4321         uint64_t sub_action_flags = 0;
4322         uint16_t queue_index = 0xFFFF;
4323         int actions_n = 0;
4324         int ret;
4325         fdb_mirror = 0;
4326
4327         if (!sample)
4328                 return rte_flow_error_set(error, EINVAL,
4329                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4330                                           "configuration cannot be NULL");
4331         if (sample->ratio == 0)
4332                 return rte_flow_error_set(error, EINVAL,
4333                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4334                                           "ratio value must be at least 1");
4335         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4336                 return rte_flow_error_set(error, ENOTSUP,
4337                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4338                                           NULL,
4339                                           "sample action not supported");
4340         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4341                 return rte_flow_error_set(error, EINVAL,
4342                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4343                                           "Multiple sample actions not "
4344                                           "supported");
4345         if (action_flags & MLX5_FLOW_ACTION_METER)
4346                 return rte_flow_error_set(error, EINVAL,
4347                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4348                                           "wrong action order, meter should "
4349                                           "be after sample action");
4350         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4351                 return rte_flow_error_set(error, EINVAL,
4352                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4353                                           "wrong action order, jump should "
4354                                           "be after sample action");
4355         act = sample->actions;
4356         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4357                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4358                         return rte_flow_error_set(error, ENOTSUP,
4359                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4360                                                   act, "too many actions");
4361                 switch (act->type) {
4362                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4363                         ret = mlx5_flow_validate_action_queue(act,
4364                                                               sub_action_flags,
4365                                                               dev,
4366                                                               attr, error);
4367                         if (ret < 0)
4368                                 return ret;
4369                         queue_index = ((const struct rte_flow_action_queue *)
4370                                                         (act->conf))->index;
4371                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4372                         ++actions_n;
4373                         break;
4374                 case RTE_FLOW_ACTION_TYPE_MARK:
4375                         ret = flow_dv_validate_action_mark(dev, act,
4376                                                            sub_action_flags,
4377                                                            attr, error);
4378                         if (ret < 0)
4379                                 return ret;
4380                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4381                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4382                                                 MLX5_FLOW_ACTION_MARK_EXT;
4383                         else
4384                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4385                         ++actions_n;
4386                         break;
4387                 case RTE_FLOW_ACTION_TYPE_COUNT:
4388                         ret = flow_dv_validate_action_count(dev, error);
4389                         if (ret < 0)
4390                                 return ret;
4391                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4392                         ++actions_n;
4393                         break;
4394                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4395                         ret = flow_dv_validate_action_port_id(dev,
4396                                                               sub_action_flags,
4397                                                               act,
4398                                                               attr,
4399                                                               error);
4400                         if (ret)
4401                                 return ret;
4402                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4403                         ++actions_n;
4404                         break;
4405                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4406                         ret = flow_dv_validate_action_raw_encap_decap
4407                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4408                                  &actions_n, error);
4409                         if (ret < 0)
4410                                 return ret;
4411                         ++actions_n;
4412                         break;
4413                 default:
4414                         return rte_flow_error_set(error, ENOTSUP,
4415                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4416                                                   NULL,
4417                                                   "unsupported optional "
4418                                                   "action");
4419                 }
4420         }
4421         if (attr->ingress && !attr->transfer) {
4422                 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4423                         return rte_flow_error_set(error, EINVAL,
4424                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4425                                                   NULL,
4426                                                   "Ingress must have a dest "
4427                                                   "QUEUE for Sample");
4428         } else if (attr->egress && !attr->transfer) {
4429                 return rte_flow_error_set(error, ENOTSUP,
4430                                           RTE_FLOW_ERROR_TYPE_ACTION,
4431                                           NULL,
4432                                           "Sample only supports Ingress "
4433                                           "or E-Switch");
4434         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4435                 MLX5_ASSERT(attr->transfer);
4436                 if (sample->ratio > 1)
4437                         return rte_flow_error_set(error, ENOTSUP,
4438                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4439                                                   NULL,
4440                                                   "E-Switch doesn't support "
4441                                                   "any optional action "
4442                                                   "for sampling");
4443                 fdb_mirror = 1;
4444                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4445                         return rte_flow_error_set(error, ENOTSUP,
4446                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4447                                                   NULL,
4448                                                   "unsupported action QUEUE");
4449                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4450                         return rte_flow_error_set(error, EINVAL,
4451                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4452                                                   NULL,
4453                                                   "E-Switch must have a dest "
4454                                                   "port for mirroring");
4455         }
4456         /* Continue validation for Xcap actions. */
4457         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4458             (queue_index == 0xFFFF ||
4459              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4460                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4461                      MLX5_FLOW_XCAP_ACTIONS)
4462                         return rte_flow_error_set(error, ENOTSUP,
4463                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4464                                                   NULL, "encap and decap "
4465                                                   "combination isn't "
4466                                                   "supported");
4467                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4468                                                         MLX5_FLOW_ACTION_ENCAP))
4469                         return rte_flow_error_set(error, ENOTSUP,
4470                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4471                                                   NULL, "encap is not supported"
4472                                                   " for ingress traffic");
4473         }
4474         return 0;
4475 }
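/*
 * Illustrative sketch, not part of the driver: a NIC-ingress SAMPLE action
 * shaped as the validation above requires - a dest QUEUE sub-action is
 * mandatory and the ratio must be at least 1. The queue index and ratio
 * are assumptions for the example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static const struct rte_flow_action_queue example_sample_queue = {
        .index = 0,     /* Rx queue receiving the sampled packets. */
};
static const struct rte_flow_action example_sample_sub_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_sample_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
static const struct rte_flow_action_sample example_sample_conf = {
        .ratio = 2,     /* Sample one of every two packets. */
        .actions = example_sample_sub_actions,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */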
4476
4477 /**
4478  * Find existing modify-header resource or create and register a new one.
4479  *
4480  * @param[in, out] dev
4481  *   Pointer to rte_eth_dev structure.
4482  * @param[in, out] resource
4483  *   Pointer to modify-header resource.
4484  * @param[in, out] dev_flow
4485  *   Pointer to the dev_flow.
4486  * @param[out] error
4487  *   Pointer to error structure.
4488  *
4489  * @return
4490  *   0 on success, otherwise a negative errno value and rte_errno is set.
4491  */
4492 static int
4493 flow_dv_modify_hdr_resource_register
4494                         (struct rte_eth_dev *dev,
4495                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4496                          struct mlx5_flow *dev_flow,
4497                          struct rte_flow_error *error)
4498 {
4499         struct mlx5_priv *priv = dev->data->dev_private;
4500         struct mlx5_dev_ctx_shared *sh = priv->sh;
4501         uint32_t key_len = sizeof(*resource) -
4502                            offsetof(typeof(*resource), ft_type) +
4503                            resource->actions_num * sizeof(resource->actions[0]);
4504         struct mlx5_hlist_entry *entry;
4505         struct mlx5_flow_cb_ctx ctx = {
4506                 .error = error,
4507                 .data = resource,
4508         };
4509
4510         resource->flags = dev_flow->dv.group ? 0 :
4511                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4512         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4513                                     resource->flags))
4514                 return rte_flow_error_set(error, EOVERFLOW,
4515                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4516                                           "too many modify header items");
4517         resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4518         entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4519         if (!entry)
4520                 return -rte_errno;
4521         resource = container_of(entry, typeof(*resource), entry);
4522         dev_flow->handle->dvh.modify_hdr = resource;
4523         return 0;
4524 }
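/*
 * Note, a sketch of the cache-key layout: entry.key above is a raw
 * checksum over the comparable tail of the resource (ft_type onward plus
 * the variable-size actions array), so flows requesting byte-identical
 * modify-header programs share one hash-list entry and one HW action;
 * flow_dv_modify_match_cb() resolves checksum collisions by a full
 * compare.
 */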
4525
4526 /**
4527  * Get DV flow counter by index.
4528  *
4529  * @param[in] dev
4530  *   Pointer to the Ethernet device structure.
4531  * @param[in] idx
4532  *   mlx5 flow counter index in the container.
4533  * @param[out] ppool
4534  *   mlx5 flow counter pool in the container.
4535  *
4536  * @return
4537  *   Pointer to the counter, NULL otherwise.
4538  */
4539 static struct mlx5_flow_counter *
4540 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4541                            uint32_t idx,
4542                            struct mlx5_flow_counter_pool **ppool)
4543 {
4544         struct mlx5_priv *priv = dev->data->dev_private;
4545         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4546         struct mlx5_flow_counter_pool *pool;
4547
4548         /* Decrease to original index and clear shared bit. */
4549         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4550         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4551         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4552         MLX5_ASSERT(pool);
4553         if (ppool)
4554                 *ppool = pool;
4555         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4556 }
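/*
 * Worked example of the decode above, assuming MLX5_COUNTERS_PER_POOL is
 * 512: for the 1-based index 1301 with the shared bit clear, 1301 - 1 =
 * 1300, so the counter lives in pool 1300 / 512 = 2 at offset
 * 1300 % 512 = 276 within that pool.
 */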
4557
4558 /**
4559  * Check the devx counter belongs to the pool.
4560  *
4561  * @param[in] pool
4562  *   Pointer to the counter pool.
4563  * @param[in] id
4564  *   The counter devx ID.
4565  *
4566  * @return
4567  *   True if counter belongs to the pool, false otherwise.
4568  */
4569 static bool
4570 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4571 {
4572         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4573                    MLX5_COUNTERS_PER_POOL;
4574
4575         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4576                 return true;
4577         return false;
4578 }
4579
4580 /**
4581  * Get a pool by devx counter ID.
4582  *
4583  * @param[in] cmng
4584  *   Pointer to the counter management.
4585  * @param[in] id
4586  *   The counter devx ID.
4587  *
4588  * @return
4589  *   The counter pool pointer if it exists, NULL otherwise.
4590  */
4591 static struct mlx5_flow_counter_pool *
4592 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4593 {
4594         uint32_t i;
4595         struct mlx5_flow_counter_pool *pool = NULL;
4596
4597         rte_spinlock_lock(&cmng->pool_update_sl);
4598         /* Check last used pool. */
4599         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4600             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4601                 pool = cmng->pools[cmng->last_pool_idx];
4602                 goto out;
4603         }
4604         /* ID out of range means no suitable pool in the container. */
4605         if (id > cmng->max_id || id < cmng->min_id)
4606                 goto out;
4607         /*
4608          * Search the container from the end, since counter IDs mostly
4609          * increase sequentially, so the last pool is most likely the
4610          * needed one.
4611          */
4612         i = cmng->n_valid;
4613         while (i--) {
4614                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4615
4616                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4617                         pool = pool_tmp;
4618                         break;
4619                 }
4620         }
4621 out:
4622         rte_spinlock_unlock(&cmng->pool_update_sl);
4623         return pool;
4624 }
4625
4626 /**
4627  * Resize a counter container.
4628  *
4629  * @param[in] dev
4630  *   Pointer to the Ethernet device structure.
4631  *
4632  * @return
4633  *   0 on success, otherwise negative errno value and rte_errno is set.
4634  */
4635 static int
4636 flow_dv_container_resize(struct rte_eth_dev *dev)
4637 {
4638         struct mlx5_priv *priv = dev->data->dev_private;
4639         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4640         void *old_pools = cmng->pools;
4641         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4642         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4643         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4644
4645         if (!pools) {
4646                 rte_errno = ENOMEM;
4647                 return -ENOMEM;
4648         }
4649         if (old_pools)
4650                 memcpy(pools, old_pools, cmng->n *
4651                                        sizeof(struct mlx5_flow_counter_pool *));
4652         cmng->n = resize;
4653         cmng->pools = pools;
4654         if (old_pools)
4655                 mlx5_free(old_pools);
4656         return 0;
4657 }
4658
4659 /**
4660  * Query a devx flow counter.
4661  *
4662  * @param[in] dev
4663  *   Pointer to the Ethernet device structure.
4664  * @param[in] cnt
4665  *   Index to the flow counter.
4666  * @param[out] pkts
4667  *   The statistics value of packets.
4668  * @param[out] bytes
4669  *   The statistics value of bytes.
4670  *
4671  * @return
4672  *   0 on success, otherwise a negative errno value and rte_errno is set.
4673  */
4674 static inline int
4675 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4676                      uint64_t *bytes)
4677 {
4678         struct mlx5_priv *priv = dev->data->dev_private;
4679         struct mlx5_flow_counter_pool *pool = NULL;
4680         struct mlx5_flow_counter *cnt;
4681         int offset;
4682
4683         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4684         MLX5_ASSERT(pool);
4685         if (priv->sh->cmng.counter_fallback)
4686                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4687                                         0, pkts, bytes, 0, NULL, NULL, 0);
4688         rte_spinlock_lock(&pool->sl);
4689         if (!pool->raw) {
4690                 *pkts = 0;
4691                 *bytes = 0;
4692         } else {
4693                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4694                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4695                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4696         }
4697         rte_spinlock_unlock(&pool->sl);
4698         return 0;
4699 }
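/*
 * Application-side counterpart, a sketch only: reading a flow counter via
 * the generic rte_flow_query() path, which for mlx5 ends up in the helper
 * above. The port/flow/action handles are assumed to exist and the flow
 * is assumed to carry a COUNT action.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static int
example_read_count(uint16_t port_id, struct rte_flow *flow,
                   const struct rte_flow_action *count_action)
{
        struct rte_flow_query_count query = { .reset = 0 };
        struct rte_flow_error err;

        if (rte_flow_query(port_id, flow, count_action, &query, &err))
                return -rte_errno;
        /* On success, query.hits and query.bytes hold the statistics. */
        return 0;
}
#endif /* MLX5_FLOW_DOC_EXAMPLES */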
4700
4701 /**
4702  * Create and initialize a new counter pool.
4703  *
4704  * @param[in] dev
4705  *   Pointer to the Ethernet device structure.
4706  * @param[out] dcs
4707  *   The devX counter handle.
4708  * @param[in] age
4709  *   Whether the pool is for counters that were allocated for aging.
4712  *
4713  * @return
4714  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4715  */
4716 static struct mlx5_flow_counter_pool *
4717 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4718                     uint32_t age)
4719 {
4720         struct mlx5_priv *priv = dev->data->dev_private;
4721         struct mlx5_flow_counter_pool *pool;
4722         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4723         bool fallback = priv->sh->cmng.counter_fallback;
4724         uint32_t size = sizeof(*pool);
4725
4726         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4727         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4728         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4729         if (!pool) {
4730                 rte_errno = ENOMEM;
4731                 return NULL;
4732         }
4733         pool->raw = NULL;
4734         pool->is_aged = !!age;
4735         pool->query_gen = 0;
4736         pool->min_dcs = dcs;
4737         rte_spinlock_init(&pool->sl);
4738         rte_spinlock_init(&pool->csl);
4739         TAILQ_INIT(&pool->counters[0]);
4740         TAILQ_INIT(&pool->counters[1]);
4741         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4742         rte_spinlock_lock(&cmng->pool_update_sl);
4743         pool->index = cmng->n_valid;
4744         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4745                 mlx5_free(pool);
4746                 rte_spinlock_unlock(&cmng->pool_update_sl);
4747                 return NULL;
4748         }
4749         cmng->pools[pool->index] = pool;
4750         cmng->n_valid++;
4751         if (unlikely(fallback)) {
4752                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4753
4754                 if (base < cmng->min_id)
4755                         cmng->min_id = base;
4756                 if (base > cmng->max_id)
4757                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4758                 cmng->last_pool_idx = pool->index;
4759         }
4760         rte_spinlock_unlock(&cmng->pool_update_sl);
4761         return pool;
4762 }
4763
4764 /**
4765  * Prepare a new counter and/or a new counter pool.
4766  *
4767  * @param[in] dev
4768  *   Pointer to the Ethernet device structure.
4769  * @param[out] cnt_free
4770  *   Where to put the pointer of a new counter.
4771  * @param[in] age
4772  *   Whether the pool is for counters that were allocated for aging.
4773  *
4774  * @return
4775  *   The counter pool pointer and @p cnt_free is set on success,
4776  *   NULL otherwise and rte_errno is set.
4777  */
4778 static struct mlx5_flow_counter_pool *
4779 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4780                              struct mlx5_flow_counter **cnt_free,
4781                              uint32_t age)
4782 {
4783         struct mlx5_priv *priv = dev->data->dev_private;
4784         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4785         struct mlx5_flow_counter_pool *pool;
4786         struct mlx5_counters tmp_tq;
4787         struct mlx5_devx_obj *dcs = NULL;
4788         struct mlx5_flow_counter *cnt;
4789         enum mlx5_counter_type cnt_type =
4790                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4791         bool fallback = priv->sh->cmng.counter_fallback;
4792         uint32_t i;
4793
4794         if (fallback) {
4795                 /* bulk_bitmap must be 0 for single counter allocation. */
4796                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4797                 if (!dcs)
4798                         return NULL;
4799                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4800                 if (!pool) {
4801                         pool = flow_dv_pool_create(dev, dcs, age);
4802                         if (!pool) {
4803                                 mlx5_devx_cmd_destroy(dcs);
4804                                 return NULL;
4805                         }
4806                 }
4807                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4808                 cnt = MLX5_POOL_GET_CNT(pool, i);
4809                 cnt->pool = pool;
4810                 cnt->dcs_when_free = dcs;
4811                 *cnt_free = cnt;
4812                 return pool;
4813         }
4814         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4815         if (!dcs) {
4816                 rte_errno = ENODATA;
4817                 return NULL;
4818         }
4819         pool = flow_dv_pool_create(dev, dcs, age);
4820         if (!pool) {
4821                 mlx5_devx_cmd_destroy(dcs);
4822                 return NULL;
4823         }
4824         TAILQ_INIT(&tmp_tq);
4825         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4826                 cnt = MLX5_POOL_GET_CNT(pool, i);
4827                 cnt->pool = pool;
4828                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4829         }
4830         rte_spinlock_lock(&cmng->csl[cnt_type]);
4831         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4832         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4833         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4834         (*cnt_free)->pool = pool;
4835         return pool;
4836 }
4837
4838 /**
4839  * Allocate a flow counter.
4840  *
4841  * @param[in] dev
4842  *   Pointer to the Ethernet device structure.
4843  * @param[in] age
4844  *   Whether the counter was allocated for aging.
4845  *
4846  * @return
4847  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4848  */
4849 static uint32_t
4850 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4851 {
4852         struct mlx5_priv *priv = dev->data->dev_private;
4853         struct mlx5_flow_counter_pool *pool = NULL;
4854         struct mlx5_flow_counter *cnt_free = NULL;
4855         bool fallback = priv->sh->cmng.counter_fallback;
4856         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4857         enum mlx5_counter_type cnt_type =
4858                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4859         uint32_t cnt_idx;
4860
4861         if (!priv->config.devx) {
4862                 rte_errno = ENOTSUP;
4863                 return 0;
4864         }
4865         /* Get free counters from container. */
4866         rte_spinlock_lock(&cmng->csl[cnt_type]);
4867         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4868         if (cnt_free)
4869                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4870         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4871         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4872                 goto err;
4873         pool = cnt_free->pool;
4874         if (fallback)
4875                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4876         /* Create a DV counter action only on first-time usage. */
4877         if (!cnt_free->action) {
4878                 uint16_t offset;
4879                 struct mlx5_devx_obj *dcs;
4880                 int ret;
4881
4882                 if (!fallback) {
4883                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4884                         dcs = pool->min_dcs;
4885                 } else {
4886                         offset = 0;
4887                         dcs = cnt_free->dcs_when_free;
4888                 }
4889                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4890                                                             &cnt_free->action);
4891                 if (ret) {
4892                         rte_errno = errno;
4893                         goto err;
4894                 }
4895         }
4896         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4897                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4898         /* Update the counter reset values. */
4899         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4900                                  &cnt_free->bytes))
4901                 goto err;
4902         if (!fallback && !priv->sh->cmng.query_thread_on)
4903                 /* Start the asynchronous batch query by the host thread. */
4904                 mlx5_set_query_alarm(priv->sh);
4905         return cnt_idx;
4906 err:
4907         if (cnt_free) {
4908                 cnt_free->pool = pool;
4909                 if (fallback)
4910                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4911                 rte_spinlock_lock(&cmng->csl[cnt_type]);
4912                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4913                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4914         }
4915         return 0;
4916 }
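/*
 * Worked example of the index encoding above, assuming
 * MLX5_COUNTERS_PER_POOL is 512 and MLX5_MAKE_CNT_IDX(pool, offset)
 * expands to pool * MLX5_COUNTERS_PER_POOL + offset + 1: pool index 2 and
 * array offset 276 yield 2 * 512 + 276 + 1 = 1301. The + 1 keeps index 0
 * free to mean "no counter" (see flow_dv_counter_get_by_idx() for the
 * matching decode).
 */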
4917
4918 /**
4919  * Allocate a shared flow counter.
4920  *
4921  * @param[in] ctx
4922  *   Pointer to the shared counter configuration.
4923  * @param[out] data
4924  *   Pointer to save the allocated counter index.
4925  *
4926  * @return
4927  *   0 on success, a negative errno value otherwise and rte_errno is set.
4928  */
4930 static int32_t
4931 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4932 {
4933         struct mlx5_shared_counter_conf *conf = ctx;
4934         struct rte_eth_dev *dev = conf->dev;
4935         struct mlx5_flow_counter *cnt;
4936
4937         data->dword = flow_dv_counter_alloc(dev, 0);
        if (!data->dword)
                return -rte_errno;
4938         data->dword |= MLX5_CNT_SHARED_OFFSET;
4939         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4940         cnt->shared_info.id = conf->id;
4941         return 0;
4942 }
4943
4944 /**
4945  * Get a shared flow counter.
4946  *
4947  * @param[in] dev
4948  *   Pointer to the Ethernet device structure.
4949  * @param[in] id
4950  *   Counter identifier.
4951  *
4952  * @return
4953  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4954  */
4955 static uint32_t
4956 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4957 {
4958         struct mlx5_priv *priv = dev->data->dev_private;
4959         struct mlx5_shared_counter_conf conf = {
4960                 .dev = dev,
4961                 .id = id,
4962         };
4963         union mlx5_l3t_data data = {
4964                 .dword = 0,
4965         };
4966
4967         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
4968                                flow_dv_counter_alloc_shared_cb, &conf);
4969         return data.dword;
4970 }
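/*
 * Illustrative sketch, not part of the driver: the application-side COUNT
 * action that leads here. A shared counter is requested by setting the
 * shared bit plus an application-chosen id (42 is an assumption); flows
 * on the same port that use the same id then share one counter.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES /* hypothetical guard, example code only */
static const struct rte_flow_action_count example_shared_count_conf = {
        .shared = 1,
        .id = 42,
};
#endif /* MLX5_FLOW_DOC_EXAMPLES */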
4971
4972 /**
4973  * Get age param from counter index.
4974  *
4975  * @param[in] dev
4976  *   Pointer to the Ethernet device structure.
4977  * @param[in] counter
4978  *   Index to the counter handler.
4979  *
4980  * @return
4981  *   The aging parameter specified for the counter index.
4982  */
4983 static struct mlx5_age_param*
4984 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4985                                 uint32_t counter)
4986 {
4987         struct mlx5_flow_counter *cnt;
4988         struct mlx5_flow_counter_pool *pool = NULL;
4989
4990         flow_dv_counter_get_by_idx(dev, counter, &pool);
4991         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4992         cnt = MLX5_POOL_GET_CNT(pool, counter);
4993         return MLX5_CNT_TO_AGE(cnt);
4994 }
4995
4996 /**
4997  * Remove a flow counter from aged counter list.
4998  *
4999  * @param[in] dev
5000  *   Pointer to the Ethernet device structure.
5001  * @param[in] counter
5002  *   Index to the counter handler.
5003  * @param[in] cnt
5004  *   Pointer to the counter handler.
5005  */
5006 static void
5007 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5008                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5009 {
5010         struct mlx5_age_info *age_info;
5011         struct mlx5_age_param *age_param;
5012         struct mlx5_priv *priv = dev->data->dev_private;
5013         uint16_t expected = AGE_CANDIDATE;
5014
5015         age_info = GET_PORT_AGE_INFO(priv);
5016         age_param = flow_dv_counter_idx_get_age(dev, counter);
5017         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5018                                          AGE_FREE, false, __ATOMIC_RELAXED,
5019                                          __ATOMIC_RELAXED)) {
5020                 /*
5021                  * We need the lock even on age timeout, since the
5022                  * counter may still be in process.
5023                  */
5024                 rte_spinlock_lock(&age_info->aged_sl);
5025                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5026                 rte_spinlock_unlock(&age_info->aged_sl);
5027                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5028         }
5029 }
5030
5031 /**
5032  * Release a flow counter.
5033  *
5034  * @param[in] dev
5035  *   Pointer to the Ethernet device structure.
5036  * @param[in] counter
5037  *   Index to the counter handler.
5038  */
5039 static void
5040 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5041 {
5042         struct mlx5_priv *priv = dev->data->dev_private;
5043         struct mlx5_flow_counter_pool *pool = NULL;
5044         struct mlx5_flow_counter *cnt;
5045         enum mlx5_counter_type cnt_type;
5046
5047         if (!counter)
5048                 return;
5049         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5050         MLX5_ASSERT(pool);
5051         if (IS_SHARED_CNT(counter) &&
5052             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5053                 return;
5054         if (pool->is_aged)
5055                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5056         cnt->pool = pool;
5057         /*
5058          * Put the counter back on a list to be updated in non-fallback mode.
5059          * Two lists are used alternately: while one is being queried, freed
5060          * counters are added to the other one, selected by the pool
5061          * query_gen value. After the query finishes, the counters on that
5062          * list are moved to the global container counter list. The active
5063          * list switches when a query starts, so no lock is needed here:
5064          * the query callback and this release function always operate on
5065          * different lists.
5066          */
5067         if (!priv->sh->cmng.counter_fallback) {
5068                 rte_spinlock_lock(&pool->csl);
5069                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5070                 rte_spinlock_unlock(&pool->csl);
5071         } else {
5072                 cnt->dcs_when_free = cnt->dcs_when_active;
5073                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5074                                            MLX5_COUNTER_TYPE_ORIGIN;
5075                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5076                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5077                                   cnt, next);
5078                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5079         }
5080 }
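
/*
 * Sketch (illustration only, not driver code) of the dual free-list scheme
 * used above, assuming query_gen selects the list not currently being
 * queried: the release side only ever appends to one list while the query
 * side drains the other, so the short csl spinlock around the insert is
 * all the synchronization required.
 */
static inline void
example_free_list_alternation(struct mlx5_flow_counter_pool *pool,
			      struct mlx5_flow_counter *cnt)
{
	/* Append the freed counter to the list selected by query_gen. */
	rte_spinlock_lock(&pool->csl);
	TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
	rte_spinlock_unlock(&pool->csl);
}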
5081
5082 /**
5083  * Verify the @p attributes will be correctly understood by the NIC and store
5084  * them in the @p flow if everything is correct.
5085  *
5086  * @param[in] dev
5087  *   Pointer to dev struct.
5088  * @param[in] tunnel
5089  *   Pointer to the tunnel offload object, NULL for a regular rule.
5090  * @param[in] attributes
5091  *   Pointer to flow attributes.
5092  * @param[in] grp_info
5093  *   Group translation attributes of the flow rule.
5094  * @param[out] error
5095  *   Pointer to error structure.
5096  *
5097  * @return
5098  *   - 0 on success and non-root table.
5099  *   - 1 on success and root table.
5100  *   - a negative errno value otherwise and rte_errno is set.
5099  */
5100 static int
5101 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5102                             const struct mlx5_flow_tunnel *tunnel,
5103                             const struct rte_flow_attr *attributes,
5104                             struct flow_grp_info grp_info,
5105                             struct rte_flow_error *error)
5106 {
5107         struct mlx5_priv *priv = dev->data->dev_private;
5108         uint32_t priority_max = priv->config.flow_prio - 1;
5109         int ret = 0;
5110
5111 #ifndef HAVE_MLX5DV_DR
5112         RTE_SET_USED(tunnel);
5113         RTE_SET_USED(grp_info);
5114         if (attributes->group)
5115                 return rte_flow_error_set(error, ENOTSUP,
5116                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5117                                           NULL,
5118                                           "groups are not supported");
5119 #else
5120         uint32_t table = 0;
5121
5122         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5123                                        grp_info, error);
5124         if (ret)
5125                 return ret;
5126         if (!table)
5127                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5128 #endif
5129         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5130             attributes->priority >= priority_max)
5131                 return rte_flow_error_set(error, ENOTSUP,
5132                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5133                                           NULL,
5134                                           "priority out of range");
5135         if (attributes->transfer) {
5136                 if (!priv->config.dv_esw_en)
5137                         return rte_flow_error_set
5138                                 (error, ENOTSUP,
5139                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5140                                  "E-Switch dr is not supported");
5141                 if (!(priv->representor || priv->master))
5142                         return rte_flow_error_set
5143                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5144                                  NULL, "E-Switch configuration can only be"
5145                                  " done by a master or a representor device");
5146                 if (attributes->egress)
5147                         return rte_flow_error_set
5148                                 (error, ENOTSUP,
5149                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5150                                  "egress is not supported");
5151         }
5152         if (!(attributes->egress ^ attributes->ingress))
5153                 return rte_flow_error_set(error, ENOTSUP,
5154                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5155                                           "must specify exactly one of "
5156                                           "ingress or egress");
5157         return ret;
5158 }
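
/*
 * Usage sketch (hypothetical, not driver code): the return value doubles
 * as the root-table indicator, so callers must test for negative errors
 * before interpreting it, as flow_dv_validate() does below.
 */
static inline int
example_validate_root_attr(struct rte_eth_dev *dev,
			   struct flow_grp_info grp_info,
			   struct rte_flow_error *error)
{
	/* An ingress rule on group 0, i.e. the root table. */
	const struct rte_flow_attr attr = {
		.group = 0,
		.priority = 0,
		.ingress = 1,
	};

	/* On a DR-enabled build this is expected to return 1 (root). */
	return flow_dv_validate_attributes(dev, NULL, &attr, grp_info,
					   error);
}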
5159
5160 /**
5161  * Internal validation function. For validating both actions and items.
5162  *
5163  * @param[in] dev
5164  *   Pointer to the rte_eth_dev structure.
5165  * @param[in] attr
5166  *   Pointer to the flow attributes.
5167  * @param[in] items
5168  *   Pointer to the list of items.
5169  * @param[in] actions
5170  *   Pointer to the list of actions.
5171  * @param[in] external
5172  *   This flow rule is created by a request external to the PMD.
5173  * @param[in] hairpin
5174  *   Number of hairpin TX actions, 0 means classic flow.
5175  * @param[out] error
5176  *   Pointer to the error structure.
5177  *
5178  * @return
5179  *   0 on success, a negative errno value otherwise and rte_errno is set.
5180  */
5181 static int
5182 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5183                  const struct rte_flow_item items[],
5184                  const struct rte_flow_action actions[],
5185                  bool external, int hairpin, struct rte_flow_error *error)
5186 {
5187         int ret;
5188         uint64_t action_flags = 0;
5189         uint64_t item_flags = 0;
5190         uint64_t last_item = 0;
5191         uint8_t next_protocol = 0xff;
5192         uint16_t ether_type = 0;
5193         int actions_n = 0;
5194         uint8_t item_ipv6_proto = 0;
5195         const struct rte_flow_item *gre_item = NULL;
5196         const struct rte_flow_action_raw_decap *decap;
5197         const struct rte_flow_action_raw_encap *encap;
5198         const struct rte_flow_action_rss *rss;
5199         const struct rte_flow_item_tcp nic_tcp_mask = {
5200                 .hdr = {
5201                         .tcp_flags = 0xFF,
5202                         .src_port = RTE_BE16(UINT16_MAX),
5203                         .dst_port = RTE_BE16(UINT16_MAX),
5204                 }
5205         };
5206         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5207                 .hdr = {
5208                         .src_addr =
5209                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5210                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5211                         .dst_addr =
5212                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5213                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5214                         .vtc_flow = RTE_BE32(0xffffffff),
5215                         .proto = 0xff,
5216                         .hop_limits = 0xff,
5217                 },
5218                 .has_frag_ext = 1,
5219         };
5220         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5221                 .hdr = {
5222                         .common = {
5223                                 .u32 =
5224                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5225                                         .type = 0xFF,
5226                                         }).u32),
5227                         },
5228                         .dummy[0] = 0xffffffff,
5229                 },
5230         };
5231         struct mlx5_priv *priv = dev->data->dev_private;
5232         struct mlx5_dev_config *dev_conf = &priv->config;
5233         uint16_t queue_index = 0xFFFF;
5234         const struct rte_flow_item_vlan *vlan_m = NULL;
5235         int16_t rw_act_num = 0;
5236         uint64_t is_root;
5237         const struct mlx5_flow_tunnel *tunnel;
5238         struct flow_grp_info grp_info = {
5239                 .external = !!external,
5240                 .transfer = !!attr->transfer,
5241                 .fdb_def_rule = !!priv->fdb_def_rule,
5242         };
5243         const struct rte_eth_hairpin_conf *conf;
5244
5245         if (items == NULL)
5246                 return -1;
5247         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5248                 tunnel = flow_items_to_tunnel(items);
5249                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5250                                 MLX5_FLOW_ACTION_DECAP;
5251         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5252                 tunnel = flow_actions_to_tunnel(actions);
5253                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5254         } else {
5255                 tunnel = NULL;
5256         }
5257         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5258                                 (dev, tunnel, attr, items, actions);
5259         ret = flow_dv_validate_attributes(dev, tunnel, attr, grp_info, error);
5260         if (ret < 0)
5261                 return ret;
5262         is_root = (uint64_t)ret;
5263         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5264                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5265                 int type = items->type;
5266
5267                 if (!mlx5_flow_os_item_supported(type))
5268                         return rte_flow_error_set(error, ENOTSUP,
5269                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5270                                                   NULL, "item not supported");
5271                 switch (type) {
5272                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5273                         if (items[0].type != (typeof(items[0].type))
5274                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5275                                 return rte_flow_error_set
5276                                                 (error, EINVAL,
5277                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5278                                                 NULL, "MLX5 private items "
5279                                                 "must be the first");
5280                         break;
5281                 case RTE_FLOW_ITEM_TYPE_VOID:
5282                         break;
5283                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5284                         ret = flow_dv_validate_item_port_id
5285                                         (dev, items, attr, item_flags, error);
5286                         if (ret < 0)
5287                                 return ret;
5288                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5289                         break;
5290                 case RTE_FLOW_ITEM_TYPE_ETH:
5291                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5292                                                           true, error);
5293                         if (ret < 0)
5294                                 return ret;
5295                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5296                                              MLX5_FLOW_LAYER_OUTER_L2;
5297                         if (items->mask != NULL && items->spec != NULL) {
5298                                 ether_type =
5299                                         ((const struct rte_flow_item_eth *)
5300                                          items->spec)->type;
5301                                 ether_type &=
5302                                         ((const struct rte_flow_item_eth *)
5303                                          items->mask)->type;
5304                                 ether_type = rte_be_to_cpu_16(ether_type);
5305                         } else {
5306                                 ether_type = 0;
5307                         }
5308                         break;
5309                 case RTE_FLOW_ITEM_TYPE_VLAN:
5310                         ret = flow_dv_validate_item_vlan(items, item_flags,
5311                                                          dev, error);
5312                         if (ret < 0)
5313                                 return ret;
5314                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5315                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5316                         if (items->mask != NULL && items->spec != NULL) {
5317                                 ether_type =
5318                                         ((const struct rte_flow_item_vlan *)
5319                                          items->spec)->inner_type;
5320                                 ether_type &=
5321                                         ((const struct rte_flow_item_vlan *)
5322                                          items->mask)->inner_type;
5323                                 ether_type = rte_be_to_cpu_16(ether_type);
5324                         } else {
5325                                 ether_type = 0;
5326                         }
5327                         /* Store outer VLAN mask for of_push_vlan action. */
5328                         if (!tunnel)
5329                                 vlan_m = items->mask;
5330                         break;
5331                 case RTE_FLOW_ITEM_TYPE_IPV4:
5332                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5333                                                   &item_flags, &tunnel);
5334                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5335                                                          last_item, ether_type,
5336                                                          error);
5337                         if (ret < 0)
5338                                 return ret;
5339                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5340                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5341                         if (items->mask != NULL &&
5342                             ((const struct rte_flow_item_ipv4 *)
5343                              items->mask)->hdr.next_proto_id) {
5344                                 next_protocol =
5345                                         ((const struct rte_flow_item_ipv4 *)
5346                                          (items->spec))->hdr.next_proto_id;
5347                                 next_protocol &=
5348                                         ((const struct rte_flow_item_ipv4 *)
5349                                          (items->mask))->hdr.next_proto_id;
5350                         } else {
5351                                 /* Reset for inner layer. */
5352                                 next_protocol = 0xff;
5353                         }
5354                         break;
5355                 case RTE_FLOW_ITEM_TYPE_IPV6:
5356                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5357                                                   &item_flags, &tunnel);
5358                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5359                                                            last_item,
5360                                                            ether_type,
5361                                                            &nic_ipv6_mask,
5362                                                            error);
5363                         if (ret < 0)
5364                                 return ret;
5365                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5366                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5367                         if (items->mask != NULL &&
5368                             ((const struct rte_flow_item_ipv6 *)
5369                              items->mask)->hdr.proto) {
5370                                 item_ipv6_proto =
5371                                         ((const struct rte_flow_item_ipv6 *)
5372                                          items->spec)->hdr.proto;
5373                                 next_protocol =
5374                                         ((const struct rte_flow_item_ipv6 *)
5375                                          items->spec)->hdr.proto;
5376                                 next_protocol &=
5377                                         ((const struct rte_flow_item_ipv6 *)
5378                                          items->mask)->hdr.proto;
5379                         } else {
5380                                 /* Reset for inner layer. */
5381                                 next_protocol = 0xff;
5382                         }
5383                         break;
5384                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5385                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5386                                                                   item_flags,
5387                                                                   error);
5388                         if (ret < 0)
5389                                 return ret;
5390                         last_item = tunnel ?
5391                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5392                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5393                         if (items->mask != NULL &&
5394                             ((const struct rte_flow_item_ipv6_frag_ext *)
5395                              items->mask)->hdr.next_header) {
5396                                 next_protocol =
5397                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5398                                  items->spec)->hdr.next_header;
5399                                 next_protocol &=
5400                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5401                                  items->mask)->hdr.next_header;
5402                         } else {
5403                                 /* Reset for inner layer. */
5404                                 next_protocol = 0xff;
5405                         }
5406                         break;
5407                 case RTE_FLOW_ITEM_TYPE_TCP:
5408                         ret = mlx5_flow_validate_item_tcp
5409                                                 (items, item_flags,
5410                                                  next_protocol,
5411                                                  &nic_tcp_mask,
5412                                                  error);
5413                         if (ret < 0)
5414                                 return ret;
5415                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5416                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5417                         break;
5418                 case RTE_FLOW_ITEM_TYPE_UDP:
5419                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5420                                                           next_protocol,
5421                                                           error);
5422                         if (ret < 0)
5423                                 return ret;
5424                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5425                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5426                         break;
5427                 case RTE_FLOW_ITEM_TYPE_GRE:
5428                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5429                                                           next_protocol, error);
5430                         if (ret < 0)
5431                                 return ret;
5432                         gre_item = items;
5433                         last_item = MLX5_FLOW_LAYER_GRE;
5434                         break;
5435                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5436                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5437                                                             next_protocol,
5438                                                             error);
5439                         if (ret < 0)
5440                                 return ret;
5441                         last_item = MLX5_FLOW_LAYER_NVGRE;
5442                         break;
5443                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5444                         ret = mlx5_flow_validate_item_gre_key
5445                                 (items, item_flags, gre_item, error);
5446                         if (ret < 0)
5447                                 return ret;
5448                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5449                         break;
5450                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5451                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5452                                                             error);
5453                         if (ret < 0)
5454                                 return ret;
5455                         last_item = MLX5_FLOW_LAYER_VXLAN;
5456                         break;
5457                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5458                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5459                                                                 item_flags, dev,
5460                                                                 error);
5461                         if (ret < 0)
5462                                 return ret;
5463                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5464                         break;
5465                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5466                         ret = mlx5_flow_validate_item_geneve(items,
5467                                                              item_flags, dev,
5468                                                              error);
5469                         if (ret < 0)
5470                                 return ret;
5471                         last_item = MLX5_FLOW_LAYER_GENEVE;
5472                         break;
5473                 case RTE_FLOW_ITEM_TYPE_MPLS:
5474                         ret = mlx5_flow_validate_item_mpls(dev, items,
5475                                                            item_flags,
5476                                                            last_item, error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         last_item = MLX5_FLOW_LAYER_MPLS;
5480                         break;
5481
5482                 case RTE_FLOW_ITEM_TYPE_MARK:
5483                         ret = flow_dv_validate_item_mark(dev, items, attr,
5484                                                          error);
5485                         if (ret < 0)
5486                                 return ret;
5487                         last_item = MLX5_FLOW_ITEM_MARK;
5488                         break;
5489                 case RTE_FLOW_ITEM_TYPE_META:
5490                         ret = flow_dv_validate_item_meta(dev, items, attr,
5491                                                          error);
5492                         if (ret < 0)
5493                                 return ret;
5494                         last_item = MLX5_FLOW_ITEM_METADATA;
5495                         break;
5496                 case RTE_FLOW_ITEM_TYPE_ICMP:
5497                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5498                                                            next_protocol,
5499                                                            error);
5500                         if (ret < 0)
5501                                 return ret;
5502                         last_item = MLX5_FLOW_LAYER_ICMP;
5503                         break;
5504                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5505                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5506                                                             next_protocol,
5507                                                             error);
5508                         if (ret < 0)
5509                                 return ret;
5510                         item_ipv6_proto = IPPROTO_ICMPV6;
5511                         last_item = MLX5_FLOW_LAYER_ICMP6;
5512                         break;
5513                 case RTE_FLOW_ITEM_TYPE_TAG:
5514                         ret = flow_dv_validate_item_tag(dev, items,
5515                                                         attr, error);
5516                         if (ret < 0)
5517                                 return ret;
5518                         last_item = MLX5_FLOW_ITEM_TAG;
5519                         break;
5520                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5521                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5522                         break;
5523                 case RTE_FLOW_ITEM_TYPE_GTP:
5524                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5525                                                         error);
5526                         if (ret < 0)
5527                                 return ret;
5528                         last_item = MLX5_FLOW_LAYER_GTP;
5529                         break;
5530                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5531                         /* Capacity will be checked in the translate stage. */
5532                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5533                                                             last_item,
5534                                                             ether_type,
5535                                                             &nic_ecpri_mask,
5536                                                             error);
5537                         if (ret < 0)
5538                                 return ret;
5539                         last_item = MLX5_FLOW_LAYER_ECPRI;
5540                         break;
5541                 default:
5542                         return rte_flow_error_set(error, ENOTSUP,
5543                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5544                                                   NULL, "item not supported");
5545                 }
5546                 item_flags |= last_item;
5547         }
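	/*
	 * Example (illustration only): for a pattern like eth / ipv4 / udp /
	 * vxlan / eth / ipv4, the loop above first accumulates the outer
	 * L2/L3/L4 layer flags, the VXLAN item then sets the tunnel layer
	 * bit, and the remaining items are validated as inner layers, with
	 * next_protocol reset to 0xff whenever the mask leaves it open.
	 */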
5548         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5549                 int type = actions->type;
5550
5551                 if (!mlx5_flow_os_action_supported(type))
5552                         return rte_flow_error_set(error, ENOTSUP,
5553                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5554                                                   actions,
5555                                                   "action not supported");
5556                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5557                         return rte_flow_error_set(error, ENOTSUP,
5558                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5559                                                   actions, "too many actions");
5560                 switch (type) {
5561                 case RTE_FLOW_ACTION_TYPE_VOID:
5562                         break;
5563                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5564                         ret = flow_dv_validate_action_port_id(dev,
5565                                                               action_flags,
5566                                                               actions,
5567                                                               attr,
5568                                                               error);
5569                         if (ret)
5570                                 return ret;
5571                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5572                         ++actions_n;
5573                         break;
5574                 case RTE_FLOW_ACTION_TYPE_FLAG:
5575                         ret = flow_dv_validate_action_flag(dev, action_flags,
5576                                                            attr, error);
5577                         if (ret < 0)
5578                                 return ret;
5579                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5580                                 /* Count all modify-header actions as one. */
5581                                 if (!(action_flags &
5582                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5583                                         ++actions_n;
5584                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5585                                                 MLX5_FLOW_ACTION_MARK_EXT;
5586                         } else {
5587                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5588                                 ++actions_n;
5589                         }
5590                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5591                         break;
5592                 case RTE_FLOW_ACTION_TYPE_MARK:
5593                         ret = flow_dv_validate_action_mark(dev, actions,
5594                                                            action_flags,
5595                                                            attr, error);
5596                         if (ret < 0)
5597                                 return ret;
5598                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5599                                 /* Count all modify-header actions as one. */
5600                                 if (!(action_flags &
5601                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5602                                         ++actions_n;
5603                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5604                                                 MLX5_FLOW_ACTION_MARK_EXT;
5605                         } else {
5606                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5607                                 ++actions_n;
5608                         }
5609                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5610                         break;
5611                 case RTE_FLOW_ACTION_TYPE_SET_META:
5612                         ret = flow_dv_validate_action_set_meta(dev, actions,
5613                                                                action_flags,
5614                                                                attr, error);
5615                         if (ret < 0)
5616                                 return ret;
5617                         /* Count all modify-header actions as one action. */
5618                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5619                                 ++actions_n;
5620                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5621                         rw_act_num += MLX5_ACT_NUM_SET_META;
5622                         break;
5623                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5624                         ret = flow_dv_validate_action_set_tag(dev, actions,
5625                                                               action_flags,
5626                                                               attr, error);
5627                         if (ret < 0)
5628                                 return ret;
5629                         /* Count all modify-header actions as one action. */
5630                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5631                                 ++actions_n;
5632                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5633                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5634                         break;
5635                 case RTE_FLOW_ACTION_TYPE_DROP:
5636                         ret = mlx5_flow_validate_action_drop(action_flags,
5637                                                              attr, error);
5638                         if (ret < 0)
5639                                 return ret;
5640                         action_flags |= MLX5_FLOW_ACTION_DROP;
5641                         ++actions_n;
5642                         break;
5643                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5644                         ret = mlx5_flow_validate_action_queue(actions,
5645                                                               action_flags, dev,
5646                                                               attr, error);
5647                         if (ret < 0)
5648                                 return ret;
5649                         queue_index = ((const struct rte_flow_action_queue *)
5650                                                         (actions->conf))->index;
5651                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5652                         ++actions_n;
5653                         break;
5654                 case RTE_FLOW_ACTION_TYPE_RSS:
5655                         rss = actions->conf;
5656                         ret = mlx5_flow_validate_action_rss(actions,
5657                                                             action_flags, dev,
5658                                                             attr, item_flags,
5659                                                             error);
5660                         if (ret < 0)
5661                                 return ret;
5662                         if (rss != NULL && rss->queue_num)
5663                                 queue_index = rss->queue[0];
5664                         action_flags |= MLX5_FLOW_ACTION_RSS;
5665                         ++actions_n;
5666                         break;
5667                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5668                         ret =
5669                         mlx5_flow_validate_action_default_miss(action_flags,
5670                                         attr, error);
5671                         if (ret < 0)
5672                                 return ret;
5673                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5674                         ++actions_n;
5675                         break;
5676                 case RTE_FLOW_ACTION_TYPE_COUNT:
5677                         ret = flow_dv_validate_action_count(dev, error);
5678                         if (ret < 0)
5679                                 return ret;
5680                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5681                         ++actions_n;
5682                         break;
5683                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5684                         if (flow_dv_validate_action_pop_vlan(dev,
5685                                                              action_flags,
5686                                                              actions,
5687                                                              item_flags, attr,
5688                                                              error))
5689                                 return -rte_errno;
5690                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5691                         ++actions_n;
5692                         break;
5693                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5694                         ret = flow_dv_validate_action_push_vlan(dev,
5695                                                                 action_flags,
5696                                                                 vlan_m,
5697                                                                 actions, attr,
5698                                                                 error);
5699                         if (ret < 0)
5700                                 return ret;
5701                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5702                         ++actions_n;
5703                         break;
5704                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5705                         ret = flow_dv_validate_action_set_vlan_pcp
5706                                                 (action_flags, actions, error);
5707                         if (ret < 0)
5708                                 return ret;
5709                         /* Count PCP with push_vlan command. */
5710                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5711                         break;
5712                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5713                         ret = flow_dv_validate_action_set_vlan_vid
5714                                                 (item_flags, action_flags,
5715                                                  actions, error);
5716                         if (ret < 0)
5717                                 return ret;
5718                         /* Count VID with push_vlan command. */
5719                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5720                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5721                         break;
5722                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5723                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5724                         ret = flow_dv_validate_action_l2_encap(dev,
5725                                                                action_flags,
5726                                                                actions, attr,
5727                                                                error);
5728                         if (ret < 0)
5729                                 return ret;
5730                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5731                         ++actions_n;
5732                         break;
5733                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5734                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5735                         ret = flow_dv_validate_action_decap(dev, action_flags,
5736                                                             attr, error);
5737                         if (ret < 0)
5738                                 return ret;
5739                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5740                         ++actions_n;
5741                         break;
5742                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5743                         ret = flow_dv_validate_action_raw_encap_decap
5744                                 (dev, NULL, actions->conf, attr, &action_flags,
5745                                  &actions_n, error);
5746                         if (ret < 0)
5747                                 return ret;
5748                         break;
5749                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5750                         decap = actions->conf;
5751                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5752                                 ;
5753                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5754                                 encap = NULL;
5755                                 actions--;
5756                         } else {
5757                                 encap = actions->conf;
5758                         }
5759                         ret = flow_dv_validate_action_raw_encap_decap
5760                                            (dev,
5761                                             decap ? decap : &empty_decap, encap,
5762                                             attr, &action_flags, &actions_n,
5763                                             error);
5764                         if (ret < 0)
5765                                 return ret;
5766                         break;
5767                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5768                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5769                         ret = flow_dv_validate_action_modify_mac(action_flags,
5770                                                                  actions,
5771                                                                  item_flags,
5772                                                                  error);
5773                         if (ret < 0)
5774                                 return ret;
5775                         /* Count all modify-header actions as one action. */
5776                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5777                                 ++actions_n;
5778                         action_flags |= actions->type ==
5779                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5780                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5781                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5782                         /*
5783                          * Even though the source and destination MAC
5784                          * addresses overlap within the 4B-aligned header
5785                          * fields, the convert function handles them
5786                          * separately, so up to 4 SW actions may be created.
5787                          * Two actions are counted here regardless of how
5788                          * many address bytes are actually set.
5789                          */
5789                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5790                         break;
5791                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5792                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5793                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5794                                                                   actions,
5795                                                                   item_flags,
5796                                                                   error);
5797                         if (ret < 0)
5798                                 return ret;
5799                         /* Count all modify-header actions as one action. */
5800                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5801                                 ++actions_n;
5802                         action_flags |= actions->type ==
5803                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5804                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5805                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5806                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5807                         break;
5808                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5809                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5810                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5811                                                                   actions,
5812                                                                   item_flags,
5813                                                                   error);
5814                         if (ret < 0)
5815                                 return ret;
5816                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5817                                 return rte_flow_error_set(error, ENOTSUP,
5818                                         RTE_FLOW_ERROR_TYPE_ACTION,
5819                                         actions,
5820                                         "Can't change header "
5821                                         "with ICMPv6 proto");
5822                         /* Count all modify-header actions as one action. */
5823                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5824                                 ++actions_n;
5825                         action_flags |= actions->type ==
5826                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5827                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5828                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5829                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5830                         break;
5831                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5832                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5833                         ret = flow_dv_validate_action_modify_tp(action_flags,
5834                                                                 actions,
5835                                                                 item_flags,
5836                                                                 error);
5837                         if (ret < 0)
5838                                 return ret;
5839                         /* Count all modify-header actions as one action. */
5840                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5841                                 ++actions_n;
5842                         action_flags |= actions->type ==
5843                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5844                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5845                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5846                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5847                         break;
5848                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5849                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5850                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5851                                                                  actions,
5852                                                                  item_flags,
5853                                                                  error);
5854                         if (ret < 0)
5855                                 return ret;
5856                         /* Count all modify-header actions as one action. */
5857                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5858                                 ++actions_n;
5859                         action_flags |= actions->type ==
5860                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5861                                                 MLX5_FLOW_ACTION_SET_TTL :
5862                                                 MLX5_FLOW_ACTION_DEC_TTL;
5863                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5864                         break;
5865                 case RTE_FLOW_ACTION_TYPE_JUMP:
5866                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5867                                                            action_flags,
5868                                                            attr, external,
5869                                                            error);
5870                         if (ret)
5871                                 return ret;
5872                         ++actions_n;
5873                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5874                         break;
5875                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5876                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5877                         ret = flow_dv_validate_action_modify_tcp_seq
5878                                                                 (action_flags,
5879                                                                  actions,
5880                                                                  item_flags,
5881                                                                  error);
5882                         if (ret < 0)
5883                                 return ret;
5884                         /* Count all modify-header actions as one action. */
5885                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5886                                 ++actions_n;
5887                         action_flags |= actions->type ==
5888                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5889                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5890                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5891                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5892                         break;
5893                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5894                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5895                         ret = flow_dv_validate_action_modify_tcp_ack
5896                                                                 (action_flags,
5897                                                                  actions,
5898                                                                  item_flags,
5899                                                                  error);
5900                         if (ret < 0)
5901                                 return ret;
5902                         /* Count all modify-header actions as one action. */
5903                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5904                                 ++actions_n;
5905                         action_flags |= actions->type ==
5906                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5907                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5908                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5909                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5910                         break;
5911                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5912                         break;
5913                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5914                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5915                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5916                         break;
5917                 case RTE_FLOW_ACTION_TYPE_METER:
5918                         ret = mlx5_flow_validate_action_meter(dev,
5919                                                               action_flags,
5920                                                               actions, attr,
5921                                                               error);
5922                         if (ret < 0)
5923                                 return ret;
5924                         action_flags |= MLX5_FLOW_ACTION_METER;
5925                         ++actions_n;
5926                         /* Meter action will add one more TAG action. */
5927                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5928                         break;
5929                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
5930                         if (!attr->group)
5931                                 return rte_flow_error_set(error, ENOTSUP,
5932                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5933                                         NULL, "Shared ASO age action is "
5934                                         "not supported for group 0");
5935                         action_flags |= MLX5_FLOW_ACTION_AGE;
5936                         ++actions_n;
5937                         break;
5938                 case RTE_FLOW_ACTION_TYPE_AGE:
5939                         ret = flow_dv_validate_action_age(action_flags,
5940                                                           actions, dev,
5941                                                           error);
5942                         if (ret < 0)
5943                                 return ret;
5944                         action_flags |= MLX5_FLOW_ACTION_AGE;
5945                         ++actions_n;
5946                         break;
5947                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5948                         ret = flow_dv_validate_action_modify_ipv4_dscp
5949                                                          (action_flags,
5950                                                           actions,
5951                                                           item_flags,
5952                                                           error);
5953                         if (ret < 0)
5954                                 return ret;
5955                         /* Count all modify-header actions as one action. */
5956                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5957                                 ++actions_n;
5958                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5959                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5960                         break;
5961                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5962                         ret = flow_dv_validate_action_modify_ipv6_dscp
5963                                                                 (action_flags,
5964                                                                  actions,
5965                                                                  item_flags,
5966                                                                  error);
5967                         if (ret < 0)
5968                                 return ret;
5969                         /* Count all modify-header actions as one action. */
5970                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5971                                 ++actions_n;
5972                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5973                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5974                         break;
5975                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
5976                         ret = flow_dv_validate_action_sample(action_flags,
5977                                                              actions, dev,
5978                                                              attr, error);
5979                         if (ret < 0)
5980                                 return ret;
5981                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
5982                         ++actions_n;
5983                         break;
5984                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
5985                         if (actions[0].type != (typeof(actions[0].type))
5986                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
5987                                 return rte_flow_error_set
5988                                                 (error, EINVAL,
5989                                                 RTE_FLOW_ERROR_TYPE_ACTION,
5990                                                 NULL, "MLX5 private action "
5991                                                 "must be the first");
5992
5993                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5994                         break;
5995                 default:
5996                         return rte_flow_error_set(error, ENOTSUP,
5997                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5998                                                   actions,
5999                                                   "action not supported");
6000                 }
6001         }
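	/*
	 * Example (illustration only): for actions set_ipv4_src / set_ttl /
	 * queue, the two modify-header actions are folded into a single
	 * actions_n entry, while rw_act_num still accumulates
	 * MLX5_ACT_NUM_MDF_IPV4 and MLX5_ACT_NUM_MDF_TTL individually.
	 */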
6002         /*
6003          * Validate actions in tunnel offload flow rules:
6004          * - Explicit decap action is prohibited by the tunnel offload API.
6005          * - Drop action in a tunnel steer rule is prohibited by the API.
6006          * - Application cannot use the MARK action because its value can
6007          *   mask the tunnel default miss notification.
6008          * - JUMP in a tunnel match rule is not supported by the current
6009          *   PMD implementation.
6010          * - TAG & META are reserved for future use.
6011          */
6012         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6013                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6014                                             MLX5_FLOW_ACTION_MARK     |
6015                                             MLX5_FLOW_ACTION_SET_TAG  |
6016                                             MLX5_FLOW_ACTION_SET_META |
6017                                             MLX5_FLOW_ACTION_DROP;
6018
6019                 if (action_flags & bad_actions_mask)
6020                         return rte_flow_error_set
6021                                         (error, EINVAL,
6022                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6023                                         "Invalid RTE action in tunnel "
6024                                         "set decap rule");
6025                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6026                         return rte_flow_error_set
6027                                         (error, EINVAL,
6028                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6029                                         "tunnel set decap rule must terminate "
6030                                         "with JUMP");
6031                 if (!attr->ingress)
6032                         return rte_flow_error_set
6033                                         (error, EINVAL,
6034                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6035                                         "tunnel flows for ingress traffic only");
6036         }
6037         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6038                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6039                                             MLX5_FLOW_ACTION_MARK    |
6040                                             MLX5_FLOW_ACTION_SET_TAG |
6041                                             MLX5_FLOW_ACTION_SET_META;
6042
6043                 if (action_flags & bad_actions_mask)
6044                         return rte_flow_error_set
6045                                         (error, EINVAL,
6046                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6047                                         "Invalid RTE action in tunnel "
6048                                         "set match rule");
6049         }
6050         /*
6051          * Validate the drop action mutual exclusion with other actions.
6052          * Drop action is mutually-exclusive with any other action, except for
6053          * Count action.
6054          */
6055         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6056             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6057                 return rte_flow_error_set(error, EINVAL,
6058                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6059                                           "Drop action is mutually-exclusive "
6060                                           "with any other action, except for "
6061                                           "Count action");
6062         /* E-Switch has a few restrictions on items and actions. */
6063         if (attr->transfer) {
6064                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6065                     action_flags & MLX5_FLOW_ACTION_FLAG)
6066                         return rte_flow_error_set(error, ENOTSUP,
6067                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6068                                                   NULL,
6069                                                   "unsupported action FLAG");
6070                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6071                     action_flags & MLX5_FLOW_ACTION_MARK)
6072                         return rte_flow_error_set(error, ENOTSUP,
6073                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6074                                                   NULL,
6075                                                   "unsupported action MARK");
6076                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6077                         return rte_flow_error_set(error, ENOTSUP,
6078                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6079                                                   NULL,
6080                                                   "unsupported action QUEUE");
6081                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6082                         return rte_flow_error_set(error, ENOTSUP,
6083                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6084                                                   NULL,
6085                                                   "unsupported action RSS");
6086                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6087                         return rte_flow_error_set(error, EINVAL,
6088                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6089                                                   actions,
6090                                                   "no fate action is found");
6091         } else {
6092                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6093                         return rte_flow_error_set(error, EINVAL,
6094                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6095                                                   actions,
6096                                                   "no fate action is found");
6097         }
6098         /*
6099          * Continue validation for Xcap and VLAN actions.
6100          * If hairpin works in explicit TX rule mode, no action splitting
6101          * is performed and hairpin ingress flows are validated in the
6102          * same way as other standard flows.
6103          */
6104         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6105                              MLX5_FLOW_VLAN_ACTIONS)) &&
6106             (queue_index == 0xFFFF ||
6107              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6108              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6109              conf->tx_explicit != 0))) {
6110                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6111                     MLX5_FLOW_XCAP_ACTIONS)
6112                         return rte_flow_error_set(error, ENOTSUP,
6113                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6114                                                   NULL, "encap and decap "
6115                                                   "combination is not supported");
6116                 if (!attr->transfer && attr->ingress) {
6117                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6118                                 return rte_flow_error_set
6119                                                 (error, ENOTSUP,
6120                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6121                                                  NULL, "encap is not supported"
6122                                                  " for ingress traffic");
6123                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6124                                 return rte_flow_error_set
6125                                                 (error, ENOTSUP,
6126                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6127                                                  NULL, "push VLAN action not "
6128                                                  "supported for ingress");
6129                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6130                                         MLX5_FLOW_VLAN_ACTIONS)
6131                                 return rte_flow_error_set
6132                                                 (error, ENOTSUP,
6133                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6134                                                  NULL, "no support for "
6135                                                  "multiple VLAN actions");
6136                 }
6137         }
6138         /*
6139          * Hairpin flow will add one more TAG action in TX implicit mode.
6140          * In TX explicit mode, there will be no hairpin flow ID.
6141          */
6142         if (hairpin > 0)
6143                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6144         /* Extra metadata enabled: one more TAG action will be added. */
6145         if (dev_conf->dv_flow_en &&
6146             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6147             mlx5_flow_ext_mreg_supported(dev))
6148                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6149         if ((uint32_t)rw_act_num >
6150                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6151                 return rte_flow_error_set(error, ENOTSUP,
6152                                           RTE_FLOW_ERROR_TYPE_ACTION,
6153                                           NULL, "too many header modify"
6154                                           " actions to support");
6155         }
6156         return 0;
6157 }
6158
6159 /**
6160  * Internal preparation function. Allocates the DV flow size;
6161  * this size is constant.
6162  *
6163  * @param[in] dev
6164  *   Pointer to the rte_eth_dev structure.
6165  * @param[in] attr
6166  *   Pointer to the flow attributes.
6167  * @param[in] items
6168  *   Pointer to the list of items.
6169  * @param[in] actions
6170  *   Pointer to the list of actions.
6171  * @param[out] error
6172  *   Pointer to the error structure.
6173  *
6174  * @return
6175  *   Pointer to mlx5_flow object on success,
6176  *   otherwise NULL and rte_errno is set.
6177  */
6178 static struct mlx5_flow *
6179 flow_dv_prepare(struct rte_eth_dev *dev,
6180                 const struct rte_flow_attr *attr __rte_unused,
6181                 const struct rte_flow_item items[] __rte_unused,
6182                 const struct rte_flow_action actions[] __rte_unused,
6183                 struct rte_flow_error *error)
6184 {
6185         uint32_t handle_idx = 0;
6186         struct mlx5_flow *dev_flow;
6187         struct mlx5_flow_handle *dev_handle;
6188         struct mlx5_priv *priv = dev->data->dev_private;
6189         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6190
6191         MLX5_ASSERT(wks);
6192         /* Check for workspace overflow to avoid corrupting memory. */
6193         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6194                 rte_flow_error_set(error, ENOSPC,
6195                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6196                                    "no free temporary device flow");
6197                 return NULL;
6198         }
6199         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6200                                    &handle_idx);
6201         if (!dev_handle) {
6202                 rte_flow_error_set(error, ENOMEM,
6203                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6204                                    "not enough memory to create flow handle");
6205                 return NULL;
6206         }
6207         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6208         dev_flow = &wks->flows[wks->flow_idx++];
6209         dev_flow->handle = dev_handle;
6210         dev_flow->handle_idx = handle_idx;
6211         /*
6212          * Some old rdma-core releases check the length of the matching
6213          * parameter before continuing, so use the length without the misc4
6214          * param by default. If the flow needs misc4 support, the length is
6215          * adjusted accordingly. Each param member is naturally aligned on
6216          * a 64B boundary.
6217          */
6218         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6219                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6220         /*
6221          * The matching value must be cleared to 0 before use. In the past
6222          * it was cleared implicitly by the rte_*alloc API; the explicit
6223          * memset takes almost the same time.
6224          */
6225         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6226         dev_flow->ingress = attr->ingress;
6227         dev_flow->dv.transfer = attr->transfer;
6228         return dev_flow;
6229 }
6230
6231 #ifdef RTE_LIBRTE_MLX5_DEBUG
6232 /**
6233  * Sanity check for match mask and value. Similar to check_valid_spec() in
6234  * kernel driver. If an unmasked bit is set in the value, it returns failure.
6235  *
6236  * @param match_mask
6237  *   pointer to match mask buffer.
6238  * @param match_value
6239  *   pointer to match value buffer.
6240  *
6241  * @return
6242  *   0 if valid, -EINVAL otherwise.
6243  */
6244 static int
6245 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6246 {
6247         uint8_t *m = match_mask;
6248         uint8_t *v = match_value;
6249         unsigned int i;
6250
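	/* Any bit set in the value must also be set in the mask. */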
6251         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6252                 if (v[i] & ~m[i]) {
6253                         DRV_LOG(ERR,
6254                                 "match_value differs from match_criteria"
6255                                 " %p[%u] != %p[%u]",
6256                                 match_value, i, match_mask, i);
6257                         return -EINVAL;
6258                 }
6259         }
6260         return 0;
6261 }
6262 #endif
6263
6264 /**
6265  * Add match of ip_version.
6266  *
6267  * @param[in] group
6268  *   Flow group.
6269  * @param[in] headers_v
6270  *   Values header pointer.
6271  * @param[in] headers_m
6272  *   Masks header pointer.
6273  * @param[in] ip_version
6274  *   The IP version to set.
6275  */
6276 static inline void
6277 flow_dv_set_match_ip_version(uint32_t group,
6278                              void *headers_v,
6279                              void *headers_m,
6280                              uint8_t ip_version)
6281 {
6282         if (group == 0)
6283                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6284         else
6285                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6286                          ip_version);
6287         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
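	/*
	 * Matching on ip_version supersedes any ethertype match, so clear
	 * ethertype in both mask and value to avoid conflicting conditions.
	 */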
6288         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6289         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6290 }
6291
6292 /**
6293  * Add Ethernet item to matcher and to the value.
6294  *
6295  * @param[in, out] matcher
6296  *   Flow matcher.
6297  * @param[in, out] key
6298  *   Flow matcher value.
6299  * @param[in] item
6300  *   Flow pattern to translate.
6301  * @param[in] inner
6302  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
6303  */
6304 static void
6305 flow_dv_translate_item_eth(void *matcher, void *key,
6306                            const struct rte_flow_item *item, int inner,
6307                            uint32_t group)
6308 {
6309         const struct rte_flow_item_eth *eth_m = item->mask;
6310         const struct rte_flow_item_eth *eth_v = item->spec;
6311         const struct rte_flow_item_eth nic_mask = {
6312                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6313                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6314                 .type = RTE_BE16(0xffff),
6315                 .has_vlan = 0,
6316         };
6317         void *hdrs_m;
6318         void *hdrs_v;
6319         char *l24_v;
6320         unsigned int i;
6321
6322         if (!eth_v)
6323                 return;
6324         if (!eth_m)
6325                 eth_m = &nic_mask;
6326         if (inner) {
6327                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6328                                          inner_headers);
6329                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6330         } else {
6331                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6332                                          outer_headers);
6333                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6334         }
6335         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6336                &eth_m->dst, sizeof(eth_m->dst));
6337         /* The value must be in the range of the mask. */
6338         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6339         for (i = 0; i < sizeof(eth_m->dst); ++i)
6340                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6341         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6342                &eth_m->src, sizeof(eth_m->src));
6343         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6344         /* The value must be in the range of the mask. */
6345         for (i = 0; i < sizeof(eth_m->src); ++i)
6346                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6347         /*
6348          * HW supports matching on one Ethertype, the one following the
6349          * last VLAN tag of the packet (see PRM).
6350          * Set a match on ethertype only if the ETH header is not followed
6351          * by a VLAN. HW is optimized for IPv4/IPv6; in such cases avoid
6352          * setting ethertype and use the ip_version field instead.
6353          * eCPRI over an Ether layer uses Ethertype 0xAEFE.
6354          */
6355         if (eth_m->type == 0xFFFF) {
6356                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6357                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6358                 switch (eth_v->type) {
6359                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6360                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6361                         return;
6362                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6363                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6364                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6365                         return;
6366                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6367                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6368                         return;
6369                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6370                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6371                         return;
6372                 default:
6373                         break;
6374                 }
6375         }
6376         if (eth_m->has_vlan) {
6377                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6378                 if (eth_v->has_vlan) {
6379                         /*
6380                          * When the has_more_vlan field of the VLAN item is
6381                          * also not set, only single-tagged packets match.
6382                          */
6383                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6384                         return;
6385                 }
6386         }
6387         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6388                  rte_be_to_cpu_16(eth_m->type));
6389         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6390         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6391 }
6392
6393 /**
6394  * Add VLAN item to matcher and to the value.
6395  *
6396  * @param[in, out] dev_flow
6397  *   Flow descriptor.
6398  * @param[in, out] matcher
6399  *   Flow matcher.
6400  * @param[in, out] key
6401  *   Flow matcher value.
6402  * @param[in] item
6403  *   Flow pattern to translate.
6404  * @param[in] inner
6405  *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
6406  */
6407 static void
6408 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6409                             void *matcher, void *key,
6410                             const struct rte_flow_item *item,
6411                             int inner, uint32_t group)
6412 {
6413         const struct rte_flow_item_vlan *vlan_m = item->mask;
6414         const struct rte_flow_item_vlan *vlan_v = item->spec;
6415         void *hdrs_m;
6416         void *hdrs_v;
6417         uint16_t tci_m;
6418         uint16_t tci_v;
6419
6420         if (inner) {
6421                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6422                                          inner_headers);
6423                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6424         } else {
6425                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6426                                          outer_headers);
6427                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6428                 /*
6429                  * This is a workaround: masks are not supported
6430                  * here and have been pre-validated.
6431                  */
6432                 if (vlan_v)
6433                         dev_flow->handle->vf_vlan.tag =
6434                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6435         }
6436         /*
6437          * When VLAN item exists in flow, mark packet as tagged,
6438          * even if TCI is not specified.
6439          */
6440         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6441                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6442                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6443         }
6444         if (!vlan_v)
6445                 return;
6446         if (!vlan_m)
6447                 vlan_m = &rte_flow_item_vlan_mask;
6448         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6449         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
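	/* TCI layout: PCP in bits 13-15, DEI/CFI in bit 12, VID in bits 0-11. */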
6450         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6451         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6452         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6453         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6454         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6455         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6456         /*
6457          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6458          * ethertype, and use ip_version field instead.
6459          */
6460         if (vlan_m->inner_type == 0xFFFF) {
6461                 switch (vlan_v->inner_type) {
6462                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6463                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6464                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6465                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6466                         return;
6467                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6468                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6469                         return;
6470                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6471                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6472                         return;
6473                 default:
6474                         break;
6475                 }
6476         }
6477         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6478                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6479                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6480                 /* Only one vlan_tag bit can be set. */
6481                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6482                 return;
6483         }
6484         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6485                  rte_be_to_cpu_16(vlan_m->inner_type));
6486         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6487                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6488 }
6489
6490 /**
6491  * Add IPV4 item to matcher and to the value.
6492  *
6493  * @param[in, out] matcher
6494  *   Flow matcher.
6495  * @param[in, out] key
6496  *   Flow matcher value.
6497  * @param[in] item
6498  *   Flow pattern to translate.
6499  * @param[in] inner
6500  *   Item is inner pattern.
6501  * @param[in] group
6502  *   The group to insert the rule.
6503  */
6504 static void
6505 flow_dv_translate_item_ipv4(void *matcher, void *key,
6506                             const struct rte_flow_item *item,
6507                             int inner, uint32_t group)
6508 {
6509         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6510         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6511         const struct rte_flow_item_ipv4 nic_mask = {
6512                 .hdr = {
6513                         .src_addr = RTE_BE32(0xffffffff),
6514                         .dst_addr = RTE_BE32(0xffffffff),
6515                         .type_of_service = 0xff,
6516                         .next_proto_id = 0xff,
6517                         .time_to_live = 0xff,
6518                 },
6519         };
6520         void *headers_m;
6521         void *headers_v;
6522         char *l24_m;
6523         char *l24_v;
6524         uint8_t tos;
6525
6526         if (inner) {
6527                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6528                                          inner_headers);
6529                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6530         } else {
6531                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6532                                          outer_headers);
6533                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6534         }
6535         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6536         if (!ipv4_v)
6537                 return;
6538         if (!ipv4_m)
6539                 ipv4_m = &nic_mask;
6540         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6541                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6542         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6543                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6544         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6545         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6546         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6547                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6548         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6549                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6550         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6551         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6552         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
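	/* The ToS byte carries ECN in bits 0-1 and DSCP in bits 2-7. */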
6553         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6554                  ipv4_m->hdr.type_of_service);
6555         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6556         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6557                  ipv4_m->hdr.type_of_service >> 2);
6558         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6559         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6560                  ipv4_m->hdr.next_proto_id);
6561         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6562                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6563         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6564                  ipv4_m->hdr.time_to_live);
6565         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6566                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6567         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6568                  !!(ipv4_m->hdr.fragment_offset));
6569         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6570                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6571 }
6572
6573 /**
6574  * Add IPV6 item to matcher and to the value.
6575  *
6576  * @param[in, out] matcher
6577  *   Flow matcher.
6578  * @param[in, out] key
6579  *   Flow matcher value.
6580  * @param[in] item
6581  *   Flow pattern to translate.
6582  * @param[in] inner
6583  *   Item is inner pattern.
6584  * @param[in] group
6585  *   The group to insert the rule.
6586  */
6587 static void
6588 flow_dv_translate_item_ipv6(void *matcher, void *key,
6589                             const struct rte_flow_item *item,
6590                             int inner, uint32_t group)
6591 {
6592         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6593         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6594         const struct rte_flow_item_ipv6 nic_mask = {
6595                 .hdr = {
6596                         .src_addr =
6597                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6598                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6599                         .dst_addr =
6600                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6601                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6602                         .vtc_flow = RTE_BE32(0xffffffff),
6603                         .proto = 0xff,
6604                         .hop_limits = 0xff,
6605                 },
6606         };
6607         void *headers_m;
6608         void *headers_v;
6609         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6610         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6611         char *l24_m;
6612         char *l24_v;
6613         uint32_t vtc_m;
6614         uint32_t vtc_v;
6615         int i;
6616         int size;
6617
6618         if (inner) {
6619                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6620                                          inner_headers);
6621                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6622         } else {
6623                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6624                                          outer_headers);
6625                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6626         }
6627         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6628         if (!ipv6_v)
6629                 return;
6630         if (!ipv6_m)
6631                 ipv6_m = &nic_mask;
6632         size = sizeof(ipv6_m->hdr.dst_addr);
6633         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6634                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6635         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6636                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6637         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6638         for (i = 0; i < size; ++i)
6639                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6640         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6641                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6642         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6643                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6644         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6645         for (i = 0; i < size; ++i)
6646                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6647         /* TOS. */
6648         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6649         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
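	/*
	 * vtc_flow layout: version in bits 28-31, traffic class in bits 20-27
	 * (ECN in bits 20-21, DSCP in bits 22-27), flow label in bits 0-19.
	 */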
6650         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6651         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6652         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6653         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6654         /* Label. */
6655         if (inner) {
6656                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6657                          vtc_m);
6658                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6659                          vtc_v);
6660         } else {
6661                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6662                          vtc_m);
6663                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6664                          vtc_v);
6665         }
6666         /* Protocol. */
6667         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6668                  ipv6_m->hdr.proto);
6669         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6670                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6671         /* Hop limit. */
6672         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6673                  ipv6_m->hdr.hop_limits);
6674         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6675                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6676         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6677                  !!(ipv6_m->has_frag_ext));
6678         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6679                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6680 }
6681
6682 /**
6683  * Add IPV6 fragment extension item to matcher and to the value.
6684  *
6685  * @param[in, out] matcher
6686  *   Flow matcher.
6687  * @param[in, out] key
6688  *   Flow matcher value.
6689  * @param[in] item
6690  *   Flow pattern to translate.
6691  * @param[in] inner
6692  *   Item is inner pattern.
6693  */
6694 static void
6695 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6696                                      const struct rte_flow_item *item,
6697                                      int inner)
6698 {
6699         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6700         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6701         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6702                 .hdr = {
6703                         .next_header = 0xff,
6704                         .frag_data = RTE_BE16(0xffff),
6705                 },
6706         };
6707         void *headers_m;
6708         void *headers_v;
6709
6710         if (inner) {
6711                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6712                                          inner_headers);
6713                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6714         } else {
6715                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6716                                          outer_headers);
6717                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6718         }
6719         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6720         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6721         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6722         if (!ipv6_frag_ext_v)
6723                 return;
6724         if (!ipv6_frag_ext_m)
6725                 ipv6_frag_ext_m = &nic_mask;
6726         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6727                  ipv6_frag_ext_m->hdr.next_header);
6728         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6729                  ipv6_frag_ext_v->hdr.next_header &
6730                  ipv6_frag_ext_m->hdr.next_header);
6731 }
6732
6733 /**
6734  * Add TCP item to matcher and to the value.
6735  *
6736  * @param[in, out] matcher
6737  *   Flow matcher.
6738  * @param[in, out] key
6739  *   Flow matcher value.
6740  * @param[in] item
6741  *   Flow pattern to translate.
6742  * @param[in] inner
6743  *   Item is inner pattern.
6744  */
6745 static void
6746 flow_dv_translate_item_tcp(void *matcher, void *key,
6747                            const struct rte_flow_item *item,
6748                            int inner)
6749 {
6750         const struct rte_flow_item_tcp *tcp_m = item->mask;
6751         const struct rte_flow_item_tcp *tcp_v = item->spec;
6752         void *headers_m;
6753         void *headers_v;
6754
6755         if (inner) {
6756                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6757                                          inner_headers);
6758                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6759         } else {
6760                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6761                                          outer_headers);
6762                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6763         }
6764         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6765         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6766         if (!tcp_v)
6767                 return;
6768         if (!tcp_m)
6769                 tcp_m = &rte_flow_item_tcp_mask;
6770         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6771                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6772         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6773                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6774         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6775                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6776         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6777                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6778         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6779                  tcp_m->hdr.tcp_flags);
6780         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6781                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6782 }
6783
6784 /**
6785  * Add UDP item to matcher and to the value.
6786  *
6787  * @param[in, out] matcher
6788  *   Flow matcher.
6789  * @param[in, out] key
6790  *   Flow matcher value.
6791  * @param[in] item
6792  *   Flow pattern to translate.
6793  * @param[in] inner
6794  *   Item is inner pattern.
6795  */
6796 static void
6797 flow_dv_translate_item_udp(void *matcher, void *key,
6798                            const struct rte_flow_item *item,
6799                            int inner)
6800 {
6801         const struct rte_flow_item_udp *udp_m = item->mask;
6802         const struct rte_flow_item_udp *udp_v = item->spec;
6803         void *headers_m;
6804         void *headers_v;
6805
6806         if (inner) {
6807                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6808                                          inner_headers);
6809                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6810         } else {
6811                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6812                                          outer_headers);
6813                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6814         }
6815         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6816         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6817         if (!udp_v)
6818                 return;
6819         if (!udp_m)
6820                 udp_m = &rte_flow_item_udp_mask;
6821         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6822                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6823         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6824                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6825         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6826                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6827         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6828                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6829 }
6830
6831 /**
6832  * Add GRE optional Key item to matcher and to the value.
6833  *
6834  * @param[in, out] matcher
6835  *   Flow matcher.
6836  * @param[in, out] key
6837  *   Flow matcher value.
6838  * @param[in] item
6839  *   Flow pattern to translate.
6842  */
6843 static void
6844 flow_dv_translate_item_gre_key(void *matcher, void *key,
6845                                    const struct rte_flow_item *item)
6846 {
6847         const rte_be32_t *key_m = item->mask;
6848         const rte_be32_t *key_v = item->spec;
6849         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6850         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6851         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6852
6853         /* GRE K bit must be on and should already be validated. */
6854         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6855         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6856         if (!key_v)
6857                 return;
6858         if (!key_m)
6859                 key_m = &gre_key_default_mask;
6860         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6861                  rte_be_to_cpu_32(*key_m) >> 8);
6862         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6863                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6864         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6865                  rte_be_to_cpu_32(*key_m) & 0xFF);
6866         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6867                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6868 }
6869
6870 /**
6871  * Add GRE item to matcher and to the value.
6872  *
6873  * @param[in, out] matcher
6874  *   Flow matcher.
6875  * @param[in, out] key
6876  *   Flow matcher value.
6877  * @param[in] item
6878  *   Flow pattern to translate.
6879  * @param[in] inner
6880  *   Item is inner pattern.
6881  */
6882 static void
6883 flow_dv_translate_item_gre(void *matcher, void *key,
6884                            const struct rte_flow_item *item,
6885                            int inner)
6886 {
6887         const struct rte_flow_item_gre *gre_m = item->mask;
6888         const struct rte_flow_item_gre *gre_v = item->spec;
6889         void *headers_m;
6890         void *headers_v;
6891         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6892         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
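	/* Bit layout of GRE c_rsvd0_ver once byte-swapped to host order. */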
6893         struct {
6894                 union {
6895                         __extension__
6896                         struct {
6897                                 uint16_t version:3;
6898                                 uint16_t rsvd0:9;
6899                                 uint16_t s_present:1;
6900                                 uint16_t k_present:1;
6901                                 uint16_t rsvd_bit1:1;
6902                                 uint16_t c_present:1;
6903                         };
6904                         uint16_t value;
6905                 };
6906         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6907
6908         if (inner) {
6909                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6910                                          inner_headers);
6911                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6912         } else {
6913                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6914                                          outer_headers);
6915                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6916         }
6917         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6918         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6919         if (!gre_v)
6920                 return;
6921         if (!gre_m)
6922                 gre_m = &rte_flow_item_gre_mask;
6923         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6924                  rte_be_to_cpu_16(gre_m->protocol));
6925         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6926                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6927         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6928         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6929         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6930                  gre_crks_rsvd0_ver_m.c_present);
6931         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6932                  gre_crks_rsvd0_ver_v.c_present &
6933                  gre_crks_rsvd0_ver_m.c_present);
6934         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6935                  gre_crks_rsvd0_ver_m.k_present);
6936         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6937                  gre_crks_rsvd0_ver_v.k_present &
6938                  gre_crks_rsvd0_ver_m.k_present);
6939         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6940                  gre_crks_rsvd0_ver_m.s_present);
6941         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6942                  gre_crks_rsvd0_ver_v.s_present &
6943                  gre_crks_rsvd0_ver_m.s_present);
6944 }
6945
6946 /**
6947  * Add NVGRE item to matcher and to the value.
6948  *
6949  * @param[in, out] matcher
6950  *   Flow matcher.
6951  * @param[in, out] key
6952  *   Flow matcher value.
6953  * @param[in] item
6954  *   Flow pattern to translate.
6955  * @param[in] inner
6956  *   Item is inner pattern.
6957  */
6958 static void
6959 flow_dv_translate_item_nvgre(void *matcher, void *key,
6960                              const struct rte_flow_item *item,
6961                              int inner)
6962 {
6963         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6964         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6965         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6966         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6967         const char *tni_flow_id_m;
6968         const char *tni_flow_id_v;
6969         char *gre_key_m;
6970         char *gre_key_v;
6971         int size;
6972         int i;
6973
6974         /* For NVGRE, GRE header fields must be set with defined values. */
6975         const struct rte_flow_item_gre gre_spec = {
6976                 .c_rsvd0_ver = RTE_BE16(0x2000),
6977                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6978         };
6979         const struct rte_flow_item_gre gre_mask = {
6980                 .c_rsvd0_ver = RTE_BE16(0xB000),
6981                 .protocol = RTE_BE16(UINT16_MAX),
6982         };
6983         const struct rte_flow_item gre_item = {
6984                 .spec = &gre_spec,
6985                 .mask = &gre_mask,
6986                 .last = NULL,
6987         };
6988         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6989         if (!nvgre_v)
6990                 return;
6991         if (!nvgre_m)
6992                 nvgre_m = &rte_flow_item_nvgre_mask;
6993         tni_flow_id_m = (const char *)nvgre_m->tni;
6994         tni_flow_id_v = (const char *)nvgre_v->tni;
6995         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
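	/* The 24-bit TNI plus the 8-bit flow_id fill the 32-bit GRE key. */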
6996         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6997         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
6998         memcpy(gre_key_m, tni_flow_id_m, size);
6999         for (i = 0; i < size; ++i)
7000                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7001 }
7002
7003 /**
7004  * Add VXLAN item to matcher and to the value.
7005  *
7006  * @param[in, out] matcher
7007  *   Flow matcher.
7008  * @param[in, out] key
7009  *   Flow matcher value.
7010  * @param[in] item
7011  *   Flow pattern to translate.
7012  * @param[in] inner
7013  *   Item is inner pattern.
7014  */
7015 static void
7016 flow_dv_translate_item_vxlan(void *matcher, void *key,
7017                              const struct rte_flow_item *item,
7018                              int inner)
7019 {
7020         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7021         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7022         void *headers_m;
7023         void *headers_v;
7024         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7025         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7026         char *vni_m;
7027         char *vni_v;
7028         uint16_t dport;
7029         int size;
7030         int i;
7031
7032         if (inner) {
7033                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7034                                          inner_headers);
7035                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7036         } else {
7037                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7038                                          outer_headers);
7039                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7040         }
7041         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7042                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
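	/* Match the UDP destination port only if no UDP item set it already. */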
7043         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7044                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7045                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7046         }
7047         if (!vxlan_v)
7048                 return;
7049         if (!vxlan_m)
7050                 vxlan_m = &rte_flow_item_vxlan_mask;
7051         size = sizeof(vxlan_m->vni);
7052         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7053         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7054         memcpy(vni_m, vxlan_m->vni, size);
7055         for (i = 0; i < size; ++i)
7056                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7057 }
7058
7059 /**
7060  * Add VXLAN-GPE item to matcher and to the value.
7061  *
7062  * @param[in, out] matcher
7063  *   Flow matcher.
7064  * @param[in, out] key
7065  *   Flow matcher value.
7066  * @param[in] item
7067  *   Flow pattern to translate.
7068  * @param[in] inner
7069  *   Item is inner pattern.
7070  */
7072 static void
7073 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7074                                  const struct rte_flow_item *item, int inner)
7075 {
7076         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7077         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7078         void *headers_m;
7079         void *headers_v;
7080         void *misc_m =
7081                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7082         void *misc_v =
7083                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7084         char *vni_m;
7085         char *vni_v;
7086         uint16_t dport;
7087         int size;
7088         int i;
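	/*
	 * Default GPE flags value 0x0c matches the I (VNI valid) and
	 * P (Next Protocol) bits when the item gives no explicit flags.
	 */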
7089         uint8_t flags_m = 0xff;
7090         uint8_t flags_v = 0xc;
7091
7092         if (inner) {
7093                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7094                                          inner_headers);
7095                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7096         } else {
7097                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7098                                          outer_headers);
7099                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7100         }
7101         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7102                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7103         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7104                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7105                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7106         }
7107         if (!vxlan_v)
7108                 return;
7109         if (!vxlan_m)
7110                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7111         size = sizeof(vxlan_m->vni);
7112         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7113         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7114         memcpy(vni_m, vxlan_m->vni, size);
7115         for (i = 0; i < size; ++i)
7116                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7117         if (vxlan_m->flags) {
7118                 flags_m = vxlan_m->flags;
7119                 flags_v = vxlan_v->flags;
7120         }
7121         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7122         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7123         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7124                  vxlan_m->protocol);
7125         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7126                  vxlan_v->protocol);
7127 }
7128
7129 /**
7130  * Add Geneve item to matcher and to the value.
7131  *
7132  * @param[in, out] matcher
7133  *   Flow matcher.
7134  * @param[in, out] key
7135  *   Flow matcher value.
7136  * @param[in] item
7137  *   Flow pattern to translate.
7138  * @param[in] inner
7139  *   Item is inner pattern.
7140  */
7142 static void
7143 flow_dv_translate_item_geneve(void *matcher, void *key,
7144                               const struct rte_flow_item *item, int inner)
7145 {
7146         const struct rte_flow_item_geneve *geneve_m = item->mask;
7147         const struct rte_flow_item_geneve *geneve_v = item->spec;
7148         void *headers_m;
7149         void *headers_v;
7150         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7151         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7152         uint16_t dport;
7153         uint16_t gbhdr_m;
7154         uint16_t gbhdr_v;
7155         char *vni_m;
7156         char *vni_v;
7157         size_t size, i;
7158
7159         if (inner) {
7160                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7161                                          inner_headers);
7162                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7163         } else {
7164                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7165                                          outer_headers);
7166                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7167         }
7168         dport = MLX5_UDP_PORT_GENEVE;
7169         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7170                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7171                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7172         }
7173         if (!geneve_v)
7174                 return;
7175         if (!geneve_m)
7176                 geneve_m = &rte_flow_item_geneve_mask;
7177         size = sizeof(geneve_m->vni);
7178         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7179         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7180         memcpy(vni_m, geneve_m->vni, size);
7181         for (i = 0; i < size; ++i)
7182                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7183         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7184                  rte_be_to_cpu_16(geneve_m->protocol));
7185         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7186                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
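	/*
	 * First 16 bits of the Geneve header: Ver(2) | Opt Len(6) | O(1) |
	 * C(1) | Rsvd(6). Extract the OAM flag and option length to match on.
	 */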
7187         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7188         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7189         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7190                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7191         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7192                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7193         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7194                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7195         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7196                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7197                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7198 }
7199
7200 /**
7201  * Add MPLS item to matcher and to the value.
7202  *
7203  * @param[in, out] matcher
7204  *   Flow matcher.
7205  * @param[in, out] key
7206  *   Flow matcher value.
7207  * @param[in] item
7208  *   Flow pattern to translate.
7209  * @param[in] prev_layer
7210  *   The protocol layer indicated in previous item.
7211  * @param[in] inner
7212  *   Item is inner pattern.
7213  */
7214 static void
7215 flow_dv_translate_item_mpls(void *matcher, void *key,
7216                             const struct rte_flow_item *item,
7217                             uint64_t prev_layer,
7218                             int inner)
7219 {
7220         const uint32_t *in_mpls_m = item->mask;
7221         const uint32_t *in_mpls_v = item->spec;
7222         uint32_t *out_mpls_m = NULL;
7223         uint32_t *out_mpls_v = NULL;
7224         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7225         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7226         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7227                                      misc_parameters_2);
7228         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7229         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7230         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7231
7232         switch (prev_layer) {
7233         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7234                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7235                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7236                          MLX5_UDP_PORT_MPLS);
7237                 break;
7238         case MLX5_FLOW_LAYER_GRE:
7239                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7240                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7241                          RTE_ETHER_TYPE_MPLS);
7242                 break;
7243         default:
7244                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7245                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7246                          IPPROTO_MPLS);
7247                 break;
7248         }
7249         if (!in_mpls_v)
7250                 return;
7251         if (!in_mpls_m)
7252                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7253         switch (prev_layer) {
7254         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7255                 out_mpls_m =
7256                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7257                                                  outer_first_mpls_over_udp);
7258                 out_mpls_v =
7259                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7260                                                  outer_first_mpls_over_udp);
7261                 break;
7262         case MLX5_FLOW_LAYER_GRE:
7263                 out_mpls_m =
7264                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7265                                                  outer_first_mpls_over_gre);
7266                 out_mpls_v =
7267                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7268                                                  outer_first_mpls_over_gre);
7269                 break;
7270         default:
7271                 /* Inner MPLS not over GRE is not supported. */
7272                 if (!inner) {
7273                         out_mpls_m =
7274                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7275                                                          misc2_m,
7276                                                          outer_first_mpls);
7277                         out_mpls_v =
7278                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7279                                                          misc2_v,
7280                                                          outer_first_mpls);
7281                 }
7282                 break;
7283         }
7284         if (out_mpls_m && out_mpls_v) {
7285                 *out_mpls_m = *in_mpls_m;
7286                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7287         }
7288 }
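
/*
 * Usage sketch (illustrative only): for an MPLS-over-UDP pattern the
 * caller passes the layer flag accumulated from the preceding UDP item,
 * so the label is matched in outer_first_mpls_over_udp:
 *
 *     flow_dv_translate_item_mpls(matcher, key, item,
 *                                 MLX5_FLOW_LAYER_OUTER_L4_UDP, 0);
 */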
7289
7290 /**
7291  * Add metadata register item to matcher
7292  *
7293  * @param[in, out] matcher
7294  *   Flow matcher.
7295  * @param[in, out] key
7296  *   Flow matcher value.
7297  * @param[in] reg_type
7298  *   Type of device metadata register
7299  * @param[in] data
7300  *   Register value
7301  * @param[in] mask
7302  *   Register mask
7303  */
7304 static void
7305 flow_dv_match_meta_reg(void *matcher, void *key,
7306                        enum modify_reg reg_type,
7307                        uint32_t data, uint32_t mask)
7308 {
7309         void *misc2_m =
7310                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7311         void *misc2_v =
7312                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7313         uint32_t temp;
7314
7315         data &= mask;
7316         switch (reg_type) {
7317         case REG_A:
7318                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7319                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7320                 break;
7321         case REG_B:
7322                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7323                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7324                 break;
7325         case REG_C_0:
7326                 /*
7327                  * The metadata register C0 field might be divided into
7328                  * source vport index and META item value, we should set
7329                  * this field according to specified mask, not as whole one.
7330                  */
7331                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7332                 temp |= mask;
7333                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7334                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7335                 temp &= ~mask;
7336                 temp |= data;
7337                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7338                 break;
7339         case REG_C_1:
7340                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7341                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7342                 break;
7343         case REG_C_2:
7344                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7345                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7346                 break;
7347         case REG_C_3:
7348                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7349                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7350                 break;
7351         case REG_C_4:
7352                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7353                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7354                 break;
7355         case REG_C_5:
7356                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7357                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7358                 break;
7359         case REG_C_6:
7360                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7361                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7362                 break;
7363         case REG_C_7:
7364                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7365                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7366                 break;
7367         default:
7368                 MLX5_ASSERT(false);
7369                 break;
7370         }
7371 }
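
/*
 * Usage sketch (illustrative only): match the full 32-bit value 0x1234
 * in metadata register C1 on both the matcher criteria and the key
 * (the data is masked internally before being written):
 *
 *     flow_dv_match_meta_reg(matcher, key, REG_C_1, 0x1234, UINT32_MAX);
 */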
7372
7373 /**
7374  * Add MARK item to matcher
7375  *
7376  * @param[in] dev
7377  *   The device to configure through.
7378  * @param[in, out] matcher
7379  *   Flow matcher.
7380  * @param[in, out] key
7381  *   Flow matcher value.
7382  * @param[in] item
7383  *   Flow pattern to translate.
7384  */
7385 static void
7386 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7387                             void *matcher, void *key,
7388                             const struct rte_flow_item *item)
7389 {
7390         struct mlx5_priv *priv = dev->data->dev_private;
7391         const struct rte_flow_item_mark *mark;
7392         uint32_t value;
7393         uint32_t mask;
7394
7395         mark = item->mask ? (const void *)item->mask :
7396                             &rte_flow_item_mark_mask;
7397         mask = mark->id & priv->sh->dv_mark_mask;
7398         mark = (const void *)item->spec;
7399         MLX5_ASSERT(mark);
7400         value = mark->id & priv->sh->dv_mark_mask & mask;
7401         if (mask) {
7402                 enum modify_reg reg;
7403
7404                 /* Get the metadata register index for the mark. */
7405                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7406                 MLX5_ASSERT(reg > 0);
7407                 if (reg == REG_C_0) {
7408                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7410                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7411
7412                         mask &= msk_c0;
7413                         mask <<= shl_c0;
7414                         value <<= shl_c0;
7415                 }
7416                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7417         }
7418 }
7419
7420 /**
7421  * Add META item to matcher
7422  *
7423  * @param[in] dev
7424  *   The device to configure through.
7425  * @param[in, out] matcher
7426  *   Flow matcher.
7427  * @param[in, out] key
7428  *   Flow matcher value.
7429  * @param[in] attr
7430  *   Attributes of flow that includes this item.
7431  * @param[in] item
7432  *   Flow pattern to translate.
7433  */
7434 static void
7435 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7436                             void *matcher, void *key,
7437                             const struct rte_flow_attr *attr,
7438                             const struct rte_flow_item *item)
7439 {
7440         const struct rte_flow_item_meta *meta_m;
7441         const struct rte_flow_item_meta *meta_v;
7442
7443         meta_m = (const void *)item->mask;
7444         if (!meta_m)
7445                 meta_m = &rte_flow_item_meta_mask;
7446         meta_v = (const void *)item->spec;
7447         if (meta_v) {
7448                 int reg;
7449                 uint32_t value = meta_v->data;
7450                 uint32_t mask = meta_m->data;
7451
7452                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7453                 if (reg < 0)
7454                         return;
7455                 /*
7456                  * In datapath code there are no endianness
7457                  * conversions for performance reasons, all
7458                  * pattern conversions are done in rte_flow.
7459                  */
7460                 value = rte_cpu_to_be_32(value);
7461                 mask = rte_cpu_to_be_32(mask);
7462                 if (reg == REG_C_0) {
7463                         struct mlx5_priv *priv = dev->data->dev_private;
7464                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7465                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7466 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7467                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7468
7469                         value >>= shr_c0;
7470                         mask >>= shr_c0;
7471 #endif
7472                         value <<= shl_c0;
7473                         mask <<= shl_c0;
7474                         MLX5_ASSERT(msk_c0);
7475                         MLX5_ASSERT(!(~msk_c0 & mask));
7476                 }
7477                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7478         }
7479 }
7480
7481 /**
7482  * Add vport metadata Reg C0 item to matcher
7483  *
7484  * @param[in, out] matcher
7485  *   Flow matcher.
7486  * @param[in, out] key
7487  *   Flow matcher value.
7488  * @param[in] value
7489  *   Register C0 value to match.
 * @param[in] mask
 *   Register C0 mask.
7490  */
7491 static void
7492 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7493                                   uint32_t value, uint32_t mask)
7494 {
7495         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7496 }
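
/*
 * Usage sketch (illustrative only): this helper is how the port-id
 * translation below matches the E-Switch source vport metadata kept in
 * register C0:
 *
 *     flow_dv_translate_item_meta_vport(matcher, key,
 *                                       priv->vport_meta_tag,
 *                                       priv->vport_meta_mask);
 */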
7497
7498 /**
7499  * Add tag item to matcher
7500  *
7501  * @param[in] dev
7502  *   The device to configure through.
7503  * @param[in, out] matcher
7504  *   Flow matcher.
7505  * @param[in, out] key
7506  *   Flow matcher value.
7507  * @param[in] item
7508  *   Flow pattern to translate.
7509  */
7510 static void
7511 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7512                                 void *matcher, void *key,
7513                                 const struct rte_flow_item *item)
7514 {
7515         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7516         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7517         uint32_t mask, value;
7518
7519         MLX5_ASSERT(tag_v);
7520         value = tag_v->data;
7521         mask = tag_m ? tag_m->data : UINT32_MAX;
7522         if (tag_v->id == REG_C_0) {
7523                 struct mlx5_priv *priv = dev->data->dev_private;
7524                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7525                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7526
7527                 mask &= msk_c0;
7528                 mask <<= shl_c0;
7529                 value <<= shl_c0;
7530         }
7531         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7532 }
7533
7534 /**
7535  * Add TAG item to matcher
7536  *
7537  * @param[in] dev
7538  *   The device to configure through.
7539  * @param[in, out] matcher
7540  *   Flow matcher.
7541  * @param[in, out] key
7542  *   Flow matcher value.
7543  * @param[in] item
7544  *   Flow pattern to translate.
7545  */
7546 static void
7547 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7548                            void *matcher, void *key,
7549                            const struct rte_flow_item *item)
7550 {
7551         const struct rte_flow_item_tag *tag_v = item->spec;
7552         const struct rte_flow_item_tag *tag_m = item->mask;
7553         enum modify_reg reg;
7554
7555         MLX5_ASSERT(tag_v);
7556         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7557         /* Get the metadata register index for the tag. */
7558         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7559         MLX5_ASSERT(reg > 0);
7560         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7561 }
7562
7563 /**
7564  * Add source vport match to the specified matcher.
7565  *
7566  * @param[in, out] matcher
7567  *   Flow matcher.
7568  * @param[in, out] key
7569  *   Flow matcher value.
7570  * @param[in] port
7571  *   Source vport value to match
7572  * @param[in] mask
7573  *   Mask
7574  */
7575 static void
7576 flow_dv_translate_item_source_vport(void *matcher, void *key,
7577                                     int16_t port, uint16_t mask)
7578 {
7579         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7580         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7581
7582         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7583         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7584 }
7585
7586 /**
7587  * Translate port-id item to eswitch match on port-id.
7588  *
7589  * @param[in] dev
7590  *   The device to configure through.
7591  * @param[in, out] matcher
7592  *   Flow matcher.
7593  * @param[in, out] key
7594  *   Flow matcher value.
7595  * @param[in] item
7596  *   Flow pattern to translate.
7597  *
7598  * @return
7599  *   0 on success, a negative errno value otherwise.
7600  */
7601 static int
7602 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7603                                void *key, const struct rte_flow_item *item)
7604 {
7605         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7606         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7607         struct mlx5_priv *priv;
7608         uint16_t mask, id;
7609
7610         mask = pid_m ? pid_m->id : 0xffff;
7611         id = pid_v ? pid_v->id : dev->data->port_id;
7612         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7613         if (!priv)
7614                 return -rte_errno;
7615         /* Translate to vport field or to metadata, depending on mode. */
7616         if (priv->vport_meta_mask)
7617                 flow_dv_translate_item_meta_vport(matcher, key,
7618                                                   priv->vport_meta_tag,
7619                                                   priv->vport_meta_mask);
7620         else
7621                 flow_dv_translate_item_source_vport(matcher, key,
7622                                                     priv->vport_id, mask);
7623         return 0;
7624 }
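
/*
 * Usage sketch (illustrative only): transfer rules without an explicit
 * PORT_ID item can pass a NULL item, which falls back to matching the
 * device's own port (dev->data->port_id):
 *
 *     flow_dv_translate_item_port_id(dev, matcher, key, NULL);
 */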
7625
7626 /**
7627  * Add ICMP6 item to matcher and to the value.
7628  *
7629  * @param[in, out] matcher
7630  *   Flow matcher.
7631  * @param[in, out] key
7632  *   Flow matcher value.
7633  * @param[in] item
7634  *   Flow pattern to translate.
7635  * @param[in] inner
7636  *   Item is inner pattern.
7637  */
7638 static void
7639 flow_dv_translate_item_icmp6(void *matcher, void *key,
7640                               const struct rte_flow_item *item,
7641                               int inner)
7642 {
7643         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7644         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7645         void *headers_m;
7646         void *headers_v;
7647         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7648                                      misc_parameters_3);
7649         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7650         if (inner) {
7651                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7652                                          inner_headers);
7653                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7654         } else {
7655                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7656                                          outer_headers);
7657                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7658         }
7659         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7660         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7661         if (!icmp6_v)
7662                 return;
7663         if (!icmp6_m)
7664                 icmp6_m = &rte_flow_item_icmp6_mask;
7665         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7666         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7667                  icmp6_v->type & icmp6_m->type);
7668         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7669         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7670                  icmp6_v->code & icmp6_m->code);
7671 }
7672
7673 /**
7674  * Add ICMP item to matcher and to the value.
7675  *
7676  * @param[in, out] matcher
7677  *   Flow matcher.
7678  * @param[in, out] key
7679  *   Flow matcher value.
7680  * @param[in] item
7681  *   Flow pattern to translate.
7682  * @param[in] inner
7683  *   Item is inner pattern.
7684  */
7685 static void
7686 flow_dv_translate_item_icmp(void *matcher, void *key,
7687                             const struct rte_flow_item *item,
7688                             int inner)
7689 {
7690         const struct rte_flow_item_icmp *icmp_m = item->mask;
7691         const struct rte_flow_item_icmp *icmp_v = item->spec;
7692         uint32_t icmp_header_data_m = 0;
7693         uint32_t icmp_header_data_v = 0;
7694         void *headers_m;
7695         void *headers_v;
7696         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7697                                      misc_parameters_3);
7698         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7699         if (inner) {
7700                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7701                                          inner_headers);
7702                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7703         } else {
7704                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7705                                          outer_headers);
7706                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7707         }
7708         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7709         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7710         if (!icmp_v)
7711                 return;
7712         if (!icmp_m)
7713                 icmp_m = &rte_flow_item_icmp_mask;
7714         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7715                  icmp_m->hdr.icmp_type);
7716         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7717                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7718         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7719                  icmp_m->hdr.icmp_code);
7720         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7721                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7722         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7723         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7724         if (icmp_header_data_m) {
7725                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7726                 icmp_header_data_v |=
7727                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7728                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7729                          icmp_header_data_m);
7730                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7731                          icmp_header_data_v & icmp_header_data_m);
7732         }
7733 }
7734
7735 /**
7736  * Add GTP item to matcher and to the value.
7737  *
7738  * @param[in, out] matcher
7739  *   Flow matcher.
7740  * @param[in, out] key
7741  *   Flow matcher value.
7742  * @param[in] item
7743  *   Flow pattern to translate.
7744  * @param[in] inner
7745  *   Item is inner pattern.
7746  */
7747 static void
7748 flow_dv_translate_item_gtp(void *matcher, void *key,
7749                            const struct rte_flow_item *item, int inner)
7750 {
7751         const struct rte_flow_item_gtp *gtp_m = item->mask;
7752         const struct rte_flow_item_gtp *gtp_v = item->spec;
7753         void *headers_m;
7754         void *headers_v;
7755         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7756                                      misc_parameters_3);
7757         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7758         uint16_t dport = RTE_GTPU_UDP_PORT;
7759
7760         if (inner) {
7761                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7762                                          inner_headers);
7763                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7764         } else {
7765                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7766                                          outer_headers);
7767                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7768         }
7769         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7770                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7771                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7772         }
7773         if (!gtp_v)
7774                 return;
7775         if (!gtp_m)
7776                 gtp_m = &rte_flow_item_gtp_mask;
7777         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7778                  gtp_m->v_pt_rsv_flags);
7779         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7780                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7781         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7782         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7783                  gtp_v->msg_type & gtp_m->msg_type);
7784         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7785                  rte_be_to_cpu_32(gtp_m->teid));
7786         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7787                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7788 }
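
/*
 * Example (illustrative only): with the default mask
 * (rte_flow_item_gtp_mask, teid == RTE_BE32(0xffffffff)) only gtpu_teid
 * is matched exactly; v_pt_rsv_flags and msg_type stay wildcarded
 * because their default mask bytes are zero.
 */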
7789
7790 /**
7791  * Add eCPRI item to matcher and to the value.
7792  *
7793  * @param[in] dev
7794  *   The device to configure through.
7795  * @param[in, out] matcher
7796  *   Flow matcher.
7797  * @param[in, out] key
7798  *   Flow matcher value.
7799  * @param[in] item
7800  *   Flow pattern to translate.
7803  */
7804 static void
7805 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7806                              void *key, const struct rte_flow_item *item)
7807 {
7808         struct mlx5_priv *priv = dev->data->dev_private;
7809         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7810         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7811         struct rte_ecpri_common_hdr common;
7812         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7813                                      misc_parameters_4);
7814         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7815         uint32_t *samples;
7816         void *dw_m;
7817         void *dw_v;
7818
7819         if (!ecpri_v)
7820                 return;
7821         if (!ecpri_m)
7822                 ecpri_m = &rte_flow_item_ecpri_mask;
7823         /*
7824          * At most four DW samples are supported in a single matcher now.
7825          * Two of them are currently used for eCPRI matching:
7826          * 1. Type: one byte, mask should be 0x00ff0000 in network order
7827          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
7828          *    if any.
7829          */
7830         if (!ecpri_m->hdr.common.u32)
7831                 return;
7832         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7833         /* Need to take the whole DW as the mask to fill the entry. */
7834         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7835                             prog_sample_field_value_0);
7836         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7837                             prog_sample_field_value_0);
7838         /* Already big endian (network order) in the header. */
7839         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7840         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7841         /* Sample#0, used for matching type, offset 0. */
7842         MLX5_SET(fte_match_set_misc4, misc4_m,
7843                  prog_sample_field_id_0, samples[0]);
7844         /* It makes no sense to set the sample ID in the mask field. */
7845         MLX5_SET(fte_match_set_misc4, misc4_v,
7846                  prog_sample_field_id_0, samples[0]);
7847         /*
7848          * Check whether the message body part needs to be matched.
7849          * Wildcard rules matching only the type field must also be supported.
7850          */
7851         if (ecpri_m->hdr.dummy[0]) {
7852                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
7853                 switch (common.type) {
7854                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7855                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7856                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7857                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7858                                             prog_sample_field_value_1);
7859                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7860                                             prog_sample_field_value_1);
7861                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7862                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
7863                         /* Sample#1, to match message body, offset 4. */
7864                         MLX5_SET(fte_match_set_misc4, misc4_m,
7865                                  prog_sample_field_id_1, samples[1]);
7866                         MLX5_SET(fte_match_set_misc4, misc4_v,
7867                                  prog_sample_field_id_1, samples[1]);
7868                         break;
7869                 default:
7870                         /* Others, do not match any sample ID. */
7871                         break;
7872                 }
7873         }
7874 }
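
/*
 * Example (illustrative only): a wildcard rule matching only the eCPRI
 * message type uses a mask with hdr.common.type set and hdr.dummy[0]
 * clear; then only sample #0 (the first header DW) is programmed and
 * the message body sample stays unused.
 */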
7875
7876 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7877
7878 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7879         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7880                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7881
7882 /**
7883  * Calculate flow matcher enable bitmap.
7884  *
7885  * @param match_criteria
7886  *   Pointer to flow matcher criteria.
7887  *
7888  * @return
7889  *   Bitmap of enabled fields.
7890  */
7891 static uint8_t
7892 flow_dv_matcher_enable(uint32_t *match_criteria)
7893 {
7894         uint8_t match_criteria_enable;
7895
7896         match_criteria_enable =
7897                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7898                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7899         match_criteria_enable |=
7900                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7901                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7902         match_criteria_enable |=
7903                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7904                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7905         match_criteria_enable |=
7906                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7907                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7908         match_criteria_enable |=
7909                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7910                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7911         match_criteria_enable |=
7912                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7913                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7914         return match_criteria_enable;
7915 }
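
/*
 * Example (illustrative only): a matcher whose criteria touch only
 * outer_headers and misc_parameters_2 (say, an outer IPv4 match plus a
 * metadata register) yields:
 *
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT)
 */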
7916
7917 struct mlx5_hlist_entry *
7918 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7919 {
7920         struct mlx5_dev_ctx_shared *sh = list->ctx;
7921         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7922         struct rte_eth_dev *dev = ctx->dev;
7923         struct mlx5_flow_tbl_data_entry *tbl_data;
7924         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7925         struct rte_flow_error *error = ctx->error;
7926         union mlx5_flow_tbl_key key = { .v64 = key64 };
7927         struct mlx5_flow_tbl_resource *tbl;
7928         void *domain;
7929         uint32_t idx = 0;
7930         int ret;
7931
7932         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7933         if (!tbl_data) {
7934                 rte_flow_error_set(error, ENOMEM,
7935                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7936                                    NULL,
7937                                    "cannot allocate flow table data entry");
7938                 return NULL;
7939         }
7940         tbl_data->idx = idx;
7941         tbl_data->tunnel = tt_prm->tunnel;
7942         tbl_data->group_id = tt_prm->group_id;
7943         tbl_data->external = tt_prm->external;
7944         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7945         tbl_data->is_egress = !!key.direction;
7946         tbl = &tbl_data->tbl;
7947         if (key.dummy)
7948                 return &tbl_data->entry;
7949         if (key.domain)
7950                 domain = sh->fdb_domain;
7951         else if (key.direction)
7952                 domain = sh->tx_domain;
7953         else
7954                 domain = sh->rx_domain;
7955         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
7956         if (ret) {
7957                 rte_flow_error_set(error, ENOMEM,
7958                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7959                                    NULL, "cannot create flow table object");
7960                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7961                 return NULL;
7962         }
7963         if (key.table_id) {
7964                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
7965                                         (tbl->obj, &tbl_data->jump.action);
7966                 if (ret) {
7967                         rte_flow_error_set(error, ENOMEM,
7968                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7969                                            NULL,
7970                                            "cannot create flow jump action");
7971                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7972                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7973                         return NULL;
7974                 }
7975         }
7976         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
7977               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
7978               key.table_id);
7979         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
7980                              flow_dv_matcher_create_cb,
7981                              flow_dv_matcher_match_cb,
7982                              flow_dv_matcher_remove_cb);
7983         return &tbl_data->entry;
7984 }
7985
7986 /**
7987  * Get a flow table.
7988  *
7989  * @param[in, out] dev
7990  *   Pointer to rte_eth_dev structure.
7991  * @param[in] table_id
7992  *   Table id to use.
7993  * @param[in] egress
7994  *   Direction of the table.
7995  * @param[in] transfer
7996  *   E-Switch or NIC flow.
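 * @param[in] external
 *   Whether the table is used by external (application-created) flows.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if none.
 * @param[in] group_id
 *   The original (rte_flow) group id of the table.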
7997  * @param[in] dummy
7998  *   Dummy entry for dv API.
7999  * @param[out] error
8000  *   pointer to error structure.
8001  *
8002  * @return
8003  *   Returns the table resource based on the key, NULL in case of failure.
8004  */
8005 struct mlx5_flow_tbl_resource *
8006 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8007                          uint32_t table_id, uint8_t egress,
8008                          uint8_t transfer,
8009                          bool external,
8010                          const struct mlx5_flow_tunnel *tunnel,
8011                          uint32_t group_id, uint8_t dummy,
8012                          struct rte_flow_error *error)
8013 {
8014         struct mlx5_priv *priv = dev->data->dev_private;
8015         union mlx5_flow_tbl_key table_key = {
8016                 {
8017                         .table_id = table_id,
8018                         .dummy = dummy,
8019                         .domain = !!transfer,
8020                         .direction = !!egress,
8021                 }
8022         };
8023         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8024                 .tunnel = tunnel,
8025                 .group_id = group_id,
8026                 .external = external,
8027         };
8028         struct mlx5_flow_cb_ctx ctx = {
8029                 .dev = dev,
8030                 .error = error,
8031                 .data = &tt_prm,
8032         };
8033         struct mlx5_hlist_entry *entry;
8034         struct mlx5_flow_tbl_data_entry *tbl_data;
8035
8036         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8037         if (!entry) {
8038                 rte_flow_error_set(error, ENOMEM,
8039                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8040                                    "cannot get table");
8041                 return NULL;
8042         }
8043         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8044         return &tbl_data->tbl;
8045 }
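
/*
 * Usage sketch (illustrative only): the matcher registration below
 * gets (or creates) the table for its key before registering the
 * matcher in the per-table cache:
 *
 *     tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
 *                                    key->domain, false, NULL, 0, 0,
 *                                    error);
 */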
8046
8047 void
8048 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8049                       struct mlx5_hlist_entry *entry)
8050 {
8051         struct mlx5_dev_ctx_shared *sh = list->ctx;
8052         struct mlx5_flow_tbl_data_entry *tbl_data =
8053                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8054
8055         MLX5_ASSERT(entry && sh);
8056         if (tbl_data->jump.action)
8057                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8058         if (tbl_data->tbl.obj)
8059                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8060         if (tbl_data->tunnel_offload && tbl_data->external) {
8061                 struct mlx5_hlist_entry *he;
8062                 struct mlx5_hlist *tunnel_grp_hash;
8063                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8064                 union tunnel_tbl_key tunnel_key = {
8065                         .tunnel_id = tbl_data->tunnel ?
8066                                         tbl_data->tunnel->tunnel_id : 0,
8067                         .group = tbl_data->group_id
8068                 };
8069                 union mlx5_flow_tbl_key table_key = {
8070                         .v64 = entry->key
8071                 };
8072                 uint32_t table_id = table_key.table_id;
8073
8074                 tunnel_grp_hash = tbl_data->tunnel ?
8075                                         tbl_data->tunnel->groups :
8076                                         thub->groups;
8077                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8078                 if (he)
8079                         mlx5_hlist_unregister(tunnel_grp_hash, he);
8080                 DRV_LOG(DEBUG,
8081                         "Table_id %#x tunnel %u group %u released.",
8082                         table_id,
8083                         tbl_data->tunnel ?
8084                         tbl_data->tunnel->tunnel_id : 0,
8085                         tbl_data->group_id);
8086         }
8087         mlx5_cache_list_destroy(&tbl_data->matchers);
8088         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8089 }
8090
8091 /**
8092  * Release a flow table.
8093  *
8094  * @param[in] sh
8095  *   Pointer to device shared structure.
8096  * @param[in] tbl
8097  *   Table resource to be released.
8098  *
8099  * @return
8100  *   Returns 0 if the table was released, 1 otherwise.
8101  */
8102 static int
8103 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8104                              struct mlx5_flow_tbl_resource *tbl)
8105 {
8106         struct mlx5_flow_tbl_data_entry *tbl_data =
8107                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8108
8109         if (!tbl)
8110                 return 0;
8111         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8112 }
8113
8114 int
8115 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8116                          struct mlx5_cache_entry *entry, void *cb_ctx)
8117 {
8118         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8119         struct mlx5_flow_dv_matcher *ref = ctx->data;
8120         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8121                                                         entry);
8122
8123         return cur->crc != ref->crc ||
8124                cur->priority != ref->priority ||
8125                memcmp((const void *)cur->mask.buf,
8126                       (const void *)ref->mask.buf, ref->mask.size);
8127 }
8128
8129 struct mlx5_cache_entry *
8130 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8131                           struct mlx5_cache_entry *entry __rte_unused,
8132                           void *cb_ctx)
8133 {
8134         struct mlx5_dev_ctx_shared *sh = list->ctx;
8135         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8136         struct mlx5_flow_dv_matcher *ref = ctx->data;
8137         struct mlx5_flow_dv_matcher *cache;
8138         struct mlx5dv_flow_matcher_attr dv_attr = {
8139                 .type = IBV_FLOW_ATTR_NORMAL,
8140                 .match_mask = (void *)&ref->mask,
8141         };
8142         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8143                                                             typeof(*tbl), tbl);
8144         int ret;
8145
8146         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8147         if (!cache) {
8148                 rte_flow_error_set(ctx->error, ENOMEM,
8149                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8150                                    "cannot create matcher");
8151                 return NULL;
8152         }
8153         *cache = *ref;
8154         dv_attr.match_criteria_enable =
8155                 flow_dv_matcher_enable(cache->mask.buf);
8156         dv_attr.priority = ref->priority;
8157         if (tbl->is_egress)
8158                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8159         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8160                                                &cache->matcher_object);
8161         if (ret) {
8162                 mlx5_free(cache);
8163                 rte_flow_error_set(ctx->error, ENOMEM,
8164                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8165                                    "cannot create matcher");
8166                 return NULL;
8167         }
8168         return &cache->entry;
8169 }
8170
8171 /**
8172  * Register the flow matcher.
8173  *
8174  * @param[in, out] dev
8175  *   Pointer to rte_eth_dev structure.
8176  * @param[in, out] ref
8177  *   Pointer to the flow matcher reference.
8178  * @param[in, out] key
8179  *   Pointer to flow table key.
8180  * @param[in, out] dev_flow
8181  *   Pointer to the dev_flow.
8182  * @param[out] error
8183  *   pointer to error structure.
8184  *
8185  * @return
8186  *   0 on success, a negative errno value otherwise and rte_errno is set.
8187  */
8188 static int
8189 flow_dv_matcher_register(struct rte_eth_dev *dev,
8190                          struct mlx5_flow_dv_matcher *ref,
8191                          union mlx5_flow_tbl_key *key,
8192                          struct mlx5_flow *dev_flow,
8193                          struct rte_flow_error *error)
8194 {
8195         struct mlx5_cache_entry *entry;
8196         struct mlx5_flow_dv_matcher *cache;
8197         struct mlx5_flow_tbl_resource *tbl;
8198         struct mlx5_flow_tbl_data_entry *tbl_data;
8199         struct mlx5_flow_cb_ctx ctx = {
8200                 .error = error,
8201                 .data = ref,
8202         };
8203
8204         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
8205                                        key->domain, false, NULL, 0, 0, error);
8206         if (!tbl)
8207                 return -rte_errno;      /* No need to refill the error info */
8208         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8209         ref->tbl = tbl;
8210         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8211         if (!entry) {
8212                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8213                 return rte_flow_error_set(error, ENOMEM,
8214                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8215                                           "cannot allocate ref memory");
8216         }
8217         cache = container_of(entry, typeof(*cache), entry);
8218         dev_flow->handle->dvh.matcher = cache;
8219         return 0;
8220 }
8221
8222 struct mlx5_hlist_entry *
8223 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8224 {
8225         struct mlx5_dev_ctx_shared *sh = list->ctx;
8226         struct rte_flow_error *error = ctx;
8227         struct mlx5_flow_dv_tag_resource *entry;
8228         uint32_t idx = 0;
8229         int ret;
8230
8231         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8232         if (!entry) {
8233                 rte_flow_error_set(error, ENOMEM,
8234                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8235                                    "cannot allocate resource memory");
8236                 return NULL;
8237         }
8238         entry->idx = idx;
8239         ret = mlx5_flow_os_create_flow_action_tag(key,
8240                                                   &entry->action);
8241         if (ret) {
8242                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8243                 rte_flow_error_set(error, ENOMEM,
8244                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8245                                    NULL, "cannot create action");
8246                 return NULL;
8247         }
8248         return &entry->entry;
8249 }
8250
8251 /**
8252  * Find existing tag resource or create and register a new one.
8253  *
8254  * @param[in, out] dev
8255  *   Pointer to rte_eth_dev structure.
8256  * @param[in, out] tag_be24
8257  *   Tag value in big endian, right-shifted by 8 bits.
8258  * @param[in, out] dev_flow
8259  *   Pointer to the dev_flow.
8260  * @param[out] error
8261  *   pointer to error structure.
8262  *
8263  * @return
8264  *   0 on success, a negative errno value otherwise and rte_errno is set.
8265  */
8266 static int
8267 flow_dv_tag_resource_register
8268                         (struct rte_eth_dev *dev,
8269                          uint32_t tag_be24,
8270                          struct mlx5_flow *dev_flow,
8271                          struct rte_flow_error *error)
8272 {
8273         struct mlx5_priv *priv = dev->data->dev_private;
8274         struct mlx5_flow_dv_tag_resource *cache_resource;
8275         struct mlx5_hlist_entry *entry;
8276
8277         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8278         if (entry) {
8279                 cache_resource = container_of
8280                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8281                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8282                 dev_flow->dv.tag_resource = cache_resource;
8283                 return 0;
8284         }
8285         return -rte_errno;
8286 }
8287
8288 void
8289 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8290                       struct mlx5_hlist_entry *entry)
8291 {
8292         struct mlx5_dev_ctx_shared *sh = list->ctx;
8293         struct mlx5_flow_dv_tag_resource *tag =
8294                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8295
8296         MLX5_ASSERT(tag && sh && tag->action);
8297         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8298         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8299         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8300 }
8301
8302 /**
8303  * Release the tag.
8304  *
8305  * @param dev
8306  *   Pointer to Ethernet device.
8307  * @param tag_idx
8308  *   Tag index.
8309  *
8310  * @return
8311  *   1 while a reference on it exists, 0 when freed.
8312  */
8313 static int
8314 flow_dv_tag_release(struct rte_eth_dev *dev,
8315                     uint32_t tag_idx)
8316 {
8317         struct mlx5_priv *priv = dev->data->dev_private;
8318         struct mlx5_flow_dv_tag_resource *tag;
8319
8320         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8321         if (!tag)
8322                 return 0;
8323         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8324                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8325         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8326 }
8327
8328 /**
8329  * Translate port ID action to vport.
8330  *
8331  * @param[in] dev
8332  *   Pointer to rte_eth_dev structure.
8333  * @param[in] action
8334  *   Pointer to the port ID action.
8335  * @param[out] dst_port_id
8336  *   The target port ID.
8337  * @param[out] error
8338  *   Pointer to the error structure.
8339  *
8340  * @return
8341  *   0 on success, a negative errno value otherwise and rte_errno is set.
8342  */
8343 static int
8344 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8345                                  const struct rte_flow_action *action,
8346                                  uint32_t *dst_port_id,
8347                                  struct rte_flow_error *error)
8348 {
8349         uint32_t port;
8350         struct mlx5_priv *priv;
8351         const struct rte_flow_action_port_id *conf =
8352                         (const struct rte_flow_action_port_id *)action->conf;
8353
8354         port = conf->original ? dev->data->port_id : conf->id;
8355         priv = mlx5_port_to_eswitch_info(port, false);
8356         if (!priv)
8357                 return rte_flow_error_set(error, -rte_errno,
8358                                           RTE_FLOW_ERROR_TYPE_ACTION,
8359                                           NULL,
8360                                           "No eswitch info was found for port");
8361 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8362         /*
8363          * This parameter is transferred to
8364          * mlx5dv_dr_action_create_dest_ib_port().
8365          */
8366         *dst_port_id = priv->dev_port;
8367 #else
8368         /*
8369          * Legacy mode, no LAG configuration is supported.
8370          * This parameter is transferred to
8371          * mlx5dv_dr_action_create_dest_vport().
8372          */
8373         *dst_port_id = priv->vport_id;
8374 #endif
8375         return 0;
8376 }
8377
8378 /**
8379  * Create a counter with aging configuration.
8380  *
8381  * @param[in] dev
8382  *   Pointer to rte_eth_dev structure.
8383  * @param[in] dev_flow
8384  *   Pointer to the mlx5_flow.
 * @param[in] count
 *   Pointer to the counter action configuration.
8385  * @param[in] age
8386  *   Pointer to the aging action configuration.
8387  *
8388  * @return
8389  *   Index to flow counter on success, 0 otherwise.
8390  */
8391 static uint32_t
8392 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8393                                 struct mlx5_flow *dev_flow,
8394                                 const struct rte_flow_action_count *count,
8395                                 const struct rte_flow_action_age *age)
8396 {
8397         uint32_t counter;
8398         struct mlx5_age_param *age_param;
8399
8400         if (count && count->shared)
8401                 counter = flow_dv_counter_get_shared(dev, count->id);
8402         else
8403                 counter = flow_dv_counter_alloc(dev, !!age);
8404         if (!counter || age == NULL)
8405                 return counter;
8406         age_param = flow_dv_counter_idx_get_age(dev, counter);
8407         age_param->context = age->context ? age->context :
8408                 (void *)(uintptr_t)(dev_flow->flow_idx);
8409         age_param->timeout = age->timeout;
8410         age_param->port_id = dev->data->port_id;
8411         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8412         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8413         return counter;
8414 }
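
/*
 * Usage sketch (illustrative only): an AGE action with a 10 second
 * timeout allocates an age-enabled counter and arms its parameters:
 *
 *     struct rte_flow_action_age age = { .timeout = 10 };
 *     uint32_t cnt = flow_dv_translate_create_counter(dev, dev_flow,
 *                                                     NULL, &age);
 */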
8415
8416 /**
8417  * Add Tx queue matcher
8418  *
8419  * @param[in] dev
8420  *   Pointer to the dev struct.
8421  * @param[in, out] matcher
8422  *   Flow matcher.
8423  * @param[in, out] key
8424  *   Flow matcher value.
8425  * @param[in] item
8426  *   Flow pattern to translate.
8429  */
8430 static void
8431 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8432                                 void *matcher, void *key,
8433                                 const struct rte_flow_item *item)
8434 {
8435         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8436         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8437         void *misc_m =
8438                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8439         void *misc_v =
8440                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8441         struct mlx5_txq_ctrl *txq;
8442         uint32_t queue;
8443
8445         queue_m = (const void *)item->mask;
8446         if (!queue_m)
8447                 return;
8448         queue_v = (const void *)item->spec;
8449         if (!queue_v)
8450                 return;
8451         txq = mlx5_txq_get(dev, queue_v->queue);
8452         if (!txq)
8453                 return;
8454         queue = txq->obj->sq->id;
8455         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8456         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8457                  queue & queue_m->queue);
8458         mlx5_txq_release(dev, queue_v->queue);
8459 }
8460
8461 /**
8462  * Set the hash fields according to the @p flow information.
8463  *
8464  * @param[in] dev_flow
8465  *   Pointer to the mlx5_flow.
8466  * @param[in] rss_desc
8467  *   Pointer to the mlx5_flow_rss_desc.
8468  */
8469 static void
8470 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8471                        struct mlx5_flow_rss_desc *rss_desc)
8472 {
8473         uint64_t items = dev_flow->handle->layers;
8474         int rss_inner = 0;
8475         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8476
8477         dev_flow->hash_fields = 0;
8478 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8479         if (rss_desc->level >= 2) {
8480                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8481                 rss_inner = 1;
8482         }
8483 #endif
8484         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8485             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8486                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8487                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8488                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8489                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8490                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8491                         else
8492                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8493                 }
8494         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8495                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8496                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8497                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8498                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8499                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8500                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8501                         else
8502                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8503                 }
8504         }
8505         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8506             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8507                 if (rss_types & ETH_RSS_UDP) {
8508                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8509                                 dev_flow->hash_fields |=
8510                                                 IBV_RX_HASH_SRC_PORT_UDP;
8511                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8512                                 dev_flow->hash_fields |=
8513                                                 IBV_RX_HASH_DST_PORT_UDP;
8514                         else
8515                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8516                 }
8517         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8518                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8519                 if (rss_types & ETH_RSS_TCP) {
8520                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8521                                 dev_flow->hash_fields |=
8522                                                 IBV_RX_HASH_SRC_PORT_TCP;
8523                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8524                                 dev_flow->hash_fields |=
8525                                                 IBV_RX_HASH_DST_PORT_TCP;
8526                         else
8527                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8528                 }
8529         }
8530 }
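
/*
 * Example (illustrative only): an outer IPv4/UDP flow with RSS types
 * ETH_RSS_UDP | ETH_RSS_L4_DST_ONLY (and no L3 types requested) ends up
 * with dev_flow->hash_fields == IBV_RX_HASH_DST_PORT_UDP only.
 */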
8531
8532 /**
8533  * Prepare an Rx Hash queue.
8534  *
8535  * @param dev
8536  *   Pointer to Ethernet device.
8537  * @param[in] dev_flow
8538  *   Pointer to the mlx5_flow.
8539  * @param[in] rss_desc
8540  *   Pointer to the mlx5_flow_rss_desc.
8541  * @param[out] hrxq_idx
8542  *   Hash Rx queue index.
8543  *
8544  * @return
8545  *   The hash Rx queue object initialized, NULL otherwise and rte_errno is set.
8546  */
8547 static struct mlx5_hrxq *
8548 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8549                      struct mlx5_flow *dev_flow,
8550                      struct mlx5_flow_rss_desc *rss_desc,
8551                      uint32_t *hrxq_idx)
8552 {
8553         struct mlx5_priv *priv = dev->data->dev_private;
8554         struct mlx5_flow_handle *dh = dev_flow->handle;
8555         struct mlx5_hrxq *hrxq;
8556
8557         MLX5_ASSERT(rss_desc->queue_num);
8558         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8559         rss_desc->hash_fields = dev_flow->hash_fields;
8560         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8561         rss_desc->standalone = false;
8562         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8563         if (!*hrxq_idx)
8564                 return NULL;
8565         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8566                               *hrxq_idx);
8567         return hrxq;
8568 }
8569
8570 /**
8571  * Release sample sub action resource.
8572  *
8573  * @param[in, out] dev
8574  *   Pointer to rte_eth_dev structure.
8575  * @param[in] act_res
8576  *   Pointer to sample sub action resource.
8577  */
8578 static void
8579 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8580                                    struct mlx5_flow_sub_actions_idx *act_res)
8581 {
8582         if (act_res->rix_hrxq) {
8583                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8584                 act_res->rix_hrxq = 0;
8585         }
8586         if (act_res->rix_encap_decap) {
8587                 flow_dv_encap_decap_resource_release(dev,
8588                                                      act_res->rix_encap_decap);
8589                 act_res->rix_encap_decap = 0;
8590         }
8591         if (act_res->rix_port_id_action) {
8592                 flow_dv_port_id_action_resource_release(dev,
8593                                                 act_res->rix_port_id_action);
8594                 act_res->rix_port_id_action = 0;
8595         }
8596         if (act_res->rix_tag) {
8597                 flow_dv_tag_release(dev, act_res->rix_tag);
8598                 act_res->rix_tag = 0;
8599         }
8600         if (act_res->cnt) {
8601                 flow_dv_counter_free(dev, act_res->cnt);
8602                 act_res->cnt = 0;
8603         }
8604 }
8605
8606 int
8607 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8608                         struct mlx5_cache_entry *entry, void *cb_ctx)
8609 {
8610         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8611         struct rte_eth_dev *dev = ctx->dev;
8612         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8613         struct mlx5_flow_dv_sample_resource *cache_resource =
8614                         container_of(entry, typeof(*cache_resource), entry);
8615
8616         if (resource->ratio == cache_resource->ratio &&
8617             resource->ft_type == cache_resource->ft_type &&
8618             resource->ft_id == cache_resource->ft_id &&
8619             resource->set_action == cache_resource->set_action &&
8620             !memcmp((void *)&resource->sample_act,
8621                     (void *)&cache_resource->sample_act,
8622                     sizeof(struct mlx5_flow_sub_actions_list))) {
8623                 /*
8624                  * The existing sample action should release the prepared
8625                  * sub-actions' reference counters.
8626                  */
8627                 flow_dv_sample_sub_actions_release(dev,
8628                                                 &resource->sample_idx);
8629                 return 0;
8630         }
8631         return 1;
8632 }
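
/*
 * Editor's note: hedged usage sketch of the match-callback contract above.
 * The cache list invokes the callback per entry and treats a 0 return as
 * "equal"; this wrapper is illustrative only and not part of the driver.
 */
static bool
sample_resource_equal_example(struct rte_eth_dev *dev,
			      struct mlx5_flow_dv_sample_resource *resource,
			      struct mlx5_cache_entry *entry)
{
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.data = resource, /* The candidate to compare against entry. */
	};

	return flow_dv_sample_match_cb(NULL, entry, &ctx) == 0;
}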
8633
8634 struct mlx5_cache_entry *
8635 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
8636                          struct mlx5_cache_entry *entry __rte_unused,
8637                          void *cb_ctx)
8638 {
8639         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8640         struct rte_eth_dev *dev = ctx->dev;
8641         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8642         void **sample_dv_actions = resource->sub_actions;
8643         struct mlx5_flow_dv_sample_resource *cache_resource;
8644         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8645         struct mlx5_priv *priv = dev->data->dev_private;
8646         struct mlx5_dev_ctx_shared *sh = priv->sh;
8647         struct mlx5_flow_tbl_resource *tbl;
8648         uint32_t idx = 0;
8649         const uint32_t next_ft_step = 1;
8650         uint32_t next_ft_id = resource->ft_id + next_ft_step;
8651         uint8_t is_egress = 0;
8652         uint8_t is_transfer = 0;
8653         struct rte_flow_error *error = ctx->error;
8654
8655         /* Register new sample resource. */
8656         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
8657         if (!cache_resource) {
8658                 rte_flow_error_set(error, ENOMEM,
8659                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8660                                           NULL,
8661                                           "cannot allocate resource memory");
8662                 return NULL;
8663         }
8664         *cache_resource = *resource;
8665         /* Create normal path table level */
8666         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8667                 is_transfer = 1;
8668         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
8669                 is_egress = 1;
8670         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8671                                         is_egress, is_transfer,
8672                                         true, NULL, 0, 0, error);
8673         if (!tbl) {
8674                 rte_flow_error_set(error, ENOMEM,
8675                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8676                                           NULL,
8677                                           "failed to create normal path table "
8678                                           "for sample");
8679                 goto error;
8680         }
8681         cache_resource->normal_path_tbl = tbl;
8682         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8683                 cache_resource->default_miss =
8684                                 mlx5_glue->dr_create_flow_action_default_miss();
8685                 if (!cache_resource->default_miss) {
8686                         rte_flow_error_set(error, ENOMEM,
8687                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8688                                                 NULL,
8689                                                 "cannot create default miss "
8690                                                 "action");
8691                         goto error;
8692                 }
8693                 sample_dv_actions[resource->sample_act.actions_num++] =
8694                                                 cache_resource->default_miss;
8695         }
8696         /* Create a DR sample action */
8697         sampler_attr.sample_ratio = cache_resource->ratio;
8698         sampler_attr.default_next_table = tbl->obj;
8699         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8700         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8701                                                         &sample_dv_actions[0];
8702         sampler_attr.action = cache_resource->set_action;
8703         cache_resource->verbs_action =
8704                 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8705         if (!cache_resource->verbs_action) {
8706                 rte_flow_error_set(error, ENOMEM,
8707                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8708                                         NULL, "cannot create sample action");
8709                 goto error;
8710         }
8711         cache_resource->idx = idx;
8712         return &cache_resource->entry;
8713 error:
8714         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
8715             cache_resource->default_miss)
8716                 claim_zero(mlx5_glue->destroy_flow_action
8717                                 (cache_resource->default_miss));
8718         else
8719                 flow_dv_sample_sub_actions_release(dev,
8720                                                    &cache_resource->sample_idx);
8721         if (cache_resource->normal_path_tbl)
8722                 flow_dv_tbl_resource_release(MLX5_SH(dev),
8723                                 cache_resource->normal_path_tbl);
8724         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
8725         return NULL;
8727 }
8728
8729 /**
8730  * Find existing sample resource or create and register a new one.
8731  *
8732  * @param[in, out] dev
8733  *   Pointer to rte_eth_dev structure.
8734  * @param[in] resource
8735  *   Pointer to sample resource.
8736  * @param[in, out] dev_flow
8737  *   Pointer to the dev_flow.
8738  * @param[out] error
8739  *   Pointer to the error structure.
8740  *
8741  * @return
8742  *   0 on success, otherwise a negative errno value and rte_errno is set.
8743  */
8744 static int
8745 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8746                          struct mlx5_flow_dv_sample_resource *resource,
8747                          struct mlx5_flow *dev_flow,
8748                          struct rte_flow_error *error)
8749 {
8750         struct mlx5_flow_dv_sample_resource *cache_resource;
8751         struct mlx5_cache_entry *entry;
8752         struct mlx5_priv *priv = dev->data->dev_private;
8753         struct mlx5_flow_cb_ctx ctx = {
8754                 .dev = dev,
8755                 .error = error,
8756                 .data = resource,
8757         };
8758
8759         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8760         if (!entry)
8761                 return -rte_errno;
8762         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8763         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8764         dev_flow->dv.sample_res = cache_resource;
8765         return 0;
8766 }
8767
8768 int
8769 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8770                             struct mlx5_cache_entry *entry, void *cb_ctx)
8771 {
8772         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8773         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8774         struct rte_eth_dev *dev = ctx->dev;
8775         struct mlx5_flow_dv_dest_array_resource *cache_resource =
8776                         container_of(entry, typeof(*cache_resource), entry);
8777         uint32_t idx = 0;
8778
8779         if (resource->num_of_dest == cache_resource->num_of_dest &&
8780             resource->ft_type == cache_resource->ft_type &&
8781             !memcmp((void *)cache_resource->sample_act,
8782                     (void *)resource->sample_act,
8783                    (resource->num_of_dest *
8784                    sizeof(struct mlx5_flow_sub_actions_list)))) {
8785                 /*
8786                  * The existing sample action should release the prepared
8787                  * sub-actions' reference counters.
8788                  */
8789                 for (idx = 0; idx < resource->num_of_dest; idx++)
8790                         flow_dv_sample_sub_actions_release(dev,
8791                                         &resource->sample_idx[idx]);
8792                 return 0;
8793         }
8794         return 1;
8795 }
8796
8797 struct mlx5_cache_entry *
8798 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
8799                          struct mlx5_cache_entry *entry __rte_unused,
8800                          void *cb_ctx)
8801 {
8802         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8803         struct rte_eth_dev *dev = ctx->dev;
8804         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8805         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8806         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8807         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8808         struct mlx5_priv *priv = dev->data->dev_private;
8809         struct mlx5_dev_ctx_shared *sh = priv->sh;
8810         struct mlx5_flow_sub_actions_list *sample_act;
8811         struct mlx5dv_dr_domain *domain;
8812         uint32_t idx = 0, res_idx = 0;
8813         struct rte_flow_error *error = ctx->error;
8814
8815         /* Register new destination array resource. */
8816         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8817                                             &res_idx);
8818         if (!cache_resource) {
8819                 rte_flow_error_set(error, ENOMEM,
8820                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8821                                           NULL,
8822                                           "cannot allocate resource memory");
8823                 return NULL;
8824         }
8825         *cache_resource = *resource;
8826         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8827                 domain = sh->fdb_domain;
8828         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
8829                 domain = sh->rx_domain;
8830         else
8831                 domain = sh->tx_domain;
8832         for (idx = 0; idx < resource->num_of_dest; idx++) {
8833                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8834                                  mlx5_malloc(MLX5_MEM_ZERO,
8835                                  sizeof(struct mlx5dv_dr_action_dest_attr),
8836                                  0, SOCKET_ID_ANY);
8837                 if (!dest_attr[idx]) {
8838                         rte_flow_error_set(error, ENOMEM,
8839                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8840                                            NULL,
8841                                            "cannot allocate resource memory");
8842                         goto error;
8843                 }
8844                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8845                 sample_act = &resource->sample_act[idx];
8846                 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8847                         dest_attr[idx]->dest = sample_act->dr_queue_action;
8848                 } else if (sample_act->action_flags ==
8849                           (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
8850                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8851                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8852                         dest_attr[idx]->dest_reformat->reformat =
8853                                         sample_act->dr_encap_action;
8854                         dest_attr[idx]->dest_reformat->dest =
8855                                         sample_act->dr_port_id_action;
8856                 } else if (sample_act->action_flags ==
8857                            MLX5_FLOW_ACTION_PORT_ID) {
8858                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
8859                 }
8860         }
8861         /* Create a dest array action */
8862         cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8863                                                 (domain,
8864                                                  cache_resource->num_of_dest,
8865                                                  dest_attr);
8866         if (!cache_resource->action) {
8867                 rte_flow_error_set(error, ENOMEM,
8868                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8869                                    NULL,
8870                                    "cannot create destination array action");
8871                 goto error;
8872         }
8873         cache_resource->idx = res_idx;
8874         for (idx = 0; idx < resource->num_of_dest; idx++)
8875                 mlx5_free(dest_attr[idx]);
8876         return &cache_resource->entry;
8877 error:
8878         for (idx = 0; idx < resource->num_of_dest; idx++) {
8879                 struct mlx5_flow_sub_actions_idx *act_res =
8880                                         &cache_resource->sample_idx[idx];
8881                 if (act_res->rix_hrxq &&
8882                     !mlx5_hrxq_release(dev,
8883                                 act_res->rix_hrxq))
8884                         act_res->rix_hrxq = 0;
8885                 if (act_res->rix_encap_decap &&
8886                         !flow_dv_encap_decap_resource_release(dev,
8887                                 act_res->rix_encap_decap))
8888                         act_res->rix_encap_decap = 0;
8889                 if (act_res->rix_port_id_action &&
8890                         !flow_dv_port_id_action_resource_release(dev,
8891                                 act_res->rix_port_id_action))
8892                         act_res->rix_port_id_action = 0;
8893                 if (dest_attr[idx])
8894                         mlx5_free(dest_attr[idx]);
8895         }
8896
8897         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
8898         return NULL;
8899 }
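
/*
 * Editor's note: minimal sketch of a two-destination array as assembled in
 * the create callback above, assuming the rdma-core entry point behind the
 * mlx5_glue call is mlx5dv_dr_action_create_dest_array() and that the
 * queue/port DR actions were created beforehand. Illustration only.
 */
static struct mlx5dv_dr_action *
dest_array_example(struct mlx5dv_dr_domain *domain,
		   struct mlx5dv_dr_action *queue_action,
		   struct mlx5dv_dr_action *port_id_action)
{
	struct mlx5dv_dr_action_dest_attr queue_dest = {
		.type = MLX5DV_DR_ACTION_DEST,
		.dest = queue_action,
	};
	struct mlx5dv_dr_action_dest_attr vport_dest = {
		.type = MLX5DV_DR_ACTION_DEST,
		.dest = port_id_action,
	};
	struct mlx5dv_dr_action_dest_attr *dests[] = {
		&queue_dest, &vport_dest,
	};

	/* One DR action fanning traffic out to both destinations. */
	return mlx5dv_dr_action_create_dest_array(domain, 2, dests);
}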
8900
8901 /**
8902  * Find existing destination array resource or create and register a new one.
8903  *
8904  * @param[in, out] dev
8905  *   Pointer to rte_eth_dev structure.
8906  * @param[in] resource
8907  *   Pointer to destination array resource.
8908  * @param[in, out] dev_flow
8909  *   Pointer to the dev_flow.
8910  * @param[out] error
8911  *   Pointer to the error structure.
8912  *
8913  * @return
8914  *   0 on success, otherwise a negative errno value and rte_errno is set.
8915  */
8916 static int
8917 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8918                          struct mlx5_flow_dv_dest_array_resource *resource,
8919                          struct mlx5_flow *dev_flow,
8920                          struct rte_flow_error *error)
8921 {
8922         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8923         struct mlx5_priv *priv = dev->data->dev_private;
8924         struct mlx5_cache_entry *entry;
8925         struct mlx5_flow_cb_ctx ctx = {
8926                 .dev = dev,
8927                 .error = error,
8928                 .data = resource,
8929         };
8930
8931         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
8932         if (!entry)
8933                 return -rte_errno;
8934         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8935         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
8936         dev_flow->dv.dest_array_res = cache_resource;
8937         return 0;
8938 }
8939
8940 /**
8941  * Convert Sample action to DV specification.
8942  *
8943  * @param[in] dev
8944  *   Pointer to rte_eth_dev structure.
8945  * @param[in] action
8946  *   Pointer to action structure.
8947  * @param[in, out] dev_flow
8948  *   Pointer to the mlx5_flow.
8949  * @param[in] attr
8950  *   Pointer to the flow attributes.
8951  * @param[in, out] num_of_dest
8952  *   Pointer to the num of destination.
8953  * @param[in, out] sample_actions
8954  *   Pointer to sample actions list.
8955  * @param[in, out] res
8956  *   Pointer to sample resource.
8957  * @param[out] error
8958  *   Pointer to the error structure.
8959  *
8960  * @return
8961  *   0 on success, a negative errno value otherwise and rte_errno is set.
8962  */
8963 static int
8964 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
8965                                 const struct rte_flow_action *action,
8966                                 struct mlx5_flow *dev_flow,
8967                                 const struct rte_flow_attr *attr,
8968                                 uint32_t *num_of_dest,
8969                                 void **sample_actions,
8970                                 struct mlx5_flow_dv_sample_resource *res,
8971                                 struct rte_flow_error *error)
8972 {
8973         struct mlx5_priv *priv = dev->data->dev_private;
8974         const struct rte_flow_action_sample *sample_action;
8975         const struct rte_flow_action *sub_actions;
8976         const struct rte_flow_action_queue *queue;
8977         struct mlx5_flow_sub_actions_list *sample_act;
8978         struct mlx5_flow_sub_actions_idx *sample_idx;
8979         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
8980         struct mlx5_flow_rss_desc *rss_desc;
8981         uint64_t action_flags = 0;
8982
8983         MLX5_ASSERT(wks);
8984         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
8985         sample_act = &res->sample_act;
8986         sample_idx = &res->sample_idx;
8987         sample_action = (const struct rte_flow_action_sample *)action->conf;
8988         res->ratio = sample_action->ratio;
8989         sub_actions = sample_action->actions;
8990         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
8991                 int type = sub_actions->type;
8992                 uint32_t pre_rix = 0;
8993                 void *pre_r;
8994                 switch (type) {
8995                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8996                 {
8997                         struct mlx5_hrxq *hrxq;
8998                         uint32_t hrxq_idx;
8999
9000                         queue = sub_actions->conf;
9001                         rss_desc->queue_num = 1;
9002                         rss_desc->queue[0] = queue->index;
9003                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9004                                                     rss_desc, &hrxq_idx);
9005                         if (!hrxq)
9006                                 return rte_flow_error_set
9007                                         (error, rte_errno,
9008                                          RTE_FLOW_ERROR_TYPE_ACTION,
9009                                          NULL,
9010                                          "cannot create fate queue");
9011                         sample_act->dr_queue_action = hrxq->action;
9012                         sample_idx->rix_hrxq = hrxq_idx;
9013                         sample_actions[sample_act->actions_num++] =
9014                                                 hrxq->action;
9015                         (*num_of_dest)++;
9016                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9017                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9018                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9019                         dev_flow->handle->fate_action =
9020                                         MLX5_FLOW_FATE_QUEUE;
9021                         break;
9022                 }
9023                 case RTE_FLOW_ACTION_TYPE_MARK:
9024                 {
9025                         uint32_t tag_be = mlx5_flow_mark_set
9026                                 (((const struct rte_flow_action_mark *)
9027                                 (sub_actions->conf))->id);
9028
9029                         dev_flow->handle->mark = 1;
9030                         pre_rix = dev_flow->handle->dvh.rix_tag;
9031                         /* Save the mark resource before sample */
9032                         pre_r = dev_flow->dv.tag_resource;
9033                         if (flow_dv_tag_resource_register(dev, tag_be,
9034                                                   dev_flow, error))
9035                                 return -rte_errno;
9036                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9037                         sample_act->dr_tag_action =
9038                                 dev_flow->dv.tag_resource->action;
9039                         sample_idx->rix_tag =
9040                                 dev_flow->handle->dvh.rix_tag;
9041                         sample_actions[sample_act->actions_num++] =
9042                                                 sample_act->dr_tag_action;
9043                         /* Recover the mark resource after sample */
9044                         dev_flow->dv.tag_resource = pre_r;
9045                         dev_flow->handle->dvh.rix_tag = pre_rix;
9046                         action_flags |= MLX5_FLOW_ACTION_MARK;
9047                         break;
9048                 }
9049                 case RTE_FLOW_ACTION_TYPE_COUNT:
9050                 {
9051                         uint32_t counter;
9052
9053                         counter = flow_dv_translate_create_counter(dev,
9054                                         dev_flow, sub_actions->conf, 0);
9055                         if (!counter)
9056                                 return rte_flow_error_set
9057                                                 (error, rte_errno,
9058                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9059                                                  NULL,
9060                                                  "cannot create counter"
9061                                                  " object.");
9062                         sample_idx->cnt = counter;
9063                         sample_act->dr_cnt_action =
9064                                   (flow_dv_counter_get_by_idx(dev,
9065                                   counter, NULL))->action;
9066                         sample_actions[sample_act->actions_num++] =
9067                                                 sample_act->dr_cnt_action;
9068                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9069                         break;
9070                 }
9071                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9072                 {
9073                         struct mlx5_flow_dv_port_id_action_resource
9074                                         port_id_resource;
9075                         uint32_t port_id = 0;
9076
9077                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9078                         /* Save the port id resource before sample */
9079                         pre_rix = dev_flow->handle->rix_port_id_action;
9080                         pre_r = dev_flow->dv.port_id_action;
9081                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9082                                                              &port_id, error))
9083                                 return -rte_errno;
9084                         port_id_resource.port_id = port_id;
9085                         if (flow_dv_port_id_action_resource_register
9086                             (dev, &port_id_resource, dev_flow, error))
9087                                 return -rte_errno;
9088                         sample_act->dr_port_id_action =
9089                                 dev_flow->dv.port_id_action->action;
9090                         sample_idx->rix_port_id_action =
9091                                 dev_flow->handle->rix_port_id_action;
9092                         sample_actions[sample_act->actions_num++] =
9093                                                 sample_act->dr_port_id_action;
9094                         /* Recover the port id resource after sample */
9095                         dev_flow->dv.port_id_action = pre_r;
9096                         dev_flow->handle->rix_port_id_action = pre_rix;
9097                         (*num_of_dest)++;
9098                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9099                         break;
9100                 }
9101                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9102                         /* Save the encap resource before sample */
9103                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9104                         pre_r = dev_flow->dv.encap_decap;
9105                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9106                                                            dev_flow,
9107                                                            attr->transfer,
9108                                                            error))
9109                                 return -rte_errno;
9110                         sample_act->dr_encap_action =
9111                                 dev_flow->dv.encap_decap->action;
9112                         sample_idx->rix_encap_decap =
9113                                 dev_flow->handle->dvh.rix_encap_decap;
9114                         sample_actions[sample_act->actions_num++] =
9115                                                 sample_act->dr_encap_action;
9116                         /* Recover the encap resource after sample */
9117                         dev_flow->dv.encap_decap = pre_r;
9118                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9119                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9120                         break;
9121                 default:
9122                         return rte_flow_error_set(error, EINVAL,
9123                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9124                                 NULL,
9125                                 "unsupported action for sampler");
9126                 }
9127         }
9128         sample_act->action_flags = action_flags;
9129         res->ft_id = dev_flow->dv.group;
9130         if (attr->transfer) {
9131                 union {
9132                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9133                         uint64_t set_action;
9134                 } action_ctx = { .set_action = 0 };
9135
9136                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9137                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9138                          MLX5_MODIFICATION_TYPE_SET);
9139                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9140                          MLX5_MODI_META_REG_C_0);
9141                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9142                          priv->vport_meta_tag);
9143                 res->set_action = action_ctx.set_action;
9144         } else if (attr->ingress) {
9145                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9146         } else {
9147                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9148         }
9149         return 0;
9150 }
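
/*
 * Editor's note: hedged example of the rte_flow input this translation
 * consumes - a 1-in-2 sample whose sampled packets are steered to queue 1.
 * The queue index and ratio are arbitrary illustration values.
 */
static const struct rte_flow_action_queue sample_queue_example = {
	.index = 1,
};
static const struct rte_flow_action sample_sub_actions_example[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &sample_queue_example,
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_END,
	},
};
static const struct rte_flow_action_sample sample_conf_example = {
	.ratio = 2, /* Sample 1 of every 2 packets; ratio 1 mirrors all. */
	.actions = sample_sub_actions_example,
};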
9151
9152 /**
9153  * Create and register the sample action resources.
9154  *
9155  * @param[in] dev
9156  *   Pointer to rte_eth_dev structure.
9157  * @param[in, out] dev_flow
9158  *   Pointer to the mlx5_flow.
9159  * @param[in] num_of_dest
9160  *   The num of destination.
9161  * @param[in, out] res
9162  *   Pointer to sample resource.
9163  * @param[in, out] mdest_res
9164  *   Pointer to destination array resource.
9165  * @param[in] sample_actions
9166  *   Pointer to sample path actions list.
9167  * @param[in] action_flags
9168  *   Holds the actions detected until now.
9169  * @param[out] error
9170  *   Pointer to the error structure.
9171  *
9172  * @return
9173  *   0 on success, a negative errno value otherwise and rte_errno is set.
9174  */
9175 static int
9176 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9177                              struct mlx5_flow *dev_flow,
9178                              uint32_t num_of_dest,
9179                              struct mlx5_flow_dv_sample_resource *res,
9180                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9181                              void **sample_actions,
9182                              uint64_t action_flags,
9183                              struct rte_flow_error *error)
9184 {
9185         /* update normal path action resource into last index of array */
9186         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9187         struct mlx5_flow_sub_actions_list *sample_act =
9188                                         &mdest_res->sample_act[dest_index];
9189         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9190         struct mlx5_flow_rss_desc *rss_desc;
9191         uint32_t normal_idx = 0;
9192         struct mlx5_hrxq *hrxq;
9193         uint32_t hrxq_idx;
9194
9195         MLX5_ASSERT(wks);
9196         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9197         if (num_of_dest > 1) {
9198                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9199                         /* Handle QP action for mirroring */
9200                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9201                                                     rss_desc, &hrxq_idx);
9202                         if (!hrxq)
9203                                 return rte_flow_error_set
9204                                      (error, rte_errno,
9205                                       RTE_FLOW_ERROR_TYPE_ACTION,
9206                                       NULL,
9207                                       "cannot create rx queue");
9208                         normal_idx++;
9209                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9210                         sample_act->dr_queue_action = hrxq->action;
9211                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9212                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9213                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9214                 }
9215                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9216                         normal_idx++;
9217                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9218                                 dev_flow->handle->dvh.rix_encap_decap;
9219                         sample_act->dr_encap_action =
9220                                 dev_flow->dv.encap_decap->action;
9221                 }
9222                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9223                         normal_idx++;
9224                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9225                                 dev_flow->handle->rix_port_id_action;
9226                         sample_act->dr_port_id_action =
9227                                 dev_flow->dv.port_id_action->action;
9228                 }
9229                 sample_act->actions_num = normal_idx;
9230                 /* update sample action resource into first index of array */
9231                 mdest_res->ft_type = res->ft_type;
9232                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9233                                 sizeof(struct mlx5_flow_sub_actions_idx));
9234                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9235                                 sizeof(struct mlx5_flow_sub_actions_list));
9236                 mdest_res->num_of_dest = num_of_dest;
9237                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9238                                                          dev_flow, error))
9239                         return rte_flow_error_set(error, EINVAL,
9240                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9241                                                   NULL, "can't create sample "
9242                                                   "action");
9243         } else {
9244                 res->sub_actions = sample_actions;
9245                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9246                         return rte_flow_error_set(error, EINVAL,
9247                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9248                                                   NULL,
9249                                                   "can't create sample action");
9250         }
9251         return 0;
9252 }
9253
9254 /**
9255  * Remove an ASO age action from age actions list.
9256  *
9257  * @param[in] dev
9258  *   Pointer to the Ethernet device structure.
9259  * @param[in] age
9260  *   Pointer to the aso age action handler.
9261  */
9262 static void
9263 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9264                                 struct mlx5_aso_age_action *age)
9265 {
9266         struct mlx5_age_info *age_info;
9267         struct mlx5_age_param *age_param = &age->age_params;
9268         struct mlx5_priv *priv = dev->data->dev_private;
9269         uint16_t expected = AGE_CANDIDATE;
9270
9271         age_info = GET_PORT_AGE_INFO(priv);
9272         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9273                                          AGE_FREE, false, __ATOMIC_RELAXED,
9274                                          __ATOMIC_RELAXED)) {
9275                 /*
9276                  * We need the lock even on age timeout,
9277                  * since the age action may still be in process.
9278                  */
9279                 rte_spinlock_lock(&age_info->aged_sl);
9280                 LIST_REMOVE(age, next);
9281                 rte_spinlock_unlock(&age_info->aged_sl);
9282                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9283         }
9284 }
9285
9286 /**
9287  * Release an ASO age action.
9288  *
9289  * @param[in] dev
9290  *   Pointer to the Ethernet device structure.
9291  * @param[in] age_idx
9292  *   Index of ASO age action to release.
9293  * @param[in] flow
9294  *   True if the release operation is during flow destroy operation.
9295  *   False if the release operation is during action destroy operation.
9296  *
9297  * @return
9298  *   0 when age action was removed, otherwise the number of references.
9299  */
9300 static int
9301 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
9302 {
9303         struct mlx5_priv *priv = dev->data->dev_private;
9304         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9305         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
9306         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
9307
9308         if (!ret) {
9309                 flow_dv_aso_age_remove_from_age(dev, age);
9310                 rte_spinlock_lock(&mng->free_sl);
9311                 LIST_INSERT_HEAD(&mng->free, age, next);
9312                 rte_spinlock_unlock(&mng->free_sl);
9313         }
9314         return ret;
9315 }
9316
9317 /**
9318  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9319  *
9320  * @param[in] dev
9321  *   Pointer to the Ethernet device structure.
9322  *
9323  * @return
9324  *   0 on success, otherwise negative errno value and rte_errno is set.
9325  */
9326 static int
9327 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9328 {
9329         struct mlx5_priv *priv = dev->data->dev_private;
9330         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9331         void *old_pools = mng->pools;
9332         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9333         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9334         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9335
9336         if (!pools) {
9337                 rte_errno = ENOMEM;
9338                 return -ENOMEM;
9339         }
9340         if (old_pools) {
9341                 memcpy(pools, old_pools,
9342                        mng->n * sizeof(struct mlx5_aso_age_pool *));
9343                 mlx5_free(old_pools);
9344         } else {
9345                 /* First ASO flow hit allocation - starting ASO data-path. */
9346                 int ret = mlx5_aso_queue_start(priv->sh);
9347
9348                 if (ret)
9349                         return ret;
9350         }
9351         mng->n = resize;
9352         mng->pools = pools;
9353         return 0;
9354 }
9355
9356 /**
9357  * Create and initialize a new ASO aging pool.
9358  *
9359  * @param[in] dev
9360  *   Pointer to the Ethernet device structure.
9361  * @param[out] age_free
9362  *   Where to put the pointer of a new age action.
9363  *
9364  * @return
9365  *   The age actions pool pointer and @p age_free is set on success,
9366  *   NULL otherwise and rte_errno is set.
9367  */
9368 static struct mlx5_aso_age_pool *
9369 flow_dv_age_pool_create(struct rte_eth_dev *dev,
9370                         struct mlx5_aso_age_action **age_free)
9371 {
9372         struct mlx5_priv *priv = dev->data->dev_private;
9373         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9374         struct mlx5_aso_age_pool *pool = NULL;
9375         struct mlx5_devx_obj *obj = NULL;
9376         uint32_t i;
9377
9378         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
9379                                                     priv->sh->pdn);
9380         if (!obj) {
9381                 rte_errno = ENODATA;
9382                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
9383                 return NULL;
9384         }
9385         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9386         if (!pool) {
9387                 claim_zero(mlx5_devx_cmd_destroy(obj));
9388                 rte_errno = ENOMEM;
9389                 return NULL;
9390         }
9391         pool->flow_hit_aso_obj = obj;
9392         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
9393         rte_spinlock_lock(&mng->resize_sl);
9394         pool->index = mng->next;
9395         /* Resize pools array if there is no room for the new pool in it. */
9396         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
9397                 claim_zero(mlx5_devx_cmd_destroy(obj));
9398                 mlx5_free(pool);
9399                 rte_spinlock_unlock(&mng->resize_sl);
9400                 return NULL;
9401         }
9402         mng->pools[pool->index] = pool;
9403         mng->next++;
9404         rte_spinlock_unlock(&mng->resize_sl);
9405         /* Assign the first action in the new pool, the rest go to the free list. */
9406         *age_free = &pool->actions[0];
9407         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
9408                 pool->actions[i].offset = i;
9409                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
9410         }
9411         return pool;
9412 }
9413
9414 /**
9415  * Allocate an ASO aging bit.
9416  *
9417  * @param[in] dev
9418  *   Pointer to the Ethernet device structure.
9419  *
9420  * @return
9421  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
9422  */
9423 static uint32_t
9424 flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
9425 {
9426         struct mlx5_priv *priv = dev->data->dev_private;
9427         const struct mlx5_aso_age_pool *pool;
9428         struct mlx5_aso_age_action *age_free = NULL;
9429         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9430
9431         MLX5_ASSERT(mng);
9432         /* Try to get the next free age action bit. */
9433         rte_spinlock_lock(&mng->free_sl);
9434         age_free = LIST_FIRST(&mng->free);
9435         if (age_free) {
9436                 LIST_REMOVE(age_free, next);
9437         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
9438                 rte_spinlock_unlock(&mng->free_sl);
9439                 return 0; /* 0 is an error. */
9440         }
9441         rte_spinlock_unlock(&mng->free_sl);
9442         pool = container_of
9443           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
9444                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
9445                                                                        actions);
9446         if (!age_free->dr_action) {
9447                 age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
9448                                                 (pool->flow_hit_aso_obj->obj,
9449                                                  age_free->offset, REG_C_5);
9450                 if (!age_free->dr_action) {
9451                         rte_errno = errno;
9452                         rte_spinlock_lock(&mng->free_sl);
9453                         LIST_INSERT_HEAD(&mng->free, age_free, next);
9454                         rte_spinlock_unlock(&mng->free_sl);
9455                         return 0; /* 0 is an error. */
9456                 }
9457         }
9458         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
9459         return pool->index | ((age_free->offset + 1) << 16);
9460 }
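
/*
 * Editor's note: worked example of the age index encoding returned above.
 * The pool index sits in the low 16 bits and (offset + 1) in the high 16
 * bits, so index 0 stays reserved as the error value. The decode helper
 * mirrors the encode by assumption; the driver's own decoder is
 * flow_aso_age_get_by_idx().
 */
static inline uint32_t
aso_age_idx_encode_example(uint16_t pool_idx, uint16_t action_offset)
{
	return (uint32_t)pool_idx | (((uint32_t)action_offset + 1) << 16);
}

static inline void
aso_age_idx_decode_example(uint32_t age_idx, uint16_t *pool_idx,
			   uint16_t *action_offset)
{
	*pool_idx = age_idx & 0xffff;
	*action_offset = (age_idx >> 16) - 1;
}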
9461
9462 /**
9463  * Create an age action using the ASO mechanism.
9464  *
9465  * @param[in] dev
9466  *   Pointer to rte_eth_dev structure.
9467  * @param[in] age
9468  *   Pointer to the aging action configuration.
9469  *
9470  * @return
9471  *   Index to the ASO age action on success, 0 otherwise.
9472  */
9473 static uint32_t
9474 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
9475                                  const struct rte_flow_action_age *age)
9476 {
9477         uint32_t age_idx = 0;
9478         struct mlx5_aso_age_action *aso_age;
9479
9480         age_idx = flow_dv_aso_age_alloc(dev);
9481         if (!age_idx)
9482                 return 0;
9483         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
9484         aso_age->age_params.context = age->context;
9485         aso_age->age_params.timeout = age->timeout;
9486         aso_age->age_params.port_id = dev->data->port_id;
9487         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
9488                          __ATOMIC_RELAXED);
9489         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
9490                          __ATOMIC_RELAXED);
9491         return age_idx;
9492 }
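
/*
 * Editor's note: hedged example of the AGE action configuration consumed
 * above; the 10-second timeout and NULL context are illustration values.
 * Aged-out flows are later reported through rte_flow_get_aged_flows().
 */
static const struct rte_flow_action_age age_conf_example = {
	.timeout = 10,   /* Seconds without a hit before the flow ages out. */
	.context = NULL, /* Opaque pointer returned to the application. */
};
static const struct rte_flow_action age_action_example = {
	.type = RTE_FLOW_ACTION_TYPE_AGE,
	.conf = &age_conf_example,
};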
9493
9494 /**
9495  * Fill the flow with DV spec, lock-free
9496  * (the mutex should be acquired by the caller).
9497  *
9498  * @param[in] dev
9499  *   Pointer to rte_eth_dev structure.
9500  * @param[in, out] dev_flow
9501  *   Pointer to the sub flow.
9502  * @param[in] attr
9503  *   Pointer to the flow attributes.
9504  * @param[in] items
9505  *   Pointer to the list of items.
9506  * @param[in] actions
9507  *   Pointer to the list of actions.
9508  * @param[out] error
9509  *   Pointer to the error structure.
9510  *
9511  * @return
9512  *   0 on success, a negative errno value otherwise and rte_errno is set.
9513  */
9514 static int
9515 flow_dv_translate(struct rte_eth_dev *dev,
9516                   struct mlx5_flow *dev_flow,
9517                   const struct rte_flow_attr *attr,
9518                   const struct rte_flow_item items[],
9519                   const struct rte_flow_action actions[],
9520                   struct rte_flow_error *error)
9521 {
9522         struct mlx5_priv *priv = dev->data->dev_private;
9523         struct mlx5_dev_config *dev_conf = &priv->config;
9524         struct rte_flow *flow = dev_flow->flow;
9525         struct mlx5_flow_handle *handle = dev_flow->handle;
9526         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9527         struct mlx5_flow_rss_desc *rss_desc;
9528         uint64_t item_flags = 0;
9529         uint64_t last_item = 0;
9530         uint64_t action_flags = 0;
9531         uint64_t priority = attr->priority;
9532         struct mlx5_flow_dv_matcher matcher = {
9533                 .mask = {
9534                         .size = sizeof(matcher.mask.buf) -
9535                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9536                 },
9537         };
9538         int actions_n = 0;
9539         bool actions_end = false;
9540         union {
9541                 struct mlx5_flow_dv_modify_hdr_resource res;
9542                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9543                             sizeof(struct mlx5_modification_cmd) *
9544                             (MLX5_MAX_MODIFY_NUM + 1)];
9545         } mhdr_dummy;
9546         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9547         const struct rte_flow_action_count *count = NULL;
9548         const struct rte_flow_action_age *age = NULL;
9549         union flow_dv_attr flow_attr = { .attr = 0 };
9550         uint32_t tag_be;
9551         union mlx5_flow_tbl_key tbl_key;
9552         uint32_t modify_action_position = UINT32_MAX;
9553         void *match_mask = matcher.mask.buf;
9554         void *match_value = dev_flow->dv.value.buf;
9555         uint8_t next_protocol = 0xff;
9556         struct rte_vlan_hdr vlan = { 0 };
9557         struct mlx5_flow_dv_dest_array_resource mdest_res;
9558         struct mlx5_flow_dv_sample_resource sample_res;
9559         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9560         struct mlx5_flow_sub_actions_list *sample_act;
9561         uint32_t sample_act_pos = UINT32_MAX;
9562         uint32_t num_of_dest = 0;
9563         int tmp_actions_n = 0;
9564         uint32_t table;
9565         int ret = 0;
9566         const struct mlx5_flow_tunnel *tunnel;
9567         struct flow_grp_info grp_info = {
9568                 .external = !!dev_flow->external,
9569                 .transfer = !!attr->transfer,
9570                 .fdb_def_rule = !!priv->fdb_def_rule,
9571                 .skip_scale = !!dev_flow->skip_scale,
9572         };
9573
9574         MLX5_ASSERT(wks);
9575         rss_desc = &wks->rss_desc[!!wks->flow_nested_idx];
9576         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9577         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9578         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9579                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9580         /* update normal path action resource into last index of array */
9581         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9582         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9583                  flow_items_to_tunnel(items) :
9584                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9585                  flow_actions_to_tunnel(actions) :
9586                  dev_flow->tunnel;
9589         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9590                                 (dev, tunnel, attr, items, actions);
9591         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9592                                        grp_info, error);
9593         if (ret)
9594                 return ret;
9595         dev_flow->dv.group = table;
9596         if (attr->transfer)
9597                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9598         if (priority == MLX5_FLOW_PRIO_RSVD)
9599                 priority = dev_conf->flow_prio - 1;
9600         /* number of actions must be set to 0 in case of dirty stack. */
9601         mhdr_res->actions_num = 0;
9602         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9603                 /*
9604                  * Do not add decap action if the match rule drops the
9605                  * packet; HW rejects rules with both decap and drop.
9606                  */
9607                 bool add_decap = true;
9608                 const struct rte_flow_action *ptr = actions;
9609                 struct mlx5_flow_tbl_resource *tbl;
9610
9611                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9612                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9613                                 add_decap = false;
9614                                 break;
9615                         }
9616                 }
9617                 if (add_decap) {
9618                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9619                                                            attr->transfer,
9620                                                            error))
9621                                 return -rte_errno;
9622                         dev_flow->dv.actions[actions_n++] =
9623                                         dev_flow->dv.encap_decap->action;
9624                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9625                 }
9626                 /*
9627                  * Bind table_id with <group, table> for the tunnel match rule.
9628                  * The tunnel set rule establishes that binding in the JUMP
9629                  * action handler. Required for the scenario when the application
9630                  * creates the tunnel match rule before the tunnel set rule.
9631                  */
9632                 tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9633                                                attr->transfer,
9634                                                !!dev_flow->external, tunnel,
9635                                                attr->group, 0, error);
9636                 if (!tbl)
9637                         return rte_flow_error_set
9638                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
9639                                actions, "cannot register tunnel group");
9640         }
9641         for (; !actions_end ; actions++) {
9642                 const struct rte_flow_action_queue *queue;
9643                 const struct rte_flow_action_rss *rss;
9644                 const struct rte_flow_action *action = actions;
9645                 const uint8_t *rss_key;
9646                 const struct rte_flow_action_meter *mtr;
9647                 struct mlx5_flow_tbl_resource *tbl;
9648                 struct mlx5_aso_age_action *age_act;
9649                 uint32_t port_id = 0;
9650                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9651                 int action_type = actions->type;
9652                 const struct rte_flow_action *found_action = NULL;
9653                 struct mlx5_flow_meter *fm = NULL;
9654                 uint32_t jump_group = 0;
9655
9656                 if (!mlx5_flow_os_action_supported(action_type))
9657                         return rte_flow_error_set(error, ENOTSUP,
9658                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9659                                                   actions,
9660                                                   "action not supported");
9661                 switch (action_type) {
9662                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9663                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9664                         break;
9665                 case RTE_FLOW_ACTION_TYPE_VOID:
9666                         break;
9667                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9668                         if (flow_dv_translate_action_port_id(dev, action,
9669                                                              &port_id, error))
9670                                 return -rte_errno;
9671                         port_id_resource.port_id = port_id;
9672                         MLX5_ASSERT(!handle->rix_port_id_action);
9673                         if (flow_dv_port_id_action_resource_register
9674                             (dev, &port_id_resource, dev_flow, error))
9675                                 return -rte_errno;
9676                         dev_flow->dv.actions[actions_n++] =
9677                                         dev_flow->dv.port_id_action->action;
9678                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9679                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9680                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9681                         num_of_dest++;
9682                         break;
9683                 case RTE_FLOW_ACTION_TYPE_FLAG:
9684                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9685                         dev_flow->handle->mark = 1;
9686                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9687                                 struct rte_flow_action_mark mark = {
9688                                         .id = MLX5_FLOW_MARK_DEFAULT,
9689                                 };
9690
9691                                 if (flow_dv_convert_action_mark(dev, &mark,
9692                                                                 mhdr_res,
9693                                                                 error))
9694                                         return -rte_errno;
9695                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9696                                 break;
9697                         }
9698                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9699                         /*
9700                          * Only one FLAG or MARK is supported per device flow
9701                          * right now. So the pointer to the tag resource must be
9702                          * zero before the register process.
9703                          */
9704                         MLX5_ASSERT(!handle->dvh.rix_tag);
9705                         if (flow_dv_tag_resource_register(dev, tag_be,
9706                                                           dev_flow, error))
9707                                 return -rte_errno;
9708                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9709                         dev_flow->dv.actions[actions_n++] =
9710                                         dev_flow->dv.tag_resource->action;
9711                         break;
9712                 case RTE_FLOW_ACTION_TYPE_MARK:
9713                         action_flags |= MLX5_FLOW_ACTION_MARK;
9714                         dev_flow->handle->mark = 1;
9715                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9716                                 const struct rte_flow_action_mark *mark =
9717                                         (const struct rte_flow_action_mark *)
9718                                                 actions->conf;
9719
9720                                 if (flow_dv_convert_action_mark(dev, mark,
9721                                                                 mhdr_res,
9722                                                                 error))
9723                                         return -rte_errno;
9724                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9725                                 break;
9726                         }
9727                         /* Fall-through */
9728                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9729                         /* Legacy (non-extended) MARK action. */
9730                         tag_be = mlx5_flow_mark_set
9731                               (((const struct rte_flow_action_mark *)
9732                                (actions->conf))->id);
9733                         MLX5_ASSERT(!handle->dvh.rix_tag);
9734                         if (flow_dv_tag_resource_register(dev, tag_be,
9735                                                           dev_flow, error))
9736                                 return -rte_errno;
9737                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9738                         dev_flow->dv.actions[actions_n++] =
9739                                         dev_flow->dv.tag_resource->action;
9740                         break;
9741                 case RTE_FLOW_ACTION_TYPE_SET_META:
9742                         if (flow_dv_convert_action_set_meta
9743                                 (dev, mhdr_res, attr,
9744                                  (const struct rte_flow_action_set_meta *)
9745                                   actions->conf, error))
9746                                 return -rte_errno;
9747                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9748                         break;
9749                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9750                         if (flow_dv_convert_action_set_tag
9751                                 (dev, mhdr_res,
9752                                  (const struct rte_flow_action_set_tag *)
9753                                   actions->conf, error))
9754                                 return -rte_errno;
9755                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9756                         break;
9757                 case RTE_FLOW_ACTION_TYPE_DROP:
9758                         action_flags |= MLX5_FLOW_ACTION_DROP;
9759                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9760                         break;
9761                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9762                         queue = actions->conf;
9763                         rss_desc->queue_num = 1;
9764                         rss_desc->queue[0] = queue->index;
9765                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9766                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9767                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9768                         num_of_dest++;
9769                         break;
9770                 case RTE_FLOW_ACTION_TYPE_RSS:
9771                         rss = actions->conf;
9772                         memcpy(rss_desc->queue, rss->queue,
9773                                rss->queue_num * sizeof(uint16_t));
9774                         rss_desc->queue_num = rss->queue_num;
9775                         /* NULL RSS key indicates default RSS key. */
9776                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9777                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9778                         /*
9779                          * rss->level and rss->types should be set in advance
9780                          * when expanding items for RSS.
9781                          */
9782                         action_flags |= MLX5_FLOW_ACTION_RSS;
9783                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9784                         break;
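                        /*
                         * A minimal sketch of an application-side RSS action
                         * handled by the branch above (queue indices are
                         * hypothetical; a NULL key selects the default key):
                         *
                         *     uint16_t queues[2] = { 0, 1 };
                         *     struct rte_flow_action_rss rss_conf = {
                         *             .level = 0,
                         *             .types = ETH_RSS_IP,
                         *             .key = NULL,
                         *             .key_len = 0,
                         *             .queue = queues,
                         *             .queue_num = 2,
                         *     };
                         */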
9785                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
9786                         flow->age = (uint32_t)(uintptr_t)(action->conf);
9787                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
9788                         __atomic_fetch_add(&age_act->refcnt, 1,
9789                                            __ATOMIC_RELAXED);
9790                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
9791                         action_flags |= MLX5_FLOW_ACTION_AGE;
9792                         break;
9793                 case RTE_FLOW_ACTION_TYPE_AGE:
9794                         if (priv->sh->flow_hit_aso_en && attr->group) {
9795                                 flow->age = flow_dv_translate_create_aso_age
9796                                                 (dev, action->conf);
9797                                 if (!flow->age)
9798                                         return rte_flow_error_set
9799                                                 (error, rte_errno,
9800                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9801                                                  NULL,
9802                                                  "can't create ASO age action");
9803                                 dev_flow->dv.actions[actions_n++] =
9804                                           (flow_aso_age_get_by_idx
9805                                                 (dev, flow->age))->dr_action;
9806                                 action_flags |= MLX5_FLOW_ACTION_AGE;
9807                                 break;
9808                         }
9809                         /* Fall-through */
9810                 case RTE_FLOW_ACTION_TYPE_COUNT:
9811                         if (!dev_conf->devx) {
9812                                 return rte_flow_error_set
9813                                               (error, ENOTSUP,
9814                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9815                                                NULL,
9816                                                "count action not supported");
9817                         }
9818                         /* Save the information first; it is applied later. */
9819                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9820                                 count = action->conf;
9821                         else
9822                                 age = action->conf;
9823                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9824                         break;
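                        /*
                         * Either application action can land in the branch
                         * above: AGE falls through here when ASO flow-hit
                         * aging is not enabled or the group is 0. A sketch
                         * with hypothetical values:
                         *
                         *     struct rte_flow_action_age age_conf = {
                         *             .timeout = 10,
                         *     };
                         *     struct rte_flow_action_count cnt_conf = {
                         *             .id = 0,
                         *     };
                         */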
9825                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9826                         dev_flow->dv.actions[actions_n++] =
9827                                                 priv->sh->pop_vlan_action;
9828                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9829                         break;
9830                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9831                         if (!(action_flags &
9832                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9833                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9834                         vlan.eth_proto = rte_be_to_cpu_16
9835                              ((((const struct rte_flow_action_of_push_vlan *)
9836                                                    actions->conf)->ethertype));
9837                         found_action = mlx5_flow_find_action
9838                                         (actions + 1,
9839                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9840                         if (found_action)
9841                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9842                         found_action = mlx5_flow_find_action
9843                                         (actions + 1,
9844                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9845                         if (found_action)
9846                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9847                         if (flow_dv_create_action_push_vlan
9848                                             (dev, attr, &vlan, dev_flow, error))
9849                                 return -rte_errno;
9850                         dev_flow->dv.actions[actions_n++] =
9851                                         dev_flow->dv.push_vlan_res->action;
9852                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9853                         break;
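                        /*
                         * An illustrative action pair folded by the branch
                         * above into a single VLAN push (values are
                         * hypothetical); the SET_VLAN_VID/PCP actions that
                         * follow OF_PUSH_VLAN are located with
                         * mlx5_flow_find_action() and merged into it:
                         *
                         *     struct rte_flow_action_of_push_vlan push = {
                         *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
                         *     };
                         *     struct rte_flow_action_of_set_vlan_vid vid = {
                         *             .vlan_vid = RTE_BE16(100),
                         *     };
                         */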
9854                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9855                         /* The OF_PUSH_VLAN action already handled this action. */
9856                         MLX5_ASSERT(action_flags &
9857                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9858                         break;
9859                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9860                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9861                                 break;
9862                         flow_dev_get_vlan_info_from_items(items, &vlan);
9863                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9864                         /* If no VLAN push - this is a modify header action */
9865                         if (flow_dv_convert_action_modify_vlan_vid
9866                                                 (mhdr_res, actions, error))
9867                                 return -rte_errno;
9868                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9869                         break;
9870                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9871                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9872                         if (flow_dv_create_action_l2_encap(dev, actions,
9873                                                            dev_flow,
9874                                                            attr->transfer,
9875                                                            error))
9876                                 return -rte_errno;
9877                         dev_flow->dv.actions[actions_n++] =
9878                                         dev_flow->dv.encap_decap->action;
9879                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9880                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9881                                 sample_act->action_flags |=
9882                                                         MLX5_FLOW_ACTION_ENCAP;
9883                         break;
9884                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9885                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9886                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9887                                                            attr->transfer,
9888                                                            error))
9889                                 return -rte_errno;
9890                         dev_flow->dv.actions[actions_n++] =
9891                                         dev_flow->dv.encap_decap->action;
9892                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9893                         break;
9894                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9895                         /* Handle encap with preceding decap. */
9896                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9897                                 if (flow_dv_create_action_raw_encap
9898                                         (dev, actions, dev_flow, attr, error))
9899                                         return -rte_errno;
9900                                 dev_flow->dv.actions[actions_n++] =
9901                                         dev_flow->dv.encap_decap->action;
9902                         } else {
9903                                 /* Handle encap without preceding decap. */
9904                                 if (flow_dv_create_action_l2_encap
9905                                     (dev, actions, dev_flow, attr->transfer,
9906                                      error))
9907                                         return -rte_errno;
9908                                 dev_flow->dv.actions[actions_n++] =
9909                                         dev_flow->dv.encap_decap->action;
9910                         }
9911                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9912                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9913                                 sample_act->action_flags |=
9914                                                         MLX5_FLOW_ACTION_ENCAP;
9915                         break;
9916                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9917                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9918                                 ;
9919                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9920                                 if (flow_dv_create_action_l2_decap
9921                                     (dev, dev_flow, attr->transfer, error))
9922                                         return -rte_errno;
9923                                 dev_flow->dv.actions[actions_n++] =
9924                                         dev_flow->dv.encap_decap->action;
9925                         }
9926                         /* If decap is followed by encap, handle it at encap. */
9927                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9928                         break;
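                        /*
                         * The lookahead above skips VOID actions, so a
                         * sequence like this sketch is treated as a single
                         * decap-then-encap rewrite and handled at the
                         * RAW_ENCAP case:
                         *
                         *     { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP },
                         *     { .type = RTE_FLOW_ACTION_TYPE_VOID },
                         *     { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, ... },
                         */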
9929                 case RTE_FLOW_ACTION_TYPE_JUMP:
9930                         jump_group = ((const struct rte_flow_action_jump *)
9931                                                         action->conf)->group;
9932                         grp_info.std_tbl_fix = 0;
9933                         grp_info.skip_scale = 0;
9934                         ret = mlx5_flow_group_to_table(dev, tunnel,
9935                                                        jump_group,
9936                                                        &table,
9937                                                        grp_info, error);
9938                         if (ret)
9939                                 return ret;
9940                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9941                                                        attr->transfer,
9942                                                        !!dev_flow->external,
9943                                                        tunnel, jump_group, 0,
9944                                                        error);
9945                         if (!tbl)
9946                                 return rte_flow_error_set
9947                                                 (error, errno,
9948                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9949                                                  NULL,
9950                                                  "cannot create jump action.");
9951                         if (flow_dv_jump_tbl_resource_register
9952                             (dev, tbl, dev_flow, error)) {
9953                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9954                                 return rte_flow_error_set
9955                                                 (error, errno,
9956                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9957                                                  NULL,
9958                                                  "cannot create jump action.");
9959                         }
9960                         dev_flow->dv.actions[actions_n++] =
9961                                         dev_flow->dv.jump->action;
9962                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9963                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9964                         break;
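                        /*
                         * A minimal sketch of the application-side jump
                         * action translated above (the group value is
                         * hypothetical). Note that the target table is
                         * resolved from the jump group, not from the rule's
                         * own group:
                         *
                         *     struct rte_flow_action_jump jump = { .group = 2 };
                         *     struct rte_flow_action act = {
                         *             .type = RTE_FLOW_ACTION_TYPE_JUMP,
                         *             .conf = &jump,
                         *     };
                         */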
9965                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9966                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9967                         if (flow_dv_convert_action_modify_mac
9968                                         (mhdr_res, actions, error))
9969                                 return -rte_errno;
9970                         action_flags |= actions->type ==
9971                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
9972                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
9973                                         MLX5_FLOW_ACTION_SET_MAC_DST;
9974                         break;
9975                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
9976                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
9977                         if (flow_dv_convert_action_modify_ipv4
9978                                         (mhdr_res, actions, error))
9979                                 return -rte_errno;
9980                         action_flags |= actions->type ==
9981                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
9982                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
9983                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
9984                         break;
9985                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
9986                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
9987                         if (flow_dv_convert_action_modify_ipv6
9988                                         (mhdr_res, actions, error))
9989                                 return -rte_errno;
9990                         action_flags |= actions->type ==
9991                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
9992                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
9993                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
9994                         break;
9995                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
9996                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
9997                         if (flow_dv_convert_action_modify_tp
9998                                         (mhdr_res, actions, items,
9999                                          &flow_attr, dev_flow, !!(action_flags &
10000                                          MLX5_FLOW_ACTION_DECAP), error))
10001                                 return -rte_errno;
10002                         action_flags |= actions->type ==
10003                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10004                                         MLX5_FLOW_ACTION_SET_TP_SRC :
10005                                         MLX5_FLOW_ACTION_SET_TP_DST;
10006                         break;
10007                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10008                         if (flow_dv_convert_action_modify_dec_ttl
10009                                         (mhdr_res, items, &flow_attr, dev_flow,
10010                                          !!(action_flags &
10011                                          MLX5_FLOW_ACTION_DECAP), error))
10012                                 return -rte_errno;
10013                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10014                         break;
10015                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
10016                         if (flow_dv_convert_action_modify_ttl
10017                                         (mhdr_res, actions, items, &flow_attr,
10018                                          dev_flow, !!(action_flags &
10019                                          MLX5_FLOW_ACTION_DECAP), error))
10020                                 return -rte_errno;
10021                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10022                         break;
10023                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10024                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10025                         if (flow_dv_convert_action_modify_tcp_seq
10026                                         (mhdr_res, actions, error))
10027                                 return -rte_errno;
10028                         action_flags |= actions->type ==
10029                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10030                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
10031                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10032                         break;
10033
10034                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10035                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10036                         if (flow_dv_convert_action_modify_tcp_ack
10037                                         (mhdr_res, actions, error))
10038                                 return -rte_errno;
10039                         action_flags |= actions->type ==
10040                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10041                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
10042                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
10043                         break;
10044                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10045                         if (flow_dv_convert_action_set_reg
10046                                         (mhdr_res, actions, error))
10047                                 return -rte_errno;
10048                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10049                         break;
10050                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10051                         if (flow_dv_convert_action_copy_mreg
10052                                         (dev, mhdr_res, actions, error))
10053                                 return -rte_errno;
10054                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10055                         break;
10056                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10057                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10058                         dev_flow->handle->fate_action =
10059                                         MLX5_FLOW_FATE_DEFAULT_MISS;
10060                         break;
10061                 case RTE_FLOW_ACTION_TYPE_METER:
10062                         mtr = actions->conf;
10063                         if (!flow->meter) {
10064                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10065                                                             attr, error);
10066                                 if (!fm)
10067                                         return rte_flow_error_set(error,
10068                                                 rte_errno,
10069                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10070                                                 NULL,
10071                                                 "meter not found "
10072                                                 "or invalid parameters");
10073                                 flow->meter = fm->idx;
10074                         }
10075                         /* Set the meter action. */
10076                         if (!fm) {
10077                                 fm = mlx5_ipool_get(priv->sh->ipool
10078                                                 [MLX5_IPOOL_MTR], flow->meter);
10079                                 if (!fm)
10080                                         return rte_flow_error_set(error,
10081                                                 rte_errno,
10082                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10083                                                 NULL,
10084                                                 "meter not found "
10085                                                 "or invalid parameters");
10086                         }
10087                         dev_flow->dv.actions[actions_n++] =
10088                                 fm->mfts->meter_action;
10089                         action_flags |= MLX5_FLOW_ACTION_METER;
10090                         break;
10091                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10092                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10093                                                               actions, error))
10094                                 return -rte_errno;
10095                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10096                         break;
10097                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10098                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10099                                                               actions, error))
10100                                 return -rte_errno;
10101                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10102                         break;
10103                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
10104                         sample_act_pos = actions_n;
10105                         ret = flow_dv_translate_action_sample(dev,
10106                                                               actions,
10107                                                               dev_flow, attr,
10108                                                               &num_of_dest,
10109                                                               sample_actions,
10110                                                               &sample_res,
10111                                                               error);
10112                         if (ret < 0)
10113                                 return ret;
10114                         actions_n++;
10115                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10116                         /* Put the encap action into the group if used with a port id. */
10117                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10118                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10119                                 sample_act->action_flags |=
10120                                                         MLX5_FLOW_ACTION_ENCAP;
10121                         break;
10122                 case RTE_FLOW_ACTION_TYPE_END:
10123                         actions_end = true;
10124                         if (mhdr_res->actions_num) {
10125                                 /* create modify action if needed. */
10126                                 if (flow_dv_modify_hdr_resource_register
10127                                         (dev, mhdr_res, dev_flow, error))
10128                                         return -rte_errno;
10129                                 dev_flow->dv.actions[modify_action_position] =
10130                                         handle->dvh.modify_hdr->action;
10131                         }
10132                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10133                                 flow->counter =
10134                                         flow_dv_translate_create_counter(dev,
10135                                                 dev_flow, count, age);
10136
10137                                 if (!flow->counter)
10138                                         return rte_flow_error_set
10139                                                 (error, rte_errno,
10140                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10141                                                 NULL,
10142                                                 "cannot create counter"
10143                                                 " object.");
10144                                 dev_flow->dv.actions[actions_n] =
10145                                           (flow_dv_counter_get_by_idx(dev,
10146                                           flow->counter, NULL))->action;
10147                                 actions_n++;
10148                         }
10149                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
10150                                 ret = flow_dv_create_action_sample(dev,
10151                                                           dev_flow,
10152                                                           num_of_dest,
10153                                                           &sample_res,
10154                                                           &mdest_res,
10155                                                           sample_actions,
10156                                                           action_flags,
10157                                                           error);
10158                                 if (ret < 0)
10159                                         return rte_flow_error_set
10160                                                 (error, rte_errno,
10161                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10162                                                 NULL,
10163                                                 "cannot create sample action");
10164                                 if (num_of_dest > 1) {
10165                                         dev_flow->dv.actions[sample_act_pos] =
10166                                         dev_flow->dv.dest_array_res->action;
10167                                 } else {
10168                                         dev_flow->dv.actions[sample_act_pos] =
10169                                         dev_flow->dv.sample_res->verbs_action;
10170                                 }
10171                         }
10172                         break;
10173                 default:
10174                         break;
10175                 }
10176                 if (mhdr_res->actions_num &&
10177                     modify_action_position == UINT32_MAX)
10178                         modify_action_position = actions_n++;
10179         }
10180         /*
10181          * For multiple destinations (sample action with ratio=1), the encap
10182          * action and the port id action are combined into a single group
10183          * action, so the original actions must be removed from the flow and
10184          * only the sample action used instead.
10185          */
10186         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
10187                 int i;
10188                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10189
10190                 for (i = 0; i < actions_n; i++) {
10191                         if ((sample_act->dr_encap_action &&
10192                                 sample_act->dr_encap_action ==
10193                                 dev_flow->dv.actions[i]) ||
10194                                 (sample_act->dr_port_id_action &&
10195                                 sample_act->dr_port_id_action ==
10196                                 dev_flow->dv.actions[i]))
10197                                 continue;
10198                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
10199                 }
10200                 memcpy((void *)dev_flow->dv.actions,
10201                                 (void *)temp_actions,
10202                                 tmp_actions_n * sizeof(void *));
10203                 actions_n = tmp_actions_n;
10204         }
10205         dev_flow->dv.actions_n = actions_n;
10206         dev_flow->act_flags = action_flags;
10207         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10208                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10209                 int item_type = items->type;
10210
10211                 if (!mlx5_flow_os_item_supported(item_type))
10212                         return rte_flow_error_set(error, ENOTSUP,
10213                                                   RTE_FLOW_ERROR_TYPE_ITEM,
10214                                                   NULL, "item not supported");
10215                 switch (item_type) {
10216                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
10217                         flow_dv_translate_item_port_id(dev, match_mask,
10218                                                        match_value, items);
10219                         last_item = MLX5_FLOW_ITEM_PORT_ID;
10220                         break;
10221                 case RTE_FLOW_ITEM_TYPE_ETH:
10222                         flow_dv_translate_item_eth(match_mask, match_value,
10223                                                    items, tunnel,
10224                                                    dev_flow->dv.group);
10225                         matcher.priority = action_flags &
10226                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
10227                                         !dev_flow->external ?
10228                                         MLX5_PRIORITY_MAP_L3 :
10229                                         MLX5_PRIORITY_MAP_L2;
10230                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10231                                              MLX5_FLOW_LAYER_OUTER_L2;
10232                         break;
10233                 case RTE_FLOW_ITEM_TYPE_VLAN:
10234                         flow_dv_translate_item_vlan(dev_flow,
10235                                                     match_mask, match_value,
10236                                                     items, tunnel,
10237                                                     dev_flow->dv.group);
10238                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10239                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10240                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10241                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10242                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10243                         break;
10244                 case RTE_FLOW_ITEM_TYPE_IPV4:
10245                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10246                                                   &item_flags, &tunnel);
10247                         flow_dv_translate_item_ipv4(match_mask, match_value,
10248                                                     items, tunnel,
10249                                                     dev_flow->dv.group);
10250                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10251                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10252                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10253                         if (items->mask != NULL &&
10254                             ((const struct rte_flow_item_ipv4 *)
10255                              items->mask)->hdr.next_proto_id) {
10256                                 next_protocol =
10257                                         ((const struct rte_flow_item_ipv4 *)
10258                                          (items->spec))->hdr.next_proto_id;
10259                                 next_protocol &=
10260                                         ((const struct rte_flow_item_ipv4 *)
10261                                          (items->mask))->hdr.next_proto_id;
10262                         } else {
10263                                 /* Reset for inner layer. */
10264                                 next_protocol = 0xff;
10265                         }
10266                         break;
10267                 case RTE_FLOW_ITEM_TYPE_IPV6:
10268                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10269                                                   &item_flags, &tunnel);
10270                         flow_dv_translate_item_ipv6(match_mask, match_value,
10271                                                     items, tunnel,
10272                                                     dev_flow->dv.group);
10273                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10274                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10275                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10276                         if (items->mask != NULL &&
10277                             ((const struct rte_flow_item_ipv6 *)
10278                              items->mask)->hdr.proto) {
10279                                 next_protocol =
10280                                         ((const struct rte_flow_item_ipv6 *)
10281                                          items->spec)->hdr.proto;
10282                                 next_protocol &=
10283                                         ((const struct rte_flow_item_ipv6 *)
10284                                          items->mask)->hdr.proto;
10285                         } else {
10286                                 /* Reset for inner layer. */
10287                                 next_protocol = 0xff;
10288                         }
10289                         break;
10290                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10291                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10292                                                              match_value,
10293                                                              items, tunnel);
10294                         last_item = tunnel ?
10295                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10296                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10297                         if (items->mask != NULL &&
10298                             ((const struct rte_flow_item_ipv6_frag_ext *)
10299                              items->mask)->hdr.next_header) {
10300                                 next_protocol =
10301                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10302                                  items->spec)->hdr.next_header;
10303                                 next_protocol &=
10304                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10305                                  items->mask)->hdr.next_header;
10306                         } else {
10307                                 /* Reset for inner layer. */
10308                                 next_protocol = 0xff;
10309                         }
10310                         break;
10311                 case RTE_FLOW_ITEM_TYPE_TCP:
10312                         flow_dv_translate_item_tcp(match_mask, match_value,
10313                                                    items, tunnel);
10314                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10315                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10316                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10317                         break;
10318                 case RTE_FLOW_ITEM_TYPE_UDP:
10319                         flow_dv_translate_item_udp(match_mask, match_value,
10320                                                    items, tunnel);
10321                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10322                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10323                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10324                         break;
10325                 case RTE_FLOW_ITEM_TYPE_GRE:
10326                         flow_dv_translate_item_gre(match_mask, match_value,
10327                                                    items, tunnel);
10328                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10329                         last_item = MLX5_FLOW_LAYER_GRE;
10330                         break;
10331                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10332                         flow_dv_translate_item_gre_key(match_mask,
10333                                                        match_value, items);
10334                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10335                         break;
10336                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10337                         flow_dv_translate_item_nvgre(match_mask, match_value,
10338                                                      items, tunnel);
10339                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10340                         last_item = MLX5_FLOW_LAYER_GRE;
10341                         break;
10342                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10343                         flow_dv_translate_item_vxlan(match_mask, match_value,
10344                                                      items, tunnel);
10345                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10346                         last_item = MLX5_FLOW_LAYER_VXLAN;
10347                         break;
10348                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10349                         flow_dv_translate_item_vxlan_gpe(match_mask,
10350                                                          match_value, items,
10351                                                          tunnel);
10352                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10353                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10354                         break;
10355                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10356                         flow_dv_translate_item_geneve(match_mask, match_value,
10357                                                       items, tunnel);
10358                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10359                         last_item = MLX5_FLOW_LAYER_GENEVE;
10360                         break;
10361                 case RTE_FLOW_ITEM_TYPE_MPLS:
10362                         flow_dv_translate_item_mpls(match_mask, match_value,
10363                                                     items, last_item, tunnel);
10364                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10365                         last_item = MLX5_FLOW_LAYER_MPLS;
10366                         break;
10367                 case RTE_FLOW_ITEM_TYPE_MARK:
10368                         flow_dv_translate_item_mark(dev, match_mask,
10369                                                     match_value, items);
10370                         last_item = MLX5_FLOW_ITEM_MARK;
10371                         break;
10372                 case RTE_FLOW_ITEM_TYPE_META:
10373                         flow_dv_translate_item_meta(dev, match_mask,
10374                                                     match_value, attr, items);
10375                         last_item = MLX5_FLOW_ITEM_METADATA;
10376                         break;
10377                 case RTE_FLOW_ITEM_TYPE_ICMP:
10378                         flow_dv_translate_item_icmp(match_mask, match_value,
10379                                                     items, tunnel);
10380                         last_item = MLX5_FLOW_LAYER_ICMP;
10381                         break;
10382                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10383                         flow_dv_translate_item_icmp6(match_mask, match_value,
10384                                                       items, tunnel);
10385                         last_item = MLX5_FLOW_LAYER_ICMP6;
10386                         break;
10387                 case RTE_FLOW_ITEM_TYPE_TAG:
10388                         flow_dv_translate_item_tag(dev, match_mask,
10389                                                    match_value, items);
10390                         last_item = MLX5_FLOW_ITEM_TAG;
10391                         break;
10392                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10393                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10394                                                         match_value, items);
10395                         last_item = MLX5_FLOW_ITEM_TAG;
10396                         break;
10397                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10398                         flow_dv_translate_item_tx_queue(dev, match_mask,
10399                                                         match_value,
10400                                                         items);
10401                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10402                         break;
10403                 case RTE_FLOW_ITEM_TYPE_GTP:
10404                         flow_dv_translate_item_gtp(match_mask, match_value,
10405                                                    items, tunnel);
10406                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10407                         last_item = MLX5_FLOW_LAYER_GTP;
10408                         break;
10409                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10410                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10411                                 /* Create the parser only on first use. */
10412                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10413                                 if (ret)
10414                                         return rte_flow_error_set
10415                                                 (error, -ret,
10416                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10417                                                 NULL,
10418                                                 "cannot create eCPRI parser");
10419                         }
10420                         /* Adjust the matcher mask and device flow value sizes. */
10421                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10422                         dev_flow->dv.value.size =
10423                                         MLX5_ST_SZ_BYTES(fte_match_param);
10424                         flow_dv_translate_item_ecpri(dev, match_mask,
10425                                                      match_value, items);
10426                         /* No other protocol should follow eCPRI layer. */
10427                         last_item = MLX5_FLOW_LAYER_ECPRI;
10428                         break;
10429                 default:
10430                         break;
10431                 }
10432                 item_flags |= last_item;
10433         }
10434         /*
10435          * When E-Switch mode is enabled, there are two cases where the
10436          * source port must be set manually.
10437          * The first is a NIC steering rule, and the second is an E-Switch
10438          * rule where no port_id item was found. In both cases the source
10439          * port is set according to the port currently in use.
10440          */
10441         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10442             (priv->representor || priv->master)) {
10443                 if (flow_dv_translate_item_port_id(dev, match_mask,
10444                                                    match_value, NULL))
10445                         return -rte_errno;
10446         }
10447 #ifdef RTE_LIBRTE_MLX5_DEBUG
10448         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10449                                               dev_flow->dv.value.buf));
10450 #endif
10451         /*
10452          * Layers may already be initialized from the prefix flow if this
10453          * dev_flow is the suffix flow.
10454          */
10455         handle->layers |= item_flags;
10456         if (action_flags & MLX5_FLOW_ACTION_RSS)
10457                 flow_dv_hashfields_set(dev_flow, rss_desc);
10458         /* Register matcher. */
10459         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10460                                     matcher.mask.size);
10461         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10462                                                      matcher.priority);
10463         /* The reserved field does not need to be set to 0 here. */
10464         tbl_key.domain = attr->transfer;
10465         tbl_key.direction = attr->egress;
10466         tbl_key.table_id = dev_flow->dv.group;
10467         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
10468                 return -rte_errno;
10469         return 0;
10470 }
10471
10472 /**
10473  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10474  * and tunnel.
10475  *
10476  * @param[in, out] action
10477  *   Shared RSS action holding hash RX queue objects.
10478  * @param[in] hash_fields
10479  *   Defines combination of packet fields to participate in RX hash.
10480  * @param[in] tunnel
10481  *   Tunnel type.
10482  * @param[in] hrxq_idx
10483  *   Hash RX queue index to set.
10484  *
10485  * @return
10486  *   0 on success, -1 if the hash fields combination is not supported.
10487  */
10488 static int
10489 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10490                               const uint64_t hash_fields,
10491                               const int tunnel,
10492                               uint32_t hrxq_idx)
10493 {
10494         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10495
10496         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10497         case MLX5_RSS_HASH_IPV4:
10498                 hrxqs[0] = hrxq_idx;
10499                 return 0;
10500         case MLX5_RSS_HASH_IPV4_TCP:
10501                 hrxqs[1] = hrxq_idx;
10502                 return 0;
10503         case MLX5_RSS_HASH_IPV4_UDP:
10504                 hrxqs[2] = hrxq_idx;
10505                 return 0;
10506         case MLX5_RSS_HASH_IPV6:
10507                 hrxqs[3] = hrxq_idx;
10508                 return 0;
10509         case MLX5_RSS_HASH_IPV6_TCP:
10510                 hrxqs[4] = hrxq_idx;
10511                 return 0;
10512         case MLX5_RSS_HASH_IPV6_UDP:
10513                 hrxqs[5] = hrxq_idx;
10514                 return 0;
10515         case MLX5_RSS_HASH_NONE:
10516                 hrxqs[6] = hrxq_idx;
10517                 return 0;
10518         default:
10519                 return -1;
10520         }
10521 }
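
/*
 * Usage sketch (hypothetical values): store the hash RX queue created for
 * non-tunneled IPv4/TCP traffic in its slot of the shared action; the
 * tunnel flag selects which of the two arrays is used:
 *
 *     int ret = __flow_dv_action_rss_hrxq_set(shared_rss,
 *                                             MLX5_RSS_HASH_IPV4_TCP,
 *                                             0, hrxq_idx);
 *
 * A negative return value means the hash fields combination is not
 * supported.
 */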
10522
10523 /**
10524  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10525  * and tunnel.
10526  *
10527  * @param[in] dev
10528  *   Pointer to the Ethernet device structure.
10529  * @param[in] idx
10530  *   Shared RSS action ID holding hash RX queue objects.
10531  * @param[in] hash_fields
10532  *   Defines combination of packet fields to participate in RX hash.
10533  * @param[in] tunnel
10534  *   Tunnel type.
10535  *
10536  * @return
10537  *   Valid hash RX queue index, otherwise 0.
10538  */
10539 static uint32_t
10540 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
10541                                  const uint64_t hash_fields,
10542                                  const int tunnel)
10543 {
10544         struct mlx5_priv *priv = dev->data->dev_private;
10545         struct mlx5_shared_action_rss *shared_rss =
10546             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
10547         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
10548                                                         shared_rss->hrxq_tunnel;
10549
10550         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10551         case MLX5_RSS_HASH_IPV4:
10552                 return hrxqs[0];
10553         case MLX5_RSS_HASH_IPV4_TCP:
10554                 return hrxqs[1];
10555         case MLX5_RSS_HASH_IPV4_UDP:
10556                 return hrxqs[2];
10557         case MLX5_RSS_HASH_IPV6:
10558                 return hrxqs[3];
10559         case MLX5_RSS_HASH_IPV6_TCP:
10560                 return hrxqs[4];
10561         case MLX5_RSS_HASH_IPV6_UDP:
10562                 return hrxqs[5];
10563         case MLX5_RSS_HASH_NONE:
10564                 return hrxqs[6];
10565         default:
10566                 return 0;
10567         }
10568 }
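
/*
 * Usage sketch (the shared action index is hypothetical): fetch the hash
 * RX queue previously stored for non-tunneled IPv6/UDP traffic; the
 * IBV_RX_HASH_INNER bit is masked off internally:
 *
 *     uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *                                 (dev, shared_rss_idx,
 *                                  MLX5_RSS_HASH_IPV6_UDP, 0);
 *
 * A return value of 0 means no queue is stored for that combination.
 */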
10569
10570 /**
10571  * Retrieve the hash RX queue suitable for the *flow*.
10572  * If a shared action is configured for the *flow*, the suitable hash RX
10573  * queue is retrieved from the attached shared action.
10574  *
10575  * @param[in] dev
10576  *   Pointer to the Ethernet device structure.
10577  * @param[in] flow
10578  *   Pointer to the flow structure.
10579  * @param[in] dev_flow
10580  *   Pointer to the sub flow.
10581  * @param[out] hrxq
10582  *   Pointer to retrieved hash RX queue object.
10583  *
10584  * @return
10585  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10586  */
10587 static uint32_t
10588 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
10589                            struct mlx5_flow *dev_flow,
10590                            struct mlx5_hrxq **hrxq)
10591 {
10592         struct mlx5_priv *priv = dev->data->dev_private;
10593         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10594         uint32_t hrxq_idx;
10595
10596         if (flow->shared_rss) {
10597                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10598                                 (dev, flow->shared_rss, dev_flow->hash_fields,
10599                                  !!(dev_flow->handle->layers &
10600                                     MLX5_FLOW_LAYER_TUNNEL));
10601                 if (hrxq_idx) {
10602                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10603                                                hrxq_idx);
10604                         __atomic_fetch_add(&(*hrxq)->refcnt, 1,
10605                                            __ATOMIC_RELAXED);
10606                 }
10607         } else {
10608                 struct mlx5_flow_rss_desc *rss_desc =
10609                                 &wks->rss_desc[!!wks->flow_nested_idx];
10610
10611                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10612                                              &hrxq_idx);
10613         }
10614         return hrxq_idx;
10615 }
10616
10617 /**
10618  * Apply the flow to the NIC, lock free
10619  * (the mutex should be acquired by the caller).
10620  *
10621  * @param[in] dev
10622  *   Pointer to the Ethernet device structure.
10623  * @param[in, out] flow
10624  *   Pointer to flow structure.
10625  * @param[out] error
10626  *   Pointer to error structure.
10627  *
10628  * @return
10629  *   0 on success, a negative errno value otherwise and rte_errno is set.
10630  */
10631 static int
10632 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10633               struct rte_flow_error *error)
10634 {
10635         struct mlx5_flow_dv_workspace *dv;
10636         struct mlx5_flow_handle *dh;
10637         struct mlx5_flow_handle_dv *dv_h;
10638         struct mlx5_flow *dev_flow;
10639         struct mlx5_priv *priv = dev->data->dev_private;
10640         uint32_t handle_idx;
10641         int n;
10642         int err;
10643         int idx;
10644         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10645
10646         MLX5_ASSERT(wks);
10647         for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--) {
10648                 dev_flow = &wks->flows[idx];
10649                 dv = &dev_flow->dv;
10650                 dh = dev_flow->handle;
10651                 dv_h = &dh->dvh;
10652                 n = dv->actions_n;
10653                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10654                         if (dv->transfer) {
10655                                 dv->actions[n++] = priv->sh->esw_drop_action;
10656                         } else {
10657                                 MLX5_ASSERT(priv->drop_queue.hrxq);
10658                                 dv->actions[n++] =
10659                                                 priv->drop_queue.hrxq->action;
10660                         }
10661                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10662                            !dv_h->rix_sample && !dv_h->rix_dest_array) {
10663                         struct mlx5_hrxq *hrxq = NULL;
10664                         uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10665                                                 (dev, flow, dev_flow, &hrxq);
10666                         if (!hrxq) {
10667                                 rte_flow_error_set
10668                                         (error, rte_errno,
10669                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10670                                          "cannot get hash queue");
10671                                 goto error;
10672                         }
10673                         dh->rix_hrxq = hrxq_idx;
10674                         dv->actions[n++] = hrxq->action;
10675                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10676                         if (!priv->sh->default_miss_action) {
10677                                 rte_flow_error_set
10678                                         (error, rte_errno,
10679                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10680                                          "default miss action not created.");
10681                                 goto error;
10682                         }
10683                         dv->actions[n++] = priv->sh->default_miss_action;
10684                 }
10685                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10686                                                (void *)&dv->value, n,
10687                                                dv->actions, &dh->drv_flow);
10688                 if (err) {
10689                         rte_flow_error_set(error, errno,
10690                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10691                                            NULL,
10692                                            "hardware refuses to create flow");
10693                         goto error;
10694                 }
10695                 if (priv->vmwa_context &&
10696                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
10697                         /*
10698                          * The rule contains the VLAN pattern.
10699                          * For VFs, create a VLAN interface so that
10700                          * the hypervisor sets the correct e-Switch
10701                          * vport context.
10702                          */
10703                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10704                 }
10705         }
10706         return 0;
10707 error:
10708         err = rte_errno; /* Save rte_errno before cleanup. */
10709         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10710                        handle_idx, dh, next) {
10711                 /* hrxq is union, don't clear it if the flag is not set. */
10712                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10713                         mlx5_hrxq_release(dev, dh->rix_hrxq);
10714                         dh->rix_hrxq = 0;
10715                 }
10716                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10717                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10718         }
10719         rte_errno = err; /* Restore rte_errno. */
10720         return -rte_errno;
10721 }
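
/*
 * A note on the loop bounds in flow_dv_apply() above: the per-thread
 * workspace stacks every device sub-flow created for the flow being
 * applied, and wks->flow_nested_idx (when non-zero) marks where the
 * nested sub-flows (e.g. from a prefix/suffix split) of the current
 * flow begin. The iteration
 *
 *	for (idx = wks->flow_idx - 1; idx >= wks->flow_nested_idx; idx--)
 *
 * thus visits only the sub-flows belonging to this flow, newest first;
 * the same flag also selects the nested RSS descriptor in
 * __flow_dv_rss_get_hrxq() via wks->rss_desc[!!wks->flow_nested_idx].
 */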
10722
10723 void
10724 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10725                           struct mlx5_cache_entry *entry)
10726 {
10727         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10728                                                           entry);
10729
10730         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10731         mlx5_free(cache);
10732 }
10733
10734 /**
10735  * Release the flow matcher.
10736  *
10737  * @param dev
10738  *   Pointer to Ethernet device.
10739  * @param handle
10740  *   Pointer to mlx5_flow_handle.
10741  *
10742  * @return
10743  *   1 while a reference on it exists, 0 when freed.
10744  */
10745 static int
10746 flow_dv_matcher_release(struct rte_eth_dev *dev,
10747                         struct mlx5_flow_handle *handle)
10748 {
10749         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10750         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10751                                                             typeof(*tbl), tbl);
10752         int ret;
10753
10754         MLX5_ASSERT(matcher->matcher_object);
10755         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10756         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10757         return ret;
10758 }
10759
10760 /**
10761  * Release encap_decap resource.
10762  *
10763  * @param list
10764  *   Pointer to the hash list.
10765  * @param entry
10766  *   Pointer to existing resource entry object.
10767  */
10768 void
10769 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10770                               struct mlx5_hlist_entry *entry)
10771 {
10772         struct mlx5_dev_ctx_shared *sh = list->ctx;
10773         struct mlx5_flow_dv_encap_decap_resource *res =
10774                 container_of(entry, typeof(*res), entry);
10775
10776         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10777         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10778 }
10779
10780 /**
10781  * Release an encap/decap resource.
10782  *
10783  * @param dev
10784  *   Pointer to Ethernet device.
10785  * @param encap_decap_idx
10786  *   Index of encap decap resource.
10787  *
10788  * @return
10789  *   1 while a reference on it exists, 0 when freed.
10790  */
10791 static int
10792 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10793                                      uint32_t encap_decap_idx)
10794 {
10795         struct mlx5_priv *priv = dev->data->dev_private;
10796         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10797
10798         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10799                                         encap_decap_idx);
10800         if (!cache_resource)
10801                 return 0;
10802         MLX5_ASSERT(cache_resource->action);
10803         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10804                                      &cache_resource->entry);
10805 }
10806
10807 /**
10808  * Release a jump to table action resource.
10809  *
10810  * @param dev
10811  *   Pointer to Ethernet device.
10812  * @param handle
10813  *   Pointer to mlx5_flow_handle.
10814  *
10815  * @return
10816  *   1 while a reference on it exists, 0 when freed.
10817  */
10818 static int
10819 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10820                                   struct mlx5_flow_handle *handle)
10821 {
10822         struct mlx5_priv *priv = dev->data->dev_private;
10823         struct mlx5_flow_tbl_data_entry *tbl_data;
10824
10825         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10826                              handle->rix_jump);
10827         if (!tbl_data)
10828                 return 0;
10829         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10830 }
10831
10832 void
10833 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10834                          struct mlx5_hlist_entry *entry)
10835 {
10836         struct mlx5_flow_dv_modify_hdr_resource *res =
10837                 container_of(entry, typeof(*res), entry);
10838
10839         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10840         mlx5_free(entry);
10841 }
10842
10843 /**
10844  * Release a modify-header resource.
10845  *
10846  * @param dev
10847  *   Pointer to Ethernet device.
10848  * @param handle
10849  *   Pointer to mlx5_flow_handle.
10850  *
10851  * @return
10852  *   1 while a reference on it exists, 0 when freed.
10853  */
10854 static int
10855 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10856                                     struct mlx5_flow_handle *handle)
10857 {
10858         struct mlx5_priv *priv = dev->data->dev_private;
10859         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10860
10861         MLX5_ASSERT(entry->action);
10862         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10863 }
10864
10865 void
10866 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10867                           struct mlx5_cache_entry *entry)
10868 {
10869         struct mlx5_dev_ctx_shared *sh = list->ctx;
10870         struct mlx5_flow_dv_port_id_action_resource *cache =
10871                         container_of(entry, typeof(*cache), entry);
10872
10873         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10874         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10875 }
10876
10877 /**
10878  * Release port ID action resource.
10879  *
10880  * @param dev
10881  *   Pointer to Ethernet device.
10882  * @param port_id
10883  *   Index to port ID action resource.
10884  *
10885  * @return
10886  *   1 while a reference on it exists, 0 when freed.
10887  */
10888 static int
10889 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10890                                         uint32_t port_id)
10891 {
10892         struct mlx5_priv *priv = dev->data->dev_private;
10893         struct mlx5_flow_dv_port_id_action_resource *cache;
10894
10895         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10896         if (!cache)
10897                 return 0;
10898         MLX5_ASSERT(cache->action);
10899         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10900                                      &cache->entry);
10901 }
10902
10903 void
10904 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10905                             struct mlx5_cache_entry *entry)
10906 {
10907         struct mlx5_dev_ctx_shared *sh = list->ctx;
10908         struct mlx5_flow_dv_push_vlan_action_resource *cache =
10909                         container_of(entry, typeof(*cache), entry);
10910
10911         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10912         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10913 }
10914
10915 /**
10916  * Release push vlan action resource.
10917  *
10918  * @param dev
10919  *   Pointer to Ethernet device.
10920  * @param handle
10921  *   Pointer to mlx5_flow_handle.
10922  *
10923  * @return
10924  *   1 while a reference on it exists, 0 when freed.
10925  */
10926 static int
10927 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10928                                           struct mlx5_flow_handle *handle)
10929 {
10930         struct mlx5_priv *priv = dev->data->dev_private;
10931         struct mlx5_flow_dv_push_vlan_action_resource *cache;
10932         uint32_t idx = handle->dvh.rix_push_vlan;
10933
10934         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10935         if (!cache)
10936                 return 0;
10937         MLX5_ASSERT(cache->action);
10938         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10939                                      &cache->entry);
10940 }
10941
10942 /**
10943  * Release the fate resource.
10944  *
10945  * @param dev
10946  *   Pointer to Ethernet device.
10947  * @param handle
10948  *   Pointer to mlx5_flow_handle.
10949  */
10950 static void
10951 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
10952                                struct mlx5_flow_handle *handle)
10953 {
10954         if (!handle->rix_fate)
10955                 return;
10956         switch (handle->fate_action) {
10957         case MLX5_FLOW_FATE_QUEUE:
10958                 mlx5_hrxq_release(dev, handle->rix_hrxq);
10959                 break;
10960         case MLX5_FLOW_FATE_JUMP:
10961                 flow_dv_jump_tbl_resource_release(dev, handle);
10962                 break;
10963         case MLX5_FLOW_FATE_PORT_ID:
10964                 flow_dv_port_id_action_resource_release(dev,
10965                                 handle->rix_port_id_action);
10966                 break;
10967         default:
10968                 DRV_LOG(DEBUG, "Incorrect fate action: %d", handle->fate_action);
10969                 break;
10970         }
10971         handle->rix_fate = 0;
10972 }
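
/*
 * The rix_* indices used above share a single union slot in
 * struct mlx5_flow_handle (defined in mlx5_flow.h), which is why the
 * release must dispatch on fate_action before touching them; roughly:
 *
 *	union {
 *		uint32_t rix_hrxq;		MLX5_FLOW_FATE_QUEUE
 *		uint32_t rix_jump;		MLX5_FLOW_FATE_JUMP
 *		uint32_t rix_port_id_action;	MLX5_FLOW_FATE_PORT_ID
 *		uint32_t rix_fate;		generic view of the slot
 *	};
 */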
10973
10974 void
10975 flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
10976                          struct mlx5_cache_entry *entry)
10977 {
10978         struct rte_eth_dev *dev = list->ctx;
10979         struct mlx5_priv *priv = dev->data->dev_private;
10980         struct mlx5_flow_dv_sample_resource *cache_resource =
10981                         container_of(entry, typeof(*cache_resource), entry);
10982
10983         if (cache_resource->verbs_action)
10984                 claim_zero(mlx5_glue->destroy_flow_action
10985                                 (cache_resource->verbs_action));
10986         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
10987                 if (cache_resource->default_miss)
10988                         claim_zero(mlx5_glue->destroy_flow_action
10989                           (cache_resource->default_miss));
10990         }
10991         if (cache_resource->normal_path_tbl)
10992                 flow_dv_tbl_resource_release(MLX5_SH(dev),
10993                         cache_resource->normal_path_tbl);
10994         flow_dv_sample_sub_actions_release(dev,
10995                                 &cache_resource->sample_idx);
10996         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
10997                         cache_resource->idx);
10998         DRV_LOG(DEBUG, "sample resource %p: removed",
10999                 (void *)cache_resource);
11000 }
11001
11002 /**
11003  * Release a sample resource.
11004  *
11005  * @param dev
11006  *   Pointer to Ethernet device.
11007  * @param handle
11008  *   Pointer to mlx5_flow_handle.
11009  *
11010  * @return
11011  *   1 while a reference on it exists, 0 when freed.
11012  */
11013 static int
11014 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11015                                      struct mlx5_flow_handle *handle)
11016 {
11017         struct mlx5_priv *priv = dev->data->dev_private;
11018         struct mlx5_flow_dv_sample_resource *cache_resource;
11019
11020         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11021                          handle->dvh.rix_sample);
11022         if (!cache_resource)
11023                 return 0;
11024         MLX5_ASSERT(cache_resource->verbs_action);
11025         return mlx5_cache_unregister(&priv->sh->sample_action_list,
11026                                      &cache_resource->entry);
11027 }
11028
11029 void
11030 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
11031                              struct mlx5_cache_entry *entry)
11032 {
11033         struct rte_eth_dev *dev = list->ctx;
11034         struct mlx5_priv *priv = dev->data->dev_private;
11035         struct mlx5_flow_dv_dest_array_resource *cache_resource =
11036                         container_of(entry, typeof(*cache_resource), entry);
11037         uint32_t i = 0;
11038
11039         MLX5_ASSERT(cache_resource->action);
11040         if (cache_resource->action)
11041                 claim_zero(mlx5_glue->destroy_flow_action
11042                                         (cache_resource->action));
11043         for (; i < cache_resource->num_of_dest; i++)
11044                 flow_dv_sample_sub_actions_release(dev,
11045                                 &cache_resource->sample_idx[i]);
11046         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11047                         cache_resource->idx);
11048         DRV_LOG(DEBUG, "destination array resource %p: removed",
11049                 (void *)cache_resource);
11050 }
11051
11052 /**
11053  * Release a destination array resource.
11054  *
11055  * @param dev
11056  *   Pointer to Ethernet device.
11057  * @param handle
11058  *   Pointer to mlx5_flow_handle.
11059  *
11060  * @return
11061  *   1 while a reference on it exists, 0 when freed.
11062  */
11063 static int
11064 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11065                                     struct mlx5_flow_handle *handle)
11066 {
11067         struct mlx5_priv *priv = dev->data->dev_private;
11068         struct mlx5_flow_dv_dest_array_resource *cache;
11069
11070         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11071                                handle->dvh.rix_dest_array);
11072         if (!cache)
11073                 return 0;
11074         MLX5_ASSERT(cache->action);
11075         return mlx5_cache_unregister(&priv->sh->dest_array_list,
11076                                      &cache->entry);
11077 }
11078
11079 /**
11080  * Remove the flow from the NIC but keep it in memory.
11081  * Lock free, (mutex should be acquired by caller).
11082  *
11083  * @param[in] dev
11084  *   Pointer to Ethernet device.
11085  * @param[in, out] flow
11086  *   Pointer to flow structure.
11087  */
11088 static void
11089 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11090 {
11091         struct mlx5_flow_handle *dh;
11092         uint32_t handle_idx;
11093         struct mlx5_priv *priv = dev->data->dev_private;
11094
11095         if (!flow)
11096                 return;
11097         handle_idx = flow->dev_handles;
11098         while (handle_idx) {
11099                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11100                                     handle_idx);
11101                 if (!dh)
11102                         return;
11103                 if (dh->drv_flow) {
11104                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
11105                         dh->drv_flow = NULL;
11106                 }
11107                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
11108                         flow_dv_fate_resource_release(dev, dh);
11109                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11110                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11111                 handle_idx = dh->next.next;
11112         }
11113 }
11114
11115 /**
11116  * Remove the flow from the NIC and the memory.
11117  * Lock free, (mutex should be acquired by caller).
11118  *
11119  * @param[in] dev
11120  *   Pointer to the Ethernet device structure.
11121  * @param[in, out] flow
11122  *   Pointer to flow structure.
11123  */
11124 static void
11125 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11126 {
11127         struct mlx5_flow_handle *dev_handle;
11128         struct mlx5_priv *priv = dev->data->dev_private;
11129
11130         if (!flow)
11131                 return;
11132         flow_dv_remove(dev, flow);
11133         if (flow->shared_rss) {
11134                 struct mlx5_shared_action_rss *shared_rss = mlx5_ipool_get
11135                                 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11136                                                               flow->shared_rss);
11137
11138                 __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
11139         }
11140         if (flow->counter) {
11141                 flow_dv_counter_free(dev, flow->counter);
11142                 flow->counter = 0;
11143         }
11144         if (flow->meter) {
11145                 struct mlx5_flow_meter *fm;
11146
11147                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
11148                                     flow->meter);
11149                 if (fm)
11150                         mlx5_flow_meter_detach(fm);
11151                 flow->meter = 0;
11152         }
11153         if (flow->age)
11154                 flow_dv_aso_age_release(dev, flow->age);
11155         while (flow->dev_handles) {
11156                 uint32_t tmp_idx = flow->dev_handles;
11157
11158                 dev_handle = mlx5_ipool_get(priv->sh->ipool
11159                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
11160                 if (!dev_handle)
11161                         return;
11162                 flow->dev_handles = dev_handle->next.next;
11163                 if (dev_handle->dvh.matcher)
11164                         flow_dv_matcher_release(dev, dev_handle);
11165                 if (dev_handle->dvh.rix_sample)
11166                         flow_dv_sample_resource_release(dev, dev_handle);
11167                 if (dev_handle->dvh.rix_dest_array)
11168                         flow_dv_dest_array_resource_release(dev, dev_handle);
11169                 if (dev_handle->dvh.rix_encap_decap)
11170                         flow_dv_encap_decap_resource_release(dev,
11171                                 dev_handle->dvh.rix_encap_decap);
11172                 if (dev_handle->dvh.modify_hdr)
11173                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
11174                 if (dev_handle->dvh.rix_push_vlan)
11175                         flow_dv_push_vlan_action_resource_release(dev,
11176                                                                   dev_handle);
11177                 if (dev_handle->dvh.rix_tag)
11178                         flow_dv_tag_release(dev,
11179                                             dev_handle->dvh.rix_tag);
11180                 flow_dv_fate_resource_release(dev, dev_handle);
11181                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11182                            tmp_idx);
11183         }
11184 }
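
/*
 * Teardown order in flow_dv_destroy() above: the hardware rules go
 * first (flow_dv_remove()), then the per-flow resources (shared RSS
 * reference, counter, meter, age), and finally each handle's cached
 * resources, roughly in the reverse order of their creation:
 *
 *	matcher -> sample -> dest array -> encap/decap ->
 *	modify header -> push VLAN -> tag -> fate
 *
 * so that nothing is freed while a hardware rule can still
 * reference it.
 */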
11185
11186 /**
11187  * Release array of hash RX queue objects.
11188  * Helper function.
11189  *
11190  * @param[in] dev
11191  *   Pointer to the Ethernet device structure.
11192  * @param[in, out] hrxqs
11193  *   Array of hash RX queue objects.
11194  *
11195  * @return
11196  *   Total number of references to hash RX queue objects in *hrxqs* array
11197  *   after this operation.
11198  */
11199 static int
11200 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11201                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11202 {
11203         size_t i;
11204         int remaining = 0;
11205
11206         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11207                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11208
11209                 if (!ret)
11210                         (*hrxqs)[i] = 0;
11211                 remaining += ret;
11212         }
11213         return remaining;
11214 }
11215
11216 /**
11217  * Release all hash RX queue objects representing shared RSS action.
11218  *
11219  * @param[in] dev
11220  *   Pointer to the Ethernet device structure.
11221  * @param[in, out] action
11222  *   Shared RSS action to remove hash RX queue objects from.
11223  *
11224  * @return
11225  *   Total number of references to hash RX queue objects stored in *action*
11226  *   after this operation.
11227  *   Expected to be 0 if no external references are held.
11228  */
11229 static int
11230 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11231                                  struct mlx5_shared_action_rss *action)
11232 {
11233         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11234                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11235 }
11236
11237 /**
11238  * Setup shared RSS action.
11239  * Prepare set of hash RX queue objects sufficient to handle all valid
11240  * hash_fields combinations (see enum ibv_rx_hash_fields).
11241  *
11242  * @param[in] dev
11243  *   Pointer to the Ethernet device structure.
11244  * @param[in, out] action
11245  *   Partially initialized shared RSS action.
11246  * @param[out] error
11247  *   Perform verbose error reporting if not NULL. Initialized in case of
11248  *   error only.
11249  *
11250  * @return
11251  *   0 on success, otherwise negative errno value.
11252  */
11253 static int
11254 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11255                         struct mlx5_shared_action_rss *action,
11256                         struct rte_flow_error *error)
11257 {
11258         struct mlx5_flow_rss_desc rss_desc = { 0 };
11259         size_t i;
11260         int err;
11261
11262         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11263         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11264         rss_desc.const_q = action->origin.queue;
11265         rss_desc.queue_num = action->origin.queue_num;
11266         rss_desc.standalone = true;
11267         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11268                 uint32_t hrxq_idx;
11269                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11270                 int tunnel;
11271
11272                 for (tunnel = 0; tunnel < 2; tunnel++) {
11273                         rss_desc.tunnel = tunnel;
11274                         rss_desc.hash_fields = hash_fields;
11275                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11276                         if (!hrxq_idx) {
11277                                 rte_flow_error_set
11278                                         (error, rte_errno,
11279                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11280                                          "cannot get hash queue");
11281                                 goto error_hrxq_new;
11282                         }
11283                         err = __flow_dv_action_rss_hrxq_set
11284                                 (action, hash_fields, tunnel, hrxq_idx);
11285                         MLX5_ASSERT(!err);
11286                 }
11287         }
11288         return 0;
11289 error_hrxq_new:
11290         err = rte_errno;
11291         __flow_dv_action_rss_hrxqs_release(dev, action);
11292         rte_errno = err;
11293         return -rte_errno;
11294 }
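
/*
 * The setup above eagerly instantiates one hash RX queue object per
 * (hash_fields, tunnel) pair, 2 * MLX5_RSS_HASH_FIELDS_LEN in total,
 * so that later rule insertion only needs a lookup keyed the same
 * way, as in __flow_dv_rss_get_hrxq():
 *
 *	hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *			(dev, flow->shared_rss, dev_flow->hash_fields,
 *			 !!(dev_flow->handle->layers &
 *			    MLX5_FLOW_LAYER_TUNNEL));
 */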
11295
11296 /**
11297  * Create shared RSS action.
11298  *
11299  * @param[in] dev
11300  *   Pointer to the Ethernet device structure.
11301  * @param[in] conf
11302  *   Shared action configuration.
11303  * @param[in] rss
11304  *   RSS action specification used to create shared action.
11305  * @param[out] error
11306  *   Perform verbose error reporting if not NULL. Initialized in case of
11307  *   error only.
11308  *
11309  * @return
11310  *   A valid shared action ID in case of success, 0 otherwise and
11311  *   rte_errno is set.
11312  */
11313 static uint32_t
11314 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11315                             const struct rte_flow_shared_action_conf *conf,
11316                             const struct rte_flow_action_rss *rss,
11317                             struct rte_flow_error *error)
11318 {
11319         struct mlx5_priv *priv = dev->data->dev_private;
11320         struct mlx5_shared_action_rss *shared_action = NULL;
11321         void *queue = NULL;
11322         struct rte_flow_action_rss *origin;
11323         const uint8_t *rss_key;
11324         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11325         uint32_t idx;
11326
11327         RTE_SET_USED(conf);
11328         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11329                             0, SOCKET_ID_ANY);
11330         shared_action = mlx5_ipool_zmalloc
11331                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
11332         if (!shared_action || !queue) {
11333                 rte_flow_error_set(error, ENOMEM,
11334                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11335                                    "cannot allocate resource memory");
11336                 goto error_rss_init;
11337         }
11338         if (idx >= (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
11339                 rte_flow_error_set(error, E2BIG,
11340                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11341                                    "rss action number out of range");
11342                 goto error_rss_init;
11343         }
11344         shared_action->queue = queue;
11345         origin = &shared_action->origin;
11346         origin->func = rss->func;
11347         origin->level = rss->level;
11348         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11349         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11350         /* NULL RSS key indicates default RSS key. */
11351         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11352         memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11353         origin->key = &shared_action->key[0];
11354         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11355         memcpy(shared_action->queue, rss->queue, queue_size);
11356         origin->queue = shared_action->queue;
11357         origin->queue_num = rss->queue_num;
11358         if (__flow_dv_action_rss_setup(dev, shared_action, error))
11359                 goto error_rss_init;
11360         __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
11361         rte_spinlock_lock(&priv->shared_act_sl);
11362         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11363                      &priv->rss_shared_actions, idx, shared_action, next);
11364         rte_spinlock_unlock(&priv->shared_act_sl);
11365         return idx;
11366 error_rss_init:
11367         if (shared_action)
11368                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11369                                 idx);
11370         if (queue)
11371                 mlx5_free(queue);
11372         return 0;
11373 }
11374
11375 /**
11376  * Destroy the shared RSS action.
11377  * Release related hash RX queue objects.
11378  *
11379  * @param[in] dev
11380  *   Pointer to the Ethernet device structure.
11381  * @param[in] idx
11382  *   The shared RSS action object ID to be removed.
11383  * @param[out] error
11384  *   Perform verbose error reporting if not NULL. Initialized in case of
11385  *   error only.
11386  *
11387  * @return
11388  *   0 on success, otherwise negative errno value.
11389  */
11390 static int
11391 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
11392                              struct rte_flow_error *error)
11393 {
11394         struct mlx5_priv *priv = dev->data->dev_private;
11395         struct mlx5_shared_action_rss *shared_rss =
11396             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11397         uint32_t old_refcnt = 1;
11398         int remaining;
11399
11400         if (!shared_rss)
11401                 return rte_flow_error_set(error, EINVAL,
11402                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11403                                           "invalid shared action");
11404         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11405         if (remaining)
11406                 return rte_flow_error_set(error, ETOOMANYREFS,
11407                                           RTE_FLOW_ERROR_TYPE_ACTION,
11408                                           NULL,
11409                                           "shared rss hrxq has references");
11410         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
11411                                          0, 0, __ATOMIC_ACQUIRE,
11412                                          __ATOMIC_RELAXED))
11413                 return rte_flow_error_set(error, ETOOMANYREFS,
11414                                           RTE_FLOW_ERROR_TYPE_ACTION,
11415                                           NULL,
11416                                           "shared rss has references");
11417         mlx5_free(shared_rss->queue);
11418         rte_spinlock_lock(&priv->shared_act_sl);
11419         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11420                      &priv->rss_shared_actions, idx, shared_rss, next);
11421         rte_spinlock_unlock(&priv->shared_act_sl);
11422         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11423                         idx);
11424         return 0;
11425 }
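
/*
 * The release above is a two-stage check: every pre-created hash RX
 * queue must be free of external references first, then the action's
 * own refcnt must be exactly 1 (the creator's reference). Flows attach
 * with __atomic_add_fetch() and detach with __atomic_sub_fetch() (see
 * flow_dv_destroy()), so the compare-exchange
 *
 *	uint32_t expected = 1;
 *	__atomic_compare_exchange_n(&shared_rss->refcnt, &expected, 0,
 *				    0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
 *
 * fails, and the destroy is refused with ETOOMANYREFS, whenever some
 * flow still holds the action.
 */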
11426
11427 /**
11428  * Create shared action, lock free,
11429  * (mutex should be acquired by caller).
11430  * Dispatcher for action type specific call.
11431  *
11432  * @param[in] dev
11433  *   Pointer to the Ethernet device structure.
11434  * @param[in] conf
11435  *   Shared action configuration.
11436  * @param[in] action
11437  *   Action specification used to create shared action.
11438  * @param[out] error
11439  *   Perform verbose error reporting if not NULL. Initialized in case of
11440  *   error only.
11441  *
11442  * @return
11443  *   A valid shared action handle in case of success, NULL otherwise and
11444  *   rte_errno is set.
11445  */
11446 static struct rte_flow_shared_action *
11447 flow_dv_action_create(struct rte_eth_dev *dev,
11448                       const struct rte_flow_shared_action_conf *conf,
11449                       const struct rte_flow_action *action,
11450                       struct rte_flow_error *err)
11451 {
11452         uint32_t idx = 0;
11453         uint32_t ret = 0;
11454
11455         switch (action->type) {
11456         case RTE_FLOW_ACTION_TYPE_RSS:
11457                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
11458                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
11459                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11460                 break;
11461         case RTE_FLOW_ACTION_TYPE_AGE:
11462                 ret = flow_dv_translate_create_aso_age(dev, action->conf);
11463                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
11464                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11465                 if (ret) {
11466                         struct mlx5_aso_age_action *aso_age =
11467                                               flow_aso_age_get_by_idx(dev, ret);
11468
11469                         if (!aso_age->age_params.context)
11470                                 aso_age->age_params.context =
11471                                                          (void *)(uintptr_t)idx;
11472                 }
11473                 break;
11474         default:
11475                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11476                                    NULL, "action type not supported");
11477                 break;
11478         }
11479         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
11480 }
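
/*
 * The opaque handle returned above packs the action type into the high
 * bits and the ipool index into the low MLX5_SHARED_ACTION_TYPE_OFFSET
 * bits; the dispatchers below reverse the encoding:
 *
 *	encode (here):
 *		idx = (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
 *	decode (destroy/update/query):
 *		type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
 *		idx  = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 */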
11481
11482 /**
11483  * Destroy the shared action.
11484  * Release action related resources on the NIC and the memory.
11485  * Lock free, (mutex should be acquired by caller).
11486  * Dispatcher for action type specific call.
11487  *
11488  * @param[in] dev
11489  *   Pointer to the Ethernet device structure.
11490  * @param[in] action
11491  *   The shared action object to be removed.
11492  * @param[out] error
11493  *   Perform verbose error reporting if not NULL. Initialized in case of
11494  *   error only.
11495  *
11496  * @return
11497  *   0 on success, otherwise negative errno value.
11498  */
11499 static int
11500 flow_dv_action_destroy(struct rte_eth_dev *dev,
11501                        struct rte_flow_shared_action *action,
11502                        struct rte_flow_error *error)
11503 {
11504         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11505         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11506         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11507         int ret;
11508
11509         switch (type) {
11510         case MLX5_SHARED_ACTION_TYPE_RSS:
11511                 return __flow_dv_action_rss_release(dev, idx, error);
11512         case MLX5_SHARED_ACTION_TYPE_AGE:
11513                 ret = flow_dv_aso_age_release(dev, idx);
11514                 if (ret)
11515                         /*
11516                          * In this case, the last flow holding the
11517                          * reference will actually release the age action.
11518                          */
11519                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
11520                                 " released with references %d.", idx, ret);
11521                 return 0;
11522         default:
11523                 return rte_flow_error_set(error, ENOTSUP,
11524                                           RTE_FLOW_ERROR_TYPE_ACTION,
11525                                           NULL,
11526                                           "action type not supported");
11527         }
11528 }
11529
11530 /**
11531  * Update shared RSS action configuration in place.
11532  *
11533  * @param[in] dev
11534  *   Pointer to the Ethernet device structure.
11535  * @param[in] idx
11536  *   The shared RSS action object ID to be updated.
11537  * @param[in] action_conf
11538  *   RSS action specification used to modify *shared_rss*.
11539  * @param[out] error
11540  *   Perform verbose error reporting if not NULL. Initialized in case of
11541  *   error only.
11542  *
11543  * @return
11544  *   0 on success, otherwise negative errno value.
11545  * @note: currently only update of the RSS queues is supported.
11546  */
11547 static int
11548 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
11549                             const struct rte_flow_action_rss *action_conf,
11550                             struct rte_flow_error *error)
11551 {
11552         struct mlx5_priv *priv = dev->data->dev_private;
11553         struct mlx5_shared_action_rss *shared_rss =
11554             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11555         size_t i;
11556         int ret;
11557         void *queue = NULL;
11558         const uint8_t *rss_key;
11559         uint32_t rss_key_len;
11560         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11561
11562         if (!shared_rss)
11563                 return rte_flow_error_set(error, EINVAL,
11564                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11565                                           "invalid shared action to update");
11566         queue = mlx5_malloc(MLX5_MEM_ZERO,
11567                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11568                             0, SOCKET_ID_ANY);
11569         if (!queue)
11570                 return rte_flow_error_set(error, ENOMEM,
11571                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11572                                           NULL,
11573                                           "cannot allocate resource memory");
11574         if (action_conf->key) {
11575                 rss_key = action_conf->key;
11576                 rss_key_len = action_conf->key_len;
11577         } else {
11578                 rss_key = rss_hash_default_key;
11579                 rss_key_len = MLX5_RSS_HASH_KEY_LEN;
11580         }
11581         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11582                 uint32_t hrxq_idx;
11583                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11584                 int tunnel;
11585
11586                 for (tunnel = 0; tunnel < 2; tunnel++) {
11587                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup
11588                                         (dev, idx, hash_fields, tunnel);
11589                         MLX5_ASSERT(hrxq_idx);
11590                         ret = mlx5_hrxq_modify
11591                                 (dev, hrxq_idx,
11592                                  rss_key, rss_key_len,
11593                                  hash_fields,
11594                                  action_conf->queue, action_conf->queue_num);
11595                         if (ret) {
11596                                 mlx5_free(queue);
11597                                 return rte_flow_error_set
11598                                         (error, rte_errno,
11599                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11600                                          "cannot update hash queue");
11601                         }
11602                 }
11603         }
11604         mlx5_free(shared_rss->queue);
11605         shared_rss->queue = queue;
11606         memcpy(shared_rss->queue, action_conf->queue, queue_size);
11607         shared_rss->origin.queue = shared_rss->queue;
11608         shared_rss->origin.queue_num = action_conf->queue_num;
11609         return 0;
11610 }
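
/*
 * The update above re-targets every pre-created hash RX queue to the
 * new queue set through mlx5_hrxq_modify(); the (hash_fields, tunnel)
 * key of each hrxq is preserved, which is what keeps existing flows
 * referencing the shared action valid across the update. Caller
 * sketch (new_queues/new_queue_num are the caller's own data):
 *
 *	struct rte_flow_action_rss conf = {
 *		.queue = new_queues,
 *		.queue_num = new_queue_num,
 *	};
 *
 *	__flow_dv_action_rss_update(dev, idx, &conf, &error);
 */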
11611
11612 /**
11613  * Update shared action configuration in place, lock free,
11614  * (mutex should be acquired by caller).
11615  *
11616  * @param[in] dev
11617  *   Pointer to the Ethernet device structure.
11618  * @param[in] action
11619  *   The shared action object to be updated.
11620  * @param[in] action_conf
11621  *   Action specification used to modify *action*.
11622  *   *action_conf* should be of a type matching the type of *action*,
11623  *   otherwise it is considered invalid.
11624  * @param[out] error
11625  *   Perform verbose error reporting if not NULL. Initialized in case of
11626  *   error only.
11627  *
11628  * @return
11629  *   0 on success, otherwise negative errno value.
11630  */
11631 static int
11632 flow_dv_action_update(struct rte_eth_dev *dev,
11633                         struct rte_flow_shared_action *action,
11634                         const void *action_conf,
11635                         struct rte_flow_error *err)
11636 {
11637         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11638         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11639         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11640
11641         switch (type) {
11642         case MLX5_SHARED_ACTION_TYPE_RSS:
11643                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
11644         default:
11645                 return rte_flow_error_set(err, ENOTSUP,
11646                                           RTE_FLOW_ERROR_TYPE_ACTION,
11647                                           NULL,
11648                                           "action type update not supported");
11649         }
11650 }
11651
11652 static int
11653 flow_dv_action_query(struct rte_eth_dev *dev,
11654                      const struct rte_flow_shared_action *action, void *data,
11655                      struct rte_flow_error *error)
11656 {
11657         struct mlx5_age_param *age_param;
11658         struct rte_flow_query_age *resp;
11659         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11660         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11661         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11662
11663         switch (type) {
11664         case MLX5_SHARED_ACTION_TYPE_AGE:
11665                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
11666                 resp = data;
11667                 resp->aged = __atomic_load_n(&age_param->state,
11668                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
11669                                                                           1 : 0;
11670                 resp->sec_since_last_hit_valid = !resp->aged;
11671                 if (resp->sec_since_last_hit_valid)
11672                         resp->sec_since_last_hit = __atomic_load_n
11673                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11674                 return 0;
11675         default:
11676                 return rte_flow_error_set(error, ENOTSUP,
11677                                           RTE_FLOW_ERROR_TYPE_ACTION,
11678                                           NULL,
11679                                           "action type query not supported");
11680         }
11681 }
11682
11683 /**
11684  * Query a dv flow rule for its statistics via devx.
11685  *
11686  * @param[in] dev
11687  *   Pointer to Ethernet device.
11688  * @param[in] flow
11689  *   Pointer to the sub flow.
11690  * @param[out] data
11691  *   data retrieved by the query.
11692  * @param[out] error
11693  *   Perform verbose error reporting if not NULL.
11694  *
11695  * @return
11696  *   0 on success, a negative errno value otherwise and rte_errno is set.
11697  */
11698 static int
11699 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11700                     void *data, struct rte_flow_error *error)
11701 {
11702         struct mlx5_priv *priv = dev->data->dev_private;
11703         struct rte_flow_query_count *qc = data;
11704
11705         if (!priv->config.devx)
11706                 return rte_flow_error_set(error, ENOTSUP,
11707                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11708                                           NULL,
11709                                           "counters are not supported");
11710         if (flow->counter) {
11711                 uint64_t pkts, bytes;
11712                 struct mlx5_flow_counter *cnt;
11713                 int err;
11714
11715                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
11716                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
11717                                            &bytes);
11718
11719                 if (err)
11720                         return rte_flow_error_set(error, -err,
11721                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11722                                         NULL, "cannot read counters");
11723                 qc->hits_set = 1;
11724                 qc->bytes_set = 1;
11725                 qc->hits = pkts - cnt->hits;
11726                 qc->bytes = bytes - cnt->bytes;
11727                 if (qc->reset) {
11728                         cnt->hits = pkts;
11729                         cnt->bytes = bytes;
11730                 }
11731                 return 0;
11732         }
11733         return rte_flow_error_set(error, EINVAL,
11734                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11735                                   NULL,
11736                                   "counters are not available");
11737 }
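
/*
 * Worked example of the delta accounting above: if the hardware
 * counter reads pkts = 1000, bytes = 64000 and the stored baseline is
 * cnt->hits = 400, cnt->bytes = 25600, the query returns
 *
 *	qc->hits  = 1000  - 400   = 600
 *	qc->bytes = 64000 - 25600 = 38400
 *
 * and, when qc->reset is set, the baseline advances to 1000/64000 so
 * the next query counts from zero again.
 */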
11738
11739 /**
11740  * Query a flow rule AGE action for aging information.
11741  *
11742  * @param[in] dev
11743  *   Pointer to Ethernet device.
11744  * @param[in] flow
11745  *   Pointer to the sub flow.
11746  * @param[out] data
11747  *   data retrieved by the query.
11748  * @param[out] error
11749  *   Perform verbose error reporting if not NULL.
11750  *
11751  * @return
11752  *   0 on success, a negative errno value otherwise and rte_errno is set.
11753  */
11754 static int
11755 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11756                   void *data, struct rte_flow_error *error)
11757 {
11758         struct rte_flow_query_age *resp = data;
11759         struct mlx5_age_param *age_param;
11760
11761         if (flow->age) {
11762                 struct mlx5_aso_age_action *act =
11763                                      flow_aso_age_get_by_idx(dev, flow->age);
11764
11765                 age_param = &act->age_params;
11766         } else if (flow->counter) {
11767                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
11768
11769                 if (!age_param || !age_param->timeout)
11770                         return rte_flow_error_set
11771                                         (error, EINVAL,
11772                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11773                                          NULL, "cannot read age data");
11774         } else {
11775                 return rte_flow_error_set(error, EINVAL,
11776                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11777                                           NULL, "age data not available");
11778         }
11779         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
11780                                      AGE_TMOUT ? 1 : 0;
11781         resp->sec_since_last_hit_valid = !resp->aged;
11782         if (resp->sec_since_last_hit_valid)
11783                 resp->sec_since_last_hit = __atomic_load_n
11784                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11785         return 0;
11786 }
11787
11788 /**
11789  * Query a flow.
11790  *
11791  * @see rte_flow_query()
11792  * @see rte_flow_ops
11793  */
11794 static int
11795 flow_dv_query(struct rte_eth_dev *dev,
11796               struct rte_flow *flow,
11797               const struct rte_flow_action *actions,
11798               void *data,
11799               struct rte_flow_error *error)
11800 {
11801         int ret = -EINVAL;
11802
11803         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11804                 switch (actions->type) {
11805                 case RTE_FLOW_ACTION_TYPE_VOID:
11806                         break;
11807                 case RTE_FLOW_ACTION_TYPE_COUNT:
11808                         ret = flow_dv_query_count(dev, flow, data, error);
11809                         break;
11810                 case RTE_FLOW_ACTION_TYPE_AGE:
11811                         ret = flow_dv_query_age(dev, flow, data, error);
11812                         break;
11813                 default:
11814                         return rte_flow_error_set(error, ENOTSUP,
11815                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11816                                                   actions,
11817                                                   "action not supported");
11818                 }
11819         }
11820         return ret;
11821 }
11822
11823 /**
11824  * Destroy the meter table set.
11825  * Lock free, (mutex should be acquired by caller).
11826  *
11827  * @param[in] dev
11828  *   Pointer to Ethernet device.
11829  * @param[in] tbl
11830  *   Pointer to the meter table set.
11831  *
11832  * @return
11833  *   Always 0.
11834  */
11835 static int
11836 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11837                         struct mlx5_meter_domains_infos *tbl)
11838 {
11839         struct mlx5_priv *priv = dev->data->dev_private;
11840         struct mlx5_meter_domains_infos *mtd =
11841                                 (struct mlx5_meter_domains_infos *)tbl;
11842
11843         if (!mtd || !priv->config.dv_flow_en)
11844                 return 0;
11845         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11846                 claim_zero(mlx5_flow_os_destroy_flow
11847                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11848         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11849                 claim_zero(mlx5_flow_os_destroy_flow
11850                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11851         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11852                 claim_zero(mlx5_flow_os_destroy_flow
11853                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11854         if (mtd->egress.color_matcher)
11855                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11856                            (mtd->egress.color_matcher));
11857         if (mtd->egress.any_matcher)
11858                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11859                            (mtd->egress.any_matcher));
11860         if (mtd->egress.tbl)
11861                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
11862         if (mtd->egress.sfx_tbl)
11863                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
11864         if (mtd->ingress.color_matcher)
11865                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11866                            (mtd->ingress.color_matcher));
11867         if (mtd->ingress.any_matcher)
11868                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11869                            (mtd->ingress.any_matcher));
11870         if (mtd->ingress.tbl)
11871                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
11872         if (mtd->ingress.sfx_tbl)
11873                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11874                                              mtd->ingress.sfx_tbl);
11875         if (mtd->transfer.color_matcher)
11876                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11877                            (mtd->transfer.color_matcher));
11878         if (mtd->transfer.any_matcher)
11879                 claim_zero(mlx5_flow_os_destroy_flow_matcher
11880                            (mtd->transfer.any_matcher));
11881         if (mtd->transfer.tbl)
11882                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
11883         if (mtd->transfer.sfx_tbl)
11884                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11885                                              mtd->transfer.sfx_tbl);
11886         if (mtd->drop_actn)
11887                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
11888         mlx5_free(mtd);
11889         return 0;
11890 }
11891
11892 /* Number of meter flow actions, count and jump or count and drop. */
11893 #define METER_ACTIONS 2
11894
11895 /**
11896  * Create the specified domain meter table and suffix table.
11897  *
11898  * @param[in] dev
11899  *   Pointer to Ethernet device.
11900  * @param[in,out] mtb
11901  *   Pointer to DV meter table set.
11902  * @param[in] egress
11903  *   Table attribute.
11904  * @param[in] transfer
11905  *   Table attribute.
11906  * @param[in] color_reg_c_idx
11907  *   Reg C index for color match.
11908  *
11909  * @return
11910  *   0 on success, -1 otherwise and rte_errno is set.
11911  */
11912 static int
11913 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
11914                            struct mlx5_meter_domains_infos *mtb,
11915                            uint8_t egress, uint8_t transfer,
11916                            uint32_t color_reg_c_idx)
11917 {
11918         struct mlx5_priv *priv = dev->data->dev_private;
11919         struct mlx5_dev_ctx_shared *sh = priv->sh;
11920         struct mlx5_flow_dv_match_params mask = {
11921                 .size = sizeof(mask.buf),
11922         };
11923         struct mlx5_flow_dv_match_params value = {
11924                 .size = sizeof(value.buf),
11925         };
11926         struct mlx5dv_flow_matcher_attr dv_attr = {
11927                 .type = IBV_FLOW_ATTR_NORMAL,
11928                 .priority = 0,
11929                 .match_criteria_enable = 0,
11930                 .match_mask = (void *)&mask,
11931         };
11932         void *actions[METER_ACTIONS];
11933         struct mlx5_meter_domain_info *dtb;
11934         struct rte_flow_error error;
11935         int i = 0;
11936         int ret;
11937
11938         if (transfer)
11939                 dtb = &mtb->transfer;
11940         else if (egress)
11941                 dtb = &mtb->egress;
11942         else
11943                 dtb = &mtb->ingress;
11944         /* Create the meter table with METER level. */
11945         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
11946                                             egress, transfer, false, NULL, 0,
11947                                             0, &error);
11948         if (!dtb->tbl) {
11949                 DRV_LOG(ERR, "Failed to create meter policer table.");
11950                 return -1;
11951         }
11952         /* Create the meter suffix table with SUFFIX level. */
11953         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
11954                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
11955                                             egress, transfer, false, NULL, 0,
11956                                             0, &error);
11957         if (!dtb->sfx_tbl) {
11958                 DRV_LOG(ERR, "Failed to create meter suffix table.");
11959                 return -1;
11960         }
11961         /* Create matchers, Any and Color. */
11962         dv_attr.priority = 3;
11963         dv_attr.match_criteria_enable = 0;
11964         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11965                                                &dtb->any_matcher);
11966         if (ret) {
11967                 DRV_LOG(ERR, "Failed to create meter"
11968                              " policer default matcher.");
11969                 goto error_exit;
11970         }
11971         dv_attr.priority = 0;
11972         dv_attr.match_criteria_enable =
11973                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
11974         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
11975                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
11976         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
11977                                                &dtb->color_matcher);
11978         if (ret) {
11979                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
11980                 goto error_exit;
11981         }
11982         if (mtb->count_actns[RTE_MTR_DROPPED])
11983                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
11984         actions[i++] = mtb->drop_actn;
11985         /* Default rule: lowest priority, match any, actions: drop. */
11986         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
11987                                        actions,
11988                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
11989         if (ret) {
11990                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
11991                 goto error_exit;
11992         }
11993         return 0;
11994 error_exit:
11995         return -1;
11996 }
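A note on the color matcher above: it is created with the out-of-range placeholder rte_col_2_mlx5_col(RTE_COLORS), which only serves to program the REG_C mask. The per-color policer rules installed later reuse the same matcher with concrete color values. A fragment sketch of that reuse, borrowing the local names from the function above (illustrative only, not part of the file):

	/* Rewrite the value buffer with a concrete color and create a rule
	 * on the same matcher, exactly as the per-color policer rules do.
	 */
	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
			       rte_col_2_mlx5_col(RTE_COLOR_GREEN), UINT8_MAX);
	ret = mlx5_flow_os_create_flow(dtb->color_matcher, (void *)&value,
				       i, actions, &dtb->policer_rules[0]);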
11997
11998 /**
11999  * Create the needed meter and suffix tables.
12000  * Lock-free (the mutex should be acquired by the caller).
12001  *
12002  * @param[in] dev
12003  *   Pointer to Ethernet device.
12004  * @param[in] fm
12005  *   Pointer to the flow meter.
12006  *
12007  * @return
12008  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12009  */
12010 static struct mlx5_meter_domains_infos *
12011 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
12012                        const struct mlx5_flow_meter *fm)
12013 {
12014         struct mlx5_priv *priv = dev->data->dev_private;
12015         struct mlx5_meter_domains_infos *mtb;
12016         int ret;
12017         int i;
12018
12019         if (!priv->mtr_en) {
12020                 rte_errno = ENOTSUP;
12021                 return NULL;
12022         }
12023         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
12024         if (!mtb) {
12025                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
12026                 return NULL;
12027         }
12028         /* Create the meter count actions. */
12029         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
12030                 struct mlx5_flow_counter *cnt;
12031                 if (!fm->policer_stats.cnt[i])
12032                         continue;
12033                 cnt = flow_dv_counter_get_by_idx(dev,
12034                       fm->policer_stats.cnt[i], NULL);
12035                 mtb->count_actns[i] = cnt->action;
12036         }
12037         /* Create drop action. */
12038         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
12039         if (ret) {
12040                 DRV_LOG(ERR, "Failed to create drop action.");
12041                 goto error_exit;
12042         }
12043         /* Egress meter table. */
12044         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
12045         if (ret) {
12046                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
12047                 goto error_exit;
12048         }
12049         /* Ingress meter table. */
12050         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
12051         if (ret) {
12052                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
12053                 goto error_exit;
12054         }
12055         /* FDB meter table. */
12056         if (priv->config.dv_esw_en) {
12057                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
12058                                                  priv->mtr_color_reg);
12059                 if (ret) {
12060                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
12061                         goto error_exit;
12062                 }
12063         }
12064         return mtb;
12065 error_exit:
12066         flow_dv_destroy_mtr_tbl(dev, mtb);
12067         return NULL;
12068 }
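For context, these per-domain tables are instantiated when an application creates a meter through the public rte_mtr API. A hedged application-side sketch of that path, using the pre-policy (policer-action) API matching this code; the IDs and rates are arbitrary and the function is hypothetical:

#include <rte_mtr.h>

/* Hypothetical caller sketch: add a profile, then create meter 1. */
static int
example_create_meter(uint16_t port_id)
{
	struct rte_mtr_meter_profile prof = {
		.alg = RTE_MTR_SRTCM_RFC2697,
		.srtcm_rfc2697 = {
			.cir = 1000000, /* committed rate, bytes/sec */
			.cbs = 65536,   /* committed burst, bytes */
			.ebs = 65536,   /* excess burst, bytes */
		},
	};
	struct rte_mtr_params params = {
		.meter_profile_id = 1,
		.meter_enable = 1,
		.action = {
			[RTE_COLOR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN,
			[RTE_COLOR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW,
			[RTE_COLOR_RED] = MTR_POLICER_ACTION_DROP,
		},
	};
	struct rte_mtr_error error;

	if (rte_mtr_meter_profile_add(port_id, 1, &prof, &error) != 0)
		return -1;
	return rte_mtr_create(port_id, 1, &params, 1, &error);
}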
12069
12070 /**
12071  * Destroy domain policer rule.
12072  *
12073  * @param[in] dt
12074  *   Pointer to domain table.
12075  */
12076 static void
12077 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12078 {
12079         int i;
12080
12081         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12082                 if (dt->policer_rules[i]) {
12083                         claim_zero(mlx5_flow_os_destroy_flow
12084                                    (dt->policer_rules[i]));
12085                         dt->policer_rules[i] = NULL;
12086                 }
12087         }
12088         if (dt->jump_actn) {
12089                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12090                 dt->jump_actn = NULL;
12091         }
12092 }
12093
12094 /**
12095  * Destroy policer rules.
12096  *
12097  * @param[in] dev
12098  *   Pointer to Ethernet device.
12099  * @param[in] fm
12100  *   Pointer to flow meter structure.
12101  * @param[in] attr
12102  *   Pointer to flow attributes.
12103  *
12104  * @return
12105  *   Always 0.
12106  */
12107 static int
12108 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12109                               const struct mlx5_flow_meter *fm,
12110                               const struct rte_flow_attr *attr)
12111 {
12112         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12113
12114         if (!mtb)
12115                 return 0;
12116         if (attr->egress)
12117                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
12118         if (attr->ingress)
12119                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12120         if (attr->transfer)
12121                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12122         return 0;
12123 }
12124
12125 /**
12126  * Create the specified domain meter policer rule.
12127  *
12128  * @param[in] fm
12129  *   Pointer to flow meter structure.
12130  * @param[in] dtb
12131  *   Pointer to the DV meter domain table.
12132  * @param[in] mtr_reg_c
12133  *   Color match REG_C.
12134  *
12135  * @return
12136  *   0 on success, -1 otherwise.
12137  */
12138 static int
12139 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
12140                                     struct mlx5_meter_domain_info *dtb,
12141                                     uint8_t mtr_reg_c)
12142 {
12143         struct mlx5_flow_dv_match_params matcher = {
12144                 .size = sizeof(matcher.buf),
12145         };
12146         struct mlx5_flow_dv_match_params value = {
12147                 .size = sizeof(value.buf),
12148         };
12149         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12150         void *actions[METER_ACTIONS];
12151         int i;
12152         int ret = 0;
12153
12154         /* Create jump action. */
12155         if (!dtb->jump_actn)
12156                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12157                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
12158         if (ret) {
12159                 DRV_LOG(ERR, "Failed to create policer jump action.");
12160                 goto error;
12161         }
12162         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12163                 int j = 0;
12164
12165                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
12166                                        rte_col_2_mlx5_col(i), UINT8_MAX);
12167                 if (mtb->count_actns[i])
12168                         actions[j++] = mtb->count_actns[i];
12169                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
12170                         actions[j++] = mtb->drop_actn;
12171                 else
12172                         actions[j++] = dtb->jump_actn;
12173                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
12174                                                (void *)&value, j, actions,
12175                                                &dtb->policer_rules[i]);
12176                 if (ret) {
12177                         DRV_LOG(ERR, "Failed to create policer rule.");
12178                         goto error;
12179                 }
12180         }
12181         return 0;
12182 error:
12183         rte_errno = errno;
12184         return -1;
12185 }
12186
12187 /**
12188  * Create policer rules.
12189  *
12190  * @param[in] dev
12191  *   Pointer to Ethernet device.
12192  * @param[in] fm
12193  *   Pointer to flow meter structure.
12194  * @param[in] attr
12195  *   Pointer to flow attributes.
12196  *
12197  * @return
12198  *   0 on success, -1 otherwise.
12199  */
12200 static int
12201 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12202                              struct mlx5_flow_meter *fm,
12203                              const struct rte_flow_attr *attr)
12204 {
12205         struct mlx5_priv *priv = dev->data->dev_private;
12206         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12207         int ret;
12208
12209         if (attr->egress) {
12210                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12211                                                 priv->mtr_color_reg);
12212                 if (ret) {
12213                         DRV_LOG(ERR, "Failed to create egress policer.");
12214                         goto error;
12215                 }
12216         }
12217         if (attr->ingress) {
12218                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12219                                                 priv->mtr_color_reg);
12220                 if (ret) {
12221                         DRV_LOG(ERR, "Failed to create ingress policer.");
12222                         goto error;
12223                 }
12224         }
12225         if (attr->transfer) {
12226                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12227                                                 priv->mtr_color_reg);
12228                 if (ret) {
12229                         DRV_LOG(ERR, "Failed to create transfer policer.");
12230                         goto error;
12231                 }
12232         }
12233         return 0;
12234 error:
12235         flow_dv_destroy_policer_rules(dev, fm, attr);
12236         return -1;
12237 }
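Once the policer rules exist, traffic reaches them by attaching the meter to a flow with the public meter action. A minimal hedged sketch (meter 1 is assumed to exist, as in the rte_mtr example above; the function is hypothetical):

#include <rte_flow.h>

/* Hypothetical sketch: metered packets hit the color rules created above
 * and, unless dropped, continue matching in the meter suffix table.
 */
static int
example_meter_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error) ?
	       0 : -1;
}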
12238
12239 /**
12240  * Validate the batch counter support in root table.
12241  *
12242  * Create a simple flow with an invalid counter offset on the root table to
12243  * detect whether batch counter with offset is supported there or not.
12244  *
12245  * @param[in] dev
12246  *   Pointer to rte_eth_dev structure.
12247  *
12248  * @return
12249  *   0 on success, a negative errno value otherwise and rte_errno is set.
12250  */
12251 int
12252 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12253 {
12254         struct mlx5_priv *priv = dev->data->dev_private;
12255         struct mlx5_dev_ctx_shared *sh = priv->sh;
12256         struct mlx5_flow_dv_match_params mask = {
12257                 .size = sizeof(mask.buf),
12258         };
12259         struct mlx5_flow_dv_match_params value = {
12260                 .size = sizeof(value.buf),
12261         };
12262         struct mlx5dv_flow_matcher_attr dv_attr = {
12263                 .type = IBV_FLOW_ATTR_NORMAL,
12264                 .priority = 0,
12265                 .match_criteria_enable = 0,
12266                 .match_mask = (void *)&mask,
12267         };
12268         void *actions[2] = { 0 };
12269         struct mlx5_flow_tbl_resource *tbl = NULL, *dest_tbl = NULL;
12270         struct mlx5_devx_obj *dcs = NULL;
12271         void *matcher = NULL;
12272         void *flow = NULL;
12273         int i, ret = -1;
12274
12275         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12276         if (!tbl)
12277                 goto err;
12278         dest_tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false,
12279                                             NULL, 0, 0, NULL);
12280         if (!dest_tbl)
12281                 goto err;
12282         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12283         if (!dcs)
12284                 goto err;
12285         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12286                                                     &actions[0]);
12287         if (ret)
12288                 goto err;
12289         ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12290                                 (dest_tbl->obj, &actions[1]);
12291         if (ret)
12292                 goto err;
12293         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12294         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12295                                                &matcher);
12296         if (ret)
12297                 goto err;
12298         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12299                                        actions, &flow);
12300 err:
12301         /*
12302          * If batch counter with offset is not supported, the driver will not
12303          * validate the invalid offset value and flow creation should succeed.
12304          * In this case, batch counter is not supported in the root table.
12305          *
12306          * Otherwise, if flow creation fails, counter offset is supported.
12307          */
12308         if (flow) {
12309                 DRV_LOG(INFO, "Batch counter is not supported in root "
12310                               "table. Switch to fallback mode.");
12311                 rte_errno = ENOTSUP;
12312                 ret = -rte_errno;
12313                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12314         } else {
12315                 /* Make sure the failure happened at flow creation. */
12316                 if (!matcher || errno != EINVAL)
12317                         DRV_LOG(ERR, "Unexpected error in counter offset "
12318                                      "support detection");
12319                 ret = 0;
12320         }
12321         for (i = 0; i < 2; i++) {
12322                 if (actions[i])
12323                         claim_zero(mlx5_flow_os_destroy_flow_action
12324                                    (actions[i]));
12325         }
12326         if (matcher)
12327                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12328         if (tbl)
12329                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12330         if (dest_tbl)
12331                 flow_dv_tbl_resource_release(MLX5_SH(dev), dest_tbl);
12332         if (dcs)
12333                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12334         return ret;
12335 }
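The detection above is a probe-by-rejection pattern: submit a request that the firmware should refuse when the feature exists, and infer support from the failure. Distilled into a hedged generic sketch; the two helper functions are hypothetical stand-ins for the table/matcher/flow calls above:

#include <stdbool.h>

/* Hypothetical wrappers standing in for the DV probe calls above. */
extern void *create_flow_with_bogus_counter_offset(void);
extern void destroy_flow(void *flow);

static bool
counter_offset_supported(void)
{
	void *flow = create_flow_with_bogus_counter_offset();

	if (flow != NULL) {
		/* The bogus offset was never validated: no offset support. */
		destroy_flow(flow);
		return false;
	}
	/* Creation was rejected (EINVAL expected): offset is supported. */
	return true;
}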
12336
12337 /**
12338  * Query a devx counter.
12339  *
12340  * @param[in] dev
12341  *   Pointer to the Ethernet device structure.
12342  * @param[in] counter
12343  *   Index to the flow counter.
12344  * @param[in] clear
12345  *   Set to clear the counter statistics.
12346  * @param[out] pkts
12347  *   The statistics value of packets.
12348  * @param[out] bytes
12349  *   The statistics value of bytes.
12350  *
12351  * @return
12352  *   0 on success, -1 otherwise.
12353  */
12354 static int
12355 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12356                       uint64_t *pkts, uint64_t *bytes)
12357 {
12358         struct mlx5_priv *priv = dev->data->dev_private;
12359         struct mlx5_flow_counter *cnt;
12360         uint64_t inn_pkts, inn_bytes;
12361         int ret;
12362
12363         if (!priv->config.devx)
12364                 return -1;
12365
12366         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12367         if (ret)
12368                 return -1;
12369         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12370         *pkts = inn_pkts - cnt->hits;
12371         *bytes = inn_bytes - cnt->bytes;
12372         if (clear) {
12373                 cnt->hits = inn_pkts;
12374                 cnt->bytes = inn_bytes;
12375         }
12376         return 0;
12377 }
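The query returns deltas against per-counter baselines: cnt->hits and cnt->bytes hold the raw hardware values captured at the last clear, so the reported statistics are "since last clear". A worked sketch of the arithmetic with illustrative numbers:

	/* Sketch: the raw counter reads 1000 packets; the baseline saved at
	 * the last clear (cnt->hits) is 400.
	 */
	uint64_t inn_pkts = 1000;
	uint64_t baseline = 400;
	uint64_t reported = inn_pkts - baseline; /* 600 since last clear */

	/* With clear set, the baseline advances to the raw value, so the
	 * next query reports from zero again.
	 */
	baseline = inn_pkts;
	(void)reported;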
12378
12379 /**
12380  * Get aged-out flows.
12381  *
12382  * @param[in] dev
12383  *   Pointer to the Ethernet device structure.
12384  * @param[in] context
12385  *   The address of an array of pointers to the aged-out flows contexts.
12386  * @param[in] nb_contexts
12387  *   The length of context array pointers.
12388  * @param[out] error
12389  *   Perform verbose error reporting if not NULL. Initialized in case of
12390  *   error only.
12391  *
12392  * @return
12393  *   The number of aged-out flow contexts reported on success, otherwise
12394  *   a negative errno value.
12395  *   If nb_contexts is 0, return the total number of aged-out contexts.
12396  *   If nb_contexts is not 0, return the number of aged-out flows reported
12397  *   in the context array.
12398  */
12399 static int
12400 flow_get_aged_flows(struct rte_eth_dev *dev,
12401                     void **context,
12402                     uint32_t nb_contexts,
12403                     struct rte_flow_error *error)
12404 {
12405         struct mlx5_priv *priv = dev->data->dev_private;
12406         struct mlx5_age_info *age_info;
12407         struct mlx5_age_param *age_param;
12408         struct mlx5_flow_counter *counter;
12409         struct mlx5_aso_age_action *act;
12410         int nb_flows = 0;
12411
12412         if (nb_contexts && !context)
12413                 return rte_flow_error_set(error, EINVAL,
12414                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12415                                           NULL, "empty context");
12416         age_info = GET_PORT_AGE_INFO(priv);
12417         rte_spinlock_lock(&age_info->aged_sl);
12418         LIST_FOREACH(act, &age_info->aged_aso, next) {
12419                 nb_flows++;
12420                 if (nb_contexts) {
12421                         context[nb_flows - 1] =
12422                                                 act->age_params.context;
12423                         if (!(--nb_contexts))
12424                                 break;
12425                 }
12426         }
12427         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12428                 nb_flows++;
12429                 if (nb_contexts) {
12430                         age_param = MLX5_CNT_TO_AGE(counter);
12431                         context[nb_flows - 1] = age_param->context;
12432                         if (!(--nb_contexts))
12433                                 break;
12434                 }
12435         }
12436         rte_spinlock_unlock(&age_info->aged_sl);
12437         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12438         return nb_flows;
12439 }
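Applications reach this handler through the public rte_flow_get_aged_flows() entry point. The return-value convention above supports a two-call pattern, sketched here with a hypothetical caller and minimal error handling:

#include <stdlib.h>
#include <rte_flow.h>

/* Hypothetical caller sketch of the two-call aged-flow query pattern. */
static int
example_collect_aged(uint16_t port_id)
{
	struct rte_flow_error error;
	void **ctxs;
	int total, n;

	/* A first call with nb_contexts == 0 returns the total count... */
	total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
	if (total <= 0)
		return total;
	ctxs = calloc(total, sizeof(*ctxs));
	if (ctxs == NULL)
		return -1;
	/* ...then a second call fills the context array. */
	n = rte_flow_get_aged_flows(port_id, ctxs, total, &error);
	free(ctxs);
	return n;
}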
12440
12441 /*
12442  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12443  */
12444 static uint32_t
12445 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12446 {
12447         return flow_dv_counter_alloc(dev, 0);
12448 }
12449
12450 /**
12451  * Validate shared action.
12452  * Dispatcher for action type specific validation.
12453  *
12454  * @param[in] dev
12455  *   Pointer to the Ethernet device structure.
12456  * @param[in] conf
12457  *   Shared action configuration.
12458  * @param[in] action
12459  *   The shared action object to validate.
12460  * @param[out] error
12461  *   Perform verbose error reporting if not NULL. Initialized in case of
12462  *   error only.
12463  *
12464  * @return
12465  *   0 on success, otherwise negative errno value.
12466  */
12467 static int
12468 flow_dv_action_validate(struct rte_eth_dev *dev,
12469                         const struct rte_flow_shared_action_conf *conf,
12470                         const struct rte_flow_action *action,
12471                         struct rte_flow_error *err)
12472 {
12473         struct mlx5_priv *priv = dev->data->dev_private;
12474
12475         RTE_SET_USED(conf);
12476         switch (action->type) {
12477         case RTE_FLOW_ACTION_TYPE_RSS:
12478                 return mlx5_validate_action_rss(dev, action, err);
12479         case RTE_FLOW_ACTION_TYPE_AGE:
12480                 if (!priv->sh->aso_age_mng)
12481                         return rte_flow_error_set(err, ENOTSUP,
12482                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12483                                                 NULL,
12484                                              "shared age action not supported");
12485                 return flow_dv_validate_action_age(0, action, dev, err);
12486         default:
12487                 return rte_flow_error_set(err, ENOTSUP,
12488                                           RTE_FLOW_ERROR_TYPE_ACTION,
12489                                           NULL,
12490                                           "action type not supported");
12491         }
12492 }
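Only RSS and (ASO-backed) AGE pass this dispatcher; every other action type is rejected. A hedged sketch of the application side using the 20.11-era shared-action API; the function and queue list are illustrative, not from the driver:

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical sketch: create a shared RSS action over two queues. */
static struct rte_flow_shared_action *
example_shared_rss(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1 };
	struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.queue_num = 2,
		.queue = queues,
	};
	struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	struct rte_flow_error error;

	return rte_flow_shared_action_create(port_id, &conf, &action, &error);
}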
12493
12494 static int
12495 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12496 {
12497         struct mlx5_priv *priv = dev->data->dev_private;
12498         int ret = 0;
12499
12500         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12501                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12502                                                 flags);
12503                 if (ret != 0)
12504                         return ret;
12505         }
12506         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12507                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12508                 if (ret != 0)
12509                         return ret;
12510         }
12511         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12512                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12513                 if (ret != 0)
12514                         return ret;
12515         }
12516         return 0;
12517 }
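This op backs the PMD-specific flow sync entry point exported through rte_pmd_mlx5.h; the domain bits used above come from that header. A hedged usage sketch (the sync flags themselves are filled in internally by the entry point, so the caller only selects domains):

#include <rte_pmd_mlx5.h>

/* Hypothetical caller sketch: flush pending steering writes everywhere. */
static int
example_sync_all_domains(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id,
				      MLX5_DOMAIN_BIT_NIC_RX |
				      MLX5_DOMAIN_BIT_NIC_TX |
				      MLX5_DOMAIN_BIT_FDB);
}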
12518
12519 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12520         .validate = flow_dv_validate,
12521         .prepare = flow_dv_prepare,
12522         .translate = flow_dv_translate,
12523         .apply = flow_dv_apply,
12524         .remove = flow_dv_remove,
12525         .destroy = flow_dv_destroy,
12526         .query = flow_dv_query,
12527         .create_mtr_tbls = flow_dv_create_mtr_tbl,
12528         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12529         .create_policer_rules = flow_dv_create_policer_rules,
12530         .destroy_policer_rules = flow_dv_destroy_policer_rules,
12531         .counter_alloc = flow_dv_counter_allocate,
12532         .counter_free = flow_dv_counter_free,
12533         .counter_query = flow_dv_counter_query,
12534         .get_aged_flows = flow_get_aged_flows,
12535         .action_validate = flow_dv_action_validate,
12536         .action_create = flow_dv_action_create,
12537         .action_destroy = flow_dv_action_destroy,
12538         .action_update = flow_dv_action_update,
12539         .action_query = flow_dv_action_query,
12540         .sync_domain = flow_dv_sync_domain,
12541 };
12542
12543 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12544