net/mlx5: support E-Switch mirroring and jump in one flow
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
                                  uint32_t rix_jump);
/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, it means this dev_flow is the
         * suffix flow and the layers flags were set by the prefix flow.
         * Use the layer flags from the prefix flow, since the suffix flow
         * may not carry the user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
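
/*
 * Editor's example (illustrative sketch, not part of the driver): for a
 * pattern eth / ipv4 / udp / end on a flow with no prefix sub-flow
 * (handle->layers == 0), the item loop above produces:
 *
 *   union flow_dv_attr attr = { .attr = 0 };
 *   flow_dv_attr_init(items, &attr, dev_flow, false);
 *   // attr.ipv4 == 1, attr.udp == 1, attr.valid == 1, other bits 0
 */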

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
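
/*
 * Editor's note (illustrative, not part of the driver): the VLAN TCI is
 * laid out as PCP(3 bits) | DEI(1 bit) | VID(12 bits), hence the shift of
 * 13 and the 0x0fff mask above. For example:
 *
 *   struct rte_vlan_hdr vlan = { .vlan_tci = 0 };
 *   // OF_SET_VLAN_PCP with vlan_pcp = 5: tci |= 5 << 13   -> 0xa000
 *   // OF_SET_VLAN_VID with vlan_vid = htons(100): tci |= 100 -> 0xa064
 */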

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
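
/*
 * Editor's example (illustrative): the three-byte case reads a big-endian
 * 16-bit word and appends the trailing byte, so for
 * data = {0x12, 0x34, 0x56}:
 *
 *   flow_dv_fetch_field(data, 3) == 0x123456
 *
 * The result is host-endian on both little- and big-endian CPUs because
 * rte_be_to_cpu_*() does the swapping where needed.
 */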

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field
 * description and the item mask. Data bit offset and width of each action
 * are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field
         * width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
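
/*
 * Editor's example (illustrative): the offset/width deduction above turns
 * a mask into a single modification command. For a fetched mask of
 * 0x00ffff00:
 *
 *   off_b  = rte_bsf32(0x00ffff00)               = 8
 *   size_b = 32 - 8 - __builtin_clz(0x00ffff00)  = 32 - 8 - 8 = 16
 *
 * i.e. a 16-bit field starting at bit 8. A full 32-bit width is encoded
 * as length 0 (the code maps size_b == 32 to 0 before building the
 * command).
 */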

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
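
/*
 * Editor's example (illustrative): a SET_TP_SRC action on a UDP flow
 * builds a one-field item and lets flow_dv_convert_modify_action() emit a
 * single command, e.g. for conf->port = RTE_BE16(0x1234):
 *
 *   udp.hdr.src_port      = 0x1234 (big-endian)
 *   udp_mask.hdr.src_port = 0xffff
 *   // -> one SET command: field MLX5_MODI_OUT_UDP_SPORT,
 *   //    offset 0, length 16, data 0x1234
 *
 * The dst_port entry of modify_udp is skipped because its fetched mask
 * is zero.
 */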

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
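
/*
 * Editor's note (illustrative): the decrement is expressed as an ADD of
 * 0xFF to the 8-bit TTL/hop-limit field. The addition wraps at the field
 * width, and adding 255 modulo 256 is the same as subtracting 1, so a
 * single ADD command decrements the TTL:
 *
 *   (ttl + 0xFF) & 0xFF == ttl - 1   // for any 8-bit ttl value
 */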

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with the increment
                 * operation we add UINT32_MAX X times to Y.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
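
/*
 * Editor's example (illustrative): modulo 2^32, UINT32_MAX equals -1, so
 * multiplying the decrement count by UINT32_MAX and truncating to 32 bits
 * yields the two's-complement negative of the count:
 *
 *   value = 3;
 *   (uint32_t)(value * UINT32_MAX) == 0xFFFFFFFD == (uint32_t)-3
 *   // the ADD command therefore decrements the sequence number by 3
 */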

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X with the increment
                 * operation we add UINT32_MAX X times to Y.
                 * Each addition of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there
                         * is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
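
/*
 * Editor's example (illustrative, with a hypothetical mask value): on a
 * little-endian host with dv_regc0_mask == 0x0000ff00 and dst == REG_C_0:
 *
 *   reg_dst.offset = rte_bsf32(0x0000ff00) = 8
 *   mask = 0x0000ff00 << (32 - rte_fls_u32(0x0000ff00))
 *        = 0x0000ff00 << 16 = 0xff000000
 *
 * so the copy targets the 8 usable bits of reg_c[0], with the mask moved
 * into the position the unconverted (byte-wise) datapath layout expects.
 */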

/**
 * Convert MARK action to DV specification. This routine is used
 * only with extensive metadata and requires a metadata register to be
 * handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In the datapath code there are no endianness
         * conversions, for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte aligned, starting
         * from bit 0 to 5, to be compatible with IPv4. No need to shift
         * the bits in the IPv6 case, as rdma-core requires a byte-aligned
         * value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
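
/*
 * Editor's note (illustrative, assuming the rte_ip.h constant values):
 * both masks reduce to the same byte-aligned 6-bit value:
 *
 *   RTE_IPV4_HDR_DSCP_MASK >> 2  == 0xfc >> 2        == 0x3f
 *   RTE_IPV6_HDR_DSCP_MASK >> 22 == 0x0fc00000 >> 22 == 0x3f
 *
 * so the DSCP value occupies bits 0-5 of a single byte, as rdma-core
 * expects for both IP versions.
 */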

/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr __rte_unused,
                           struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_mark *spec = item->spec;
        const struct rte_flow_item_mark *mask = item->mask;
        const struct rte_flow_item_mark nic_mask = {
                .id = priv->sh->dv_mark_mask,
        };
        int ret;

        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata feature"
                                          " isn't enabled");
        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't supported");
        if (!nic_mask.id)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't available");
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &spec->id,
                                          "mark id exceeds the limit");
        if (!mask)
                mask = &nic_mask;
        if (!mask->id)
                return rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");

        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_mark),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
                return ret;
        return 0;
}
1400
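/*
 * Illustrative sketch, not part of the driver: a MARK item that the
 * checks above accept when dv_xmeta_en is not in legacy mode and an
 * extended metadata register is available. The id must stay within
 * MLX5_FLOW_MARK_MAX and the register mask; the values are arbitrary:
 *
 *      const struct rte_flow_item_mark spec = { .id = 0x2a };
 *      const struct rte_flow_item_mark mask = { .id = 0xffff };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_MARK,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */
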
1401 /**
1402  * Validate META item.
1403  *
1404  * @param[in] dev
1405  *   Pointer to the rte_eth_dev structure.
1406  * @param[in] item
1407  *   Item specification.
1408  * @param[in] attr
1409  *   Attributes of flow that includes this item.
1410  * @param[out] error
1411  *   Pointer to error structure.
1412  *
1413  * @return
1414  *   0 on success, a negative errno value otherwise and rte_errno is set.
1415  */
1416 static int
1417 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1418                            const struct rte_flow_item *item,
1419                            const struct rte_flow_attr *attr,
1420                            struct rte_flow_error *error)
1421 {
1422         struct mlx5_priv *priv = dev->data->dev_private;
1423         struct mlx5_dev_config *config = &priv->config;
1424         const struct rte_flow_item_meta *spec = item->spec;
1425         const struct rte_flow_item_meta *mask = item->mask;
1426         struct rte_flow_item_meta nic_mask = {
1427                 .data = UINT32_MAX
1428         };
1429         int reg;
1430         int ret;
1431
1432         if (!spec)
1433                 return rte_flow_error_set(error, EINVAL,
1434                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1435                                           item->spec,
1436                                           "data cannot be empty");
1437         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1438                 if (!mlx5_flow_ext_mreg_supported(dev))
1439                         return rte_flow_error_set(error, ENOTSUP,
1440                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1441                                           "extended metadata register"
1442                                           " isn't supported");
1443                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1444                 if (reg < 0)
1445                         return reg;
1446                 if (reg == REG_NON)
1447                         return rte_flow_error_set(error, ENOTSUP,
1448                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1449                                         "unavailable extended metadata register");
1450                 if (reg == REG_B)
1451                         return rte_flow_error_set(error, ENOTSUP,
1452                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1453                                           "match on reg_b "
1454                                           "isn't supported");
1455                 if (reg != REG_A)
1456                         nic_mask.data = priv->sh->dv_meta_mask;
1457         } else if (attr->transfer) {
1458                 return rte_flow_error_set(error, ENOTSUP,
1459                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1460                                         "extended metadata feature "
1461                                         "should be enabled when "
1462                                         "meta item is requested "
1463                                         "with e-switch mode");
1464         }
1465         if (!mask)
1466                 mask = &rte_flow_item_meta_mask;
1467         if (!mask->data)
1468                 return rte_flow_error_set(error, EINVAL,
1469                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1470                                         "mask cannot be zero");
1471
1472         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1473                                         (const uint8_t *)&nic_mask,
1474                                         sizeof(struct rte_flow_item_meta),
1475                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1476         return ret;
1477 }
1478
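/*
 * Illustrative sketch, not part of the driver: a META item match. In
 * the extended modes the data is matched through the register
 * resolved by flow_dv_get_metadata_reg(); the spec is mandatory and
 * the mask, defaulting to rte_flow_item_meta_mask, must be non-zero.
 * The values are arbitrary:
 *
 *      const struct rte_flow_item_meta spec = { .data = 0x1234 };
 *      const struct rte_flow_item_meta mask = { .data = 0xffff };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_META,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */
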
1479 /**
1480  * Validate TAG item.
1481  *
1482  * @param[in] dev
1483  *   Pointer to the rte_eth_dev structure.
1484  * @param[in] item
1485  *   Item specification.
1486  * @param[in] attr
1487  *   Attributes of flow that includes this item.
1488  * @param[out] error
1489  *   Pointer to error structure.
1490  *
1491  * @return
1492  *   0 on success, a negative errno value otherwise and rte_errno is set.
1493  */
1494 static int
1495 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1496                           const struct rte_flow_item *item,
1497                           const struct rte_flow_attr *attr __rte_unused,
1498                           struct rte_flow_error *error)
1499 {
1500         const struct rte_flow_item_tag *spec = item->spec;
1501         const struct rte_flow_item_tag *mask = item->mask;
1502         const struct rte_flow_item_tag nic_mask = {
1503                 .data = RTE_BE32(UINT32_MAX),
1504                 .index = 0xff,
1505         };
1506         int ret;
1507
1508         if (!mlx5_flow_ext_mreg_supported(dev))
1509                 return rte_flow_error_set(error, ENOTSUP,
1510                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1511                                           "extended metadata register"
1512                                           " isn't supported");
1513         if (!spec)
1514                 return rte_flow_error_set(error, EINVAL,
1515                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1516                                           item->spec,
1517                                           "data cannot be empty");
1518         if (!mask)
1519                 mask = &rte_flow_item_tag_mask;
1520         if (!mask->data)
1521                 return rte_flow_error_set(error, EINVAL,
1522                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1523                                         "mask cannot be zero");
1524
1525         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1526                                         (const uint8_t *)&nic_mask,
1527                                         sizeof(struct rte_flow_item_tag),
1528                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1529         if (ret < 0)
1530                 return ret;
1531         if (mask->index != 0xff)
1532                 return rte_flow_error_set(error, EINVAL,
1533                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1534                                           "partial mask for tag index"
1535                                           " is not supported");
1536         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1537         if (ret < 0)
1538                 return ret;
1539         MLX5_ASSERT(ret != REG_NON);
1540         return 0;
1541 }
1542
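/*
 * Illustrative sketch, not part of the driver: a TAG item match. The
 * index selects the application tag register (resolved through
 * mlx5_flow_get_reg_id()) and must be fully masked; the data values
 * are arbitrary:
 *
 *      const struct rte_flow_item_tag spec = { .data = 0xbeef, .index = 1 };
 *      const struct rte_flow_item_tag mask = {
 *              .data = 0xffffffff,
 *              .index = 0xff,
 *      };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_TAG,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */
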
1543 /**
1544  * Validate port_id (vport) item.
1545  *
1546  * @param[in] dev
1547  *   Pointer to the rte_eth_dev structure.
1548  * @param[in] item
1549  *   Item specification.
1550  * @param[in] attr
1551  *   Attributes of flow that includes this item.
1552  * @param[in] item_flags
1553  *   Bit-fields that hold the items detected until now.
1554  * @param[out] error
1555  *   Pointer to error structure.
1556  *
1557  * @return
1558  *   0 on success, a negative errno value otherwise and rte_errno is set.
1559  */
1560 static int
1561 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1562                               const struct rte_flow_item *item,
1563                               const struct rte_flow_attr *attr,
1564                               uint64_t item_flags,
1565                               struct rte_flow_error *error)
1566 {
1567         const struct rte_flow_item_port_id *spec = item->spec;
1568         const struct rte_flow_item_port_id *mask = item->mask;
1569         const struct rte_flow_item_port_id switch_mask = {
1570                         .id = 0xffffffff,
1571         };
1572         struct mlx5_priv *esw_priv;
1573         struct mlx5_priv *dev_priv;
1574         int ret;
1575
1576         if (!attr->transfer)
1577                 return rte_flow_error_set(error, EINVAL,
1578                                           RTE_FLOW_ERROR_TYPE_ITEM,
1579                                           NULL,
1580                                           "match on port id is valid only"
1581                                           " when transfer flag is enabled");
1582         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1583                 return rte_flow_error_set(error, ENOTSUP,
1584                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1585                                           "multiple source ports are not"
1586                                           " supported");
1587         if (!mask)
1588                 mask = &switch_mask;
1589         if (mask->id != 0xffffffff)
1590                 return rte_flow_error_set(error, ENOTSUP,
1591                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1592                                            mask,
1593                                            "no support for partial mask on"
1594                                            " \"id\" field");
1595         ret = mlx5_flow_item_acceptable
1596                                 (item, (const uint8_t *)mask,
1597                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1598                                  sizeof(struct rte_flow_item_port_id),
1599                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1600         if (ret)
1601                 return ret;
1602         if (!spec)
1603                 return 0;
1604         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1605         if (!esw_priv)
1606                 return rte_flow_error_set(error, rte_errno,
1607                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1608                                           "failed to obtain E-Switch info for"
1609                                           " port");
1610         dev_priv = mlx5_dev_to_eswitch_info(dev);
1611         if (!dev_priv)
1612                 return rte_flow_error_set(error, rte_errno,
1613                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1614                                           NULL,
1615                                           "failed to obtain E-Switch info");
1616         if (esw_priv->domain_id != dev_priv->domain_id)
1617                 return rte_flow_error_set(error, EINVAL,
1618                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1619                                           "cannot match on a port from a"
1620                                           " different E-Switch");
1621         return 0;
1622 }
1623
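/*
 * Illustrative sketch, not part of the driver: a port_id item is only
 * valid in a transfer (E-Switch) flow and must use a full "id" mask;
 * the id (DPDK port 1 here) must belong to the same E-Switch domain:
 *
 *      const struct rte_flow_attr attr = { .transfer = 1, .group = 1 };
 *      const struct rte_flow_item_port_id spec = { .id = 1 };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *              .spec = &spec,
 *              .mask = &rte_flow_item_port_id_mask,
 *      };
 */
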
1624 /**
1625  * Validate VLAN item.
1626  *
1627  * @param[in] item
1628  *   Item specification.
1629  * @param[in] item_flags
1630  *   Bit-fields that hold the items detected until now.
1631  * @param[in] dev
1632  *   Ethernet device flow is being created on.
1633  * @param[out] error
1634  *   Pointer to error structure.
1635  *
1636  * @return
1637  *   0 on success, a negative errno value otherwise and rte_errno is set.
1638  */
1639 static int
1640 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1641                            uint64_t item_flags,
1642                            struct rte_eth_dev *dev,
1643                            struct rte_flow_error *error)
1644 {
1645         const struct rte_flow_item_vlan *mask = item->mask;
1646         const struct rte_flow_item_vlan nic_mask = {
1647                 .tci = RTE_BE16(UINT16_MAX),
1648                 .inner_type = RTE_BE16(UINT16_MAX),
1649                 .has_more_vlan = 1,
1650         };
1651         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1652         int ret;
1653         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1654                                         MLX5_FLOW_LAYER_INNER_L4) :
1655                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1656                                         MLX5_FLOW_LAYER_OUTER_L4);
1657         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1658                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1659
1660         if (item_flags & vlanm)
1661                 return rte_flow_error_set(error, EINVAL,
1662                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1663                                           "multiple VLAN layers not supported");
1664         else if ((item_flags & l34m) != 0)
1665                 return rte_flow_error_set(error, EINVAL,
1666                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1667                                           "VLAN cannot follow L3/L4 layer");
1668         if (!mask)
1669                 mask = &rte_flow_item_vlan_mask;
1670         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1671                                         (const uint8_t *)&nic_mask,
1672                                         sizeof(struct rte_flow_item_vlan),
1673                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1674         if (ret)
1675                 return ret;
1676         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1677                 struct mlx5_priv *priv = dev->data->dev_private;
1678
1679                 if (priv->vmwa_context) {
1680                         /*
1681                          * A non-NULL context means a virtual machine with
1682                          * SR-IOV is enabled; a VLAN interface must be created
1683                          * so the hypervisor sets up the E-Switch vport
1684                          * context correctly. To avoid creating multiple VLAN
1685                          * interfaces, a partial VLAN tag mask is not supported.
1686                          */
1687                         return rte_flow_error_set(error, EINVAL,
1688                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1689                                                   item,
1690                                                   "VLAN tag mask is not"
1691                                                   " supported in virtual"
1692                                                   " environment");
1693                 }
1694         }
1695         return 0;
1696 }
1697
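/*
 * Illustrative sketch, not part of the driver: a VLAN item matching a
 * single VID, placed after an ETH item and before any L3/L4 item.
 * The 0x0fff TCI mask keeps the match acceptable even when the VM
 * workaround context described above is active:
 *
 *      const struct rte_flow_item_vlan spec = { .tci = RTE_BE16(100) };
 *      const struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */
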
1698 /*
1699  * GTP flags are contained in 1 byte of the format:
1700  * -------------------------------------------
1701  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1702  * |-----------------------------------------|
1703  * | value | Version | PT | Res | E | S | PN |
1704  * -------------------------------------------
1705  *
1706  * Matching is supported only for GTP flags E, S, PN.
1707  */
1708 #define MLX5_GTP_FLAGS_MASK     0x07
1709
1710 /**
1711  * Validate GTP item.
1712  *
1713  * @param[in] dev
1714  *   Pointer to the rte_eth_dev structure.
1715  * @param[in] item
1716  *   Item specification.
1717  * @param[in] item_flags
1718  *   Bit-fields that hold the items detected until now.
1719  * @param[out] error
1720  *   Pointer to error structure.
1721  *
1722  * @return
1723  *   0 on success, a negative errno value otherwise and rte_errno is set.
1724  */
1725 static int
1726 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1727                           const struct rte_flow_item *item,
1728                           uint64_t item_flags,
1729                           struct rte_flow_error *error)
1730 {
1731         struct mlx5_priv *priv = dev->data->dev_private;
1732         const struct rte_flow_item_gtp *spec = item->spec;
1733         const struct rte_flow_item_gtp *mask = item->mask;
1734         const struct rte_flow_item_gtp nic_mask = {
1735                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1736                 .msg_type = 0xff,
1737                 .teid = RTE_BE32(0xffffffff),
1738         };
1739
1740         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1741                 return rte_flow_error_set(error, ENOTSUP,
1742                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1743                                           "GTP support is not enabled");
1744         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1745                 return rte_flow_error_set(error, ENOTSUP,
1746                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1747                                           "multiple tunnel layers not"
1748                                           " supported");
1749         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1750                 return rte_flow_error_set(error, EINVAL,
1751                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1752                                           "no outer UDP layer found");
1753         if (!mask)
1754                 mask = &rte_flow_item_gtp_mask;
1755         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1756                 return rte_flow_error_set(error, ENOTSUP,
1757                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1758                                           "Match is supported for GTP"
1759                                           " flags only");
1760         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1761                                          (const uint8_t *)&nic_mask,
1762                                          sizeof(struct rte_flow_item_gtp),
1763                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1764 }
1765
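/*
 * Illustrative sketch, not part of the driver: a GTP item matching a
 * tunnel endpoint identifier. It must follow an outer UDP item, the
 * device must report the tunnel_stateless_gtp capability, and only
 * the E, S and PN bits of v_pt_rsv_flags may be matched. The TEID is
 * arbitrary:
 *
 *      const struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x1234) };
 *      const struct rte_flow_item_gtp mask = {
 *              .teid = RTE_BE32(0xffffffff),
 *      };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_GTP,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */
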
1766 /**
1767  * Validate GTP PSC item.
1768  *
1769  * @param[in] item
1770  *   Item specification.
1771  * @param[in] last_item
1772  *   Previously validated item in the pattern items.
1773  * @param[in] gtp_item
1774  *   Previous GTP item specification.
1775  * @param[in] attr
1776  *   Pointer to flow attributes.
1777  * @param[out] error
1778  *   Pointer to error structure.
1779  *
1780  * @return
1781  *   0 on success, a negative errno value otherwise and rte_errno is set.
1782  */
1783 static int
1784 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
1785                               uint64_t last_item,
1786                               const struct rte_flow_item *gtp_item,
1787                               const struct rte_flow_attr *attr,
1788                               struct rte_flow_error *error)
1789 {
1790         const struct rte_flow_item_gtp *gtp_spec;
1791         const struct rte_flow_item_gtp *gtp_mask;
1792         const struct rte_flow_item_gtp_psc *spec;
1793         const struct rte_flow_item_gtp_psc *mask;
1794         const struct rte_flow_item_gtp_psc nic_mask = {
1795                 .pdu_type = 0xFF,
1796                 .qfi = 0xFF,
1797         };
1798
1799         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
1800                 return rte_flow_error_set
1801                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1802                          "GTP PSC item must be preceded with GTP item");
1803         gtp_spec = gtp_item->spec;
1804         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
1805         /* Reject if the GTP E flag is requested to match zero. */
1806         if (gtp_spec &&
1807                 (gtp_mask->v_pt_rsv_flags &
1808                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
1809                 return rte_flow_error_set
1810                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1811                          "GTP E flag must be 1 to match GTP PSC");
1812         /* Check the flow is not created in group zero. */
1813         if (!attr->transfer && !attr->group)
1814                 return rte_flow_error_set
1815                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1816                          "GTP PSC is not supported for group 0");
1817         /* Nothing else to validate without a GTP PSC spec. */
1818         if (!item->spec)
1819                 return 0;
1820         spec = item->spec;
1821         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
1822         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
1823                 return rte_flow_error_set
1824                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1825                          "PDU type should be smaller than 16");
1826         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1827                                          (const uint8_t *)&nic_mask,
1828                                          sizeof(struct rte_flow_item_gtp_psc),
1829                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1830 }
1831
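/*
 * Illustrative sketch, not part of the driver: a GTP PSC item must be
 * preceded by a GTP item that does not force the E flag to zero, and
 * the flow must not reside in group 0. The QFI value is arbitrary:
 *
 *      const struct rte_flow_item_gtp gtp_spec = {
 *              .v_pt_rsv_flags = MLX5_GTP_EXT_HEADER_FLAG,
 *      };
 *      const struct rte_flow_item_gtp_psc psc_spec = { .qfi = 9 };
 *      const struct rte_flow_item pattern_tail[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_GTP, .spec = &gtp_spec },
 *              { .type = RTE_FLOW_ITEM_TYPE_GTP_PSC, .spec = &psc_spec },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 */
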
1832 /**
1833  * Validate IPV4 item.
1834  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1835  * add specific validation of fragment_offset field.
1836  *
1837  * @param[in] item
1838  *   Item specification.
1839  * @param[in] item_flags
1840  *   Bit-fields that hold the items detected until now.
1841  * @param[in] last_item
1842  *   Previously validated item in the pattern items.
1843  * @param[in] ether_type
1844  *   Type in the ethernet layer header (including dot1q).
1845  * @param[out] error
1846  *   Pointer to error structure.
1843  *
1844  * @return
1845  *   0 on success, a negative errno value otherwise and rte_errno is set.
1846  */
1847 static int
1848 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1849                            uint64_t item_flags,
1850                            uint64_t last_item,
1851                            uint16_t ether_type,
1852                            struct rte_flow_error *error)
1853 {
1854         int ret;
1855         const struct rte_flow_item_ipv4 *spec = item->spec;
1856         const struct rte_flow_item_ipv4 *last = item->last;
1857         const struct rte_flow_item_ipv4 *mask = item->mask;
1858         rte_be16_t fragment_offset_spec = 0;
1859         rte_be16_t fragment_offset_last = 0;
1860         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1861                 .hdr = {
1862                         .src_addr = RTE_BE32(0xffffffff),
1863                         .dst_addr = RTE_BE32(0xffffffff),
1864                         .type_of_service = 0xff,
1865                         .fragment_offset = RTE_BE16(0xffff),
1866                         .next_proto_id = 0xff,
1867                         .time_to_live = 0xff,
1868                 },
1869         };
1870
1871         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1872                                            ether_type, &nic_ipv4_mask,
1873                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1874         if (ret < 0)
1875                 return ret;
1876         if (spec && mask)
1877                 fragment_offset_spec = spec->hdr.fragment_offset &
1878                                        mask->hdr.fragment_offset;
1879         if (!fragment_offset_spec)
1880                 return 0;
1881         /*
1882          * spec and mask are valid, enforce using full mask to make sure the
1883          * complete value is used correctly.
1884          */
1885         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1886                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1887                 return rte_flow_error_set(error, EINVAL,
1888                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1889                                           item, "must use full mask for"
1890                                           " fragment_offset");
1891         /*
1892          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1893          * indicating this is the first fragment of a fragmented packet.
1894          * This is not yet supported in MLX5, return appropriate error message.
1895          */
1896         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1897                 return rte_flow_error_set(error, ENOTSUP,
1898                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1899                                           "match on first fragment not "
1900                                           "supported");
1901         if (fragment_offset_spec && !last)
1902                 return rte_flow_error_set(error, ENOTSUP,
1903                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1904                                           "specified value not supported");
1905         /* spec and last are valid, validate the specified range. */
1906         fragment_offset_last = last->hdr.fragment_offset &
1907                                mask->hdr.fragment_offset;
1908         /*
1909          * Match on fragment_offset spec 0x2001 and last 0x3fff
1910          * means MF is 1 and frag-offset is > 0.
1911          * Such a packet is a second or later fragment, excluding the last.
1912          * This is not yet supported in MLX5, return appropriate
1913          * error message.
1914          */
1915         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1916             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1917                 return rte_flow_error_set(error, ENOTSUP,
1918                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1919                                           last, "match on following "
1920                                           "fragments not supported");
1921         /*
1922          * Match on fragment_offset spec 0x0001 and last 0x1fff
1923          * means MF is 0 and frag-offset is > 0.
1924          * Such a packet is the last fragment of a fragmented packet.
1925          * This is not yet supported in MLX5, return appropriate
1926          * error message.
1927          */
1928         if (fragment_offset_spec == RTE_BE16(1) &&
1929             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1930                 return rte_flow_error_set(error, ENOTSUP,
1931                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1932                                           last, "match on last "
1933                                           "fragment not supported");
1934         /*
1935          * Match on fragment_offset spec 0x0001 and last 0x3fff
1936          * means MF and/or frag-offset is not 0.
1937          * This is a fragmented packet.
1938          * Other range values are invalid and rejected.
1939          */
1940         if (!(fragment_offset_spec == RTE_BE16(1) &&
1941               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1944                                           "specified range not supported");
1945         return 0;
1946 }
1947
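/*
 * Illustrative sketch, not part of the driver: the only accepted
 * fragment_offset range match, "any fragment" (MF and/or frag-offset
 * non-zero), i.e. spec 0x0001 with last 0x3fff under a full mask:
 *
 *      const struct rte_flow_item_ipv4 spec = {
 *              .hdr = { .fragment_offset = RTE_BE16(1) },
 *      };
 *      const struct rte_flow_item_ipv4 last = {
 *              .hdr = { .fragment_offset = RTE_BE16(0x3fff) },
 *      };
 *      const struct rte_flow_item_ipv4 mask = {
 *              .hdr = { .fragment_offset = RTE_BE16(0x3fff) },
 *      };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *              .spec = &spec, .last = &last, .mask = &mask,
 *      };
 */
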
1948 /**
1949  * Validate IPV6 fragment extension item.
1950  *
1951  * @param[in] item
1952  *   Item specification.
1953  * @param[in] item_flags
1954  *   Bit-fields that hold the items detected until now.
1955  * @param[out] error
1956  *   Pointer to error structure.
1957  *
1958  * @return
1959  *   0 on success, a negative errno value otherwise and rte_errno is set.
1960  */
1961 static int
1962 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1963                                     uint64_t item_flags,
1964                                     struct rte_flow_error *error)
1965 {
1966         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1967         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1968         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1969         rte_be16_t frag_data_spec = 0;
1970         rte_be16_t frag_data_last = 0;
1971         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1972         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1973                                       MLX5_FLOW_LAYER_OUTER_L4;
1974         int ret = 0;
1975         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1976                 .hdr = {
1977                         .next_header = 0xff,
1978                         .frag_data = RTE_BE16(0xffff),
1979                 },
1980         };
1981
1982         if (item_flags & l4m)
1983                 return rte_flow_error_set(error, EINVAL,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "ipv6 fragment extension item cannot "
1986                                           "follow L4 item.");
1987         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1988             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1989                 return rte_flow_error_set(error, EINVAL,
1990                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1991                                           "ipv6 fragment extension item must "
1992                                           "follow ipv6 item");
1993         if (spec && mask)
1994                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1995         if (!frag_data_spec)
1996                 return 0;
1997         /*
1998          * spec and mask are valid, enforce using full mask to make sure the
1999          * complete value is used correctly.
2000          */
2001         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
2002                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2003                 return rte_flow_error_set(error, EINVAL,
2004                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2005                                           item, "must use full mask for"
2006                                           " frag_data");
2007         /*
2008          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2009          * This is the first fragment of a fragmented packet.
2010          */
2011         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2012                 return rte_flow_error_set(error, ENOTSUP,
2013                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2014                                           "match on first fragment not "
2015                                           "supported");
2016         if (frag_data_spec && !last)
2017                 return rte_flow_error_set(error, EINVAL,
2018                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2019                                           "specified value not supported");
2020         ret = mlx5_flow_item_acceptable
2021                                 (item, (const uint8_t *)mask,
2022                                  (const uint8_t *)&nic_mask,
2023                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2024                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2025         if (ret)
2026                 return ret;
2027         /* spec and last are valid, validate the specified range. */
2028         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2029         /*
2030          * Match on frag_data spec 0x0009 and last 0xfff9
2031          * means M is 1 and frag-offset is > 0.
2032          * Such a packet is a second or later fragment, excluding the last.
2033          * This is not yet supported in MLX5, return appropriate
2034          * error message.
2035          */
2036         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2037                                        RTE_IPV6_EHDR_MF_MASK) &&
2038             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2039                 return rte_flow_error_set(error, ENOTSUP,
2040                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2041                                           last, "match on following "
2042                                           "fragments not supported");
2043         /*
2044          * Match on frag_data spec 0x0008 and last 0xfff8
2045          * means M is 0 and frag-offset is > 0.
2046          * Such a packet is the last fragment of a fragmented packet.
2047          * This is not yet supported in MLX5, return appropriate
2048          * error message.
2049          */
2050         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2051             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2052                 return rte_flow_error_set(error, ENOTSUP,
2053                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2054                                           last, "match on last "
2055                                           "fragment not supported");
2056         /* Other range values are invalid and rejected. */
2057         return rte_flow_error_set(error, EINVAL,
2058                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
2059                                   "specified range not supported");
2060 }
2061
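/*
 * Illustrative notes, not part of the driver: frag_data packs the
 * IPv6 fragment extension fields as offset (13 bits), reserved
 * (2 bits) and M (1 bit), hence RTE_IPV6_EHDR_MF_MASK is 0x0001,
 * RTE_IPV6_EHDR_FO_MASK is 0xfff8 and RTE_IPV6_FRAG_USED_MASK is the
 * union of both, 0xfff9. A match the function does accept is on the
 * next_header field alone (0x11 is UDP), leaving frag_data unmatched:
 *
 *      const struct rte_flow_item_ipv6_frag_ext spec = {
 *              .hdr = { .next_header = 0x11 },
 *      };
 *      const struct rte_flow_item_ipv6_frag_ext mask = {
 *              .hdr = { .next_header = 0xff },
 *      };
 *      const struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 */
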
2062 /**
2063  * Validate the pop VLAN action.
2064  *
2065  * @param[in] dev
2066  *   Pointer to the rte_eth_dev structure.
2067  * @param[in] action_flags
2068  *   Holds the actions detected until now.
2069  * @param[in] action
2070  *   Pointer to the pop vlan action.
2071  * @param[in] item_flags
2072  *   The items found in this flow rule.
2073  * @param[in] attr
2074  *   Pointer to flow attributes.
2075  * @param[out] error
2076  *   Pointer to error structure.
2077  *
2078  * @return
2079  *   0 on success, a negative errno value otherwise and rte_errno is set.
2080  */
2081 static int
2082 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2083                                  uint64_t action_flags,
2084                                  const struct rte_flow_action *action,
2085                                  uint64_t item_flags,
2086                                  const struct rte_flow_attr *attr,
2087                                  struct rte_flow_error *error)
2088 {
2089         const struct mlx5_priv *priv = dev->data->dev_private;
2090
2093         if (!priv->sh->pop_vlan_action)
2094                 return rte_flow_error_set(error, ENOTSUP,
2095                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2096                                           NULL,
2097                                           "pop vlan action is not supported");
2098         if (attr->egress)
2099                 return rte_flow_error_set(error, ENOTSUP,
2100                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2101                                           NULL,
2102                                           "pop vlan action not supported for "
2103                                           "egress");
2104         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2105                 return rte_flow_error_set(error, ENOTSUP,
2106                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2107                                           "no support for multiple VLAN "
2108                                           "actions");
2109         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2110         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2111             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2112                 return rte_flow_error_set(error, ENOTSUP,
2113                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2114                                           NULL,
2115                                           "cannot pop vlan after decap without "
2116                                           "match on inner vlan in the flow");
2117         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2118         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2119             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2120                 return rte_flow_error_set(error, ENOTSUP,
2121                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2122                                           NULL,
2123                                           "cannot pop vlan without a "
2124                                           "match on (outer) vlan in the flow");
2125         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2126                 return rte_flow_error_set(error, EINVAL,
2127                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2128                                           "wrong action order, port_id should "
2129                                           "be after pop VLAN action");
2130         if (!attr->transfer && priv->representor)
2131                 return rte_flow_error_set(error, ENOTSUP,
2132                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2133                                           "pop vlan action for VF representor "
2134                                           "not supported on NIC table");
2135         return 0;
2136 }
2137
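/*
 * Illustrative sketch, not part of the driver: a pop VLAN action list
 * for an ingress rule whose pattern matches the (outer) VLAN header
 * being popped; the queue index is arbitrary:
 *
 *      const struct rte_flow_action_queue queue = { .index = 0 };
 *      const struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */
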
2138 /**
2139  * Get VLAN default info from vlan match info.
2140  *
2141  * @param[in] items
2142  *   The list of item specifications.
2143  * @param[out] vlan
2144  *   Pointer to the VLAN info to fill.
2148  */
2149 static void
2150 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2151                                   struct rte_vlan_hdr *vlan)
2152 {
2153         const struct rte_flow_item_vlan nic_mask = {
2154                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2155                                 MLX5DV_FLOW_VLAN_VID_MASK),
2156                 .inner_type = RTE_BE16(0xffff),
2157         };
2158
2159         if (items == NULL)
2160                 return;
2161         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2162                 int type = items->type;
2163
2164                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2165                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2166                         break;
2167         }
2168         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2169                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2170                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2171
2172                 /* If VLAN item in pattern doesn't contain data, return here. */
2173                 if (!vlan_v)
2174                         return;
2175                 if (!vlan_m)
2176                         vlan_m = &nic_mask;
2177                 /* Only full match values are accepted */
2178                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2179                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2180                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2181                         vlan->vlan_tci |=
2182                                 rte_be_to_cpu_16(vlan_v->tci &
2183                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2184                 }
2185                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2186                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2187                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2188                         vlan->vlan_tci |=
2189                                 rte_be_to_cpu_16(vlan_v->tci &
2190                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2191                 }
2192                 if (vlan_m->inner_type == nic_mask.inner_type)
2193                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2194                                                            vlan_m->inner_type);
2195         }
2196 }
2197
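/*
 * Illustrative note, not part of the driver: the TCI is decomposed
 * with the VLAN masks defined at the top of this file. For a fully
 * masked TCI of 0x6064: PCP = (0x6064 & 0xe000) >> 13 = 3 and
 * VID = 0x6064 & 0x0fff = 100.
 */
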
2198 /**
2199  * Validate the push VLAN action.
2200  *
2201  * @param[in] dev
2202  *   Pointer to the rte_eth_dev structure.
2203  * @param[in] action_flags
2204  *   Holds the actions detected until now.
2205  * @param[in] vlan_m
2206  *   VLAN item mask from the flow pattern, or NULL.
2207  * @param[in] action
2208  *   Pointer to the action structure.
2209  * @param[in] attr
2210  *   Pointer to flow attributes
2211  * @param[out] error
2212  *   Pointer to error structure.
2213  *
2214  * @return
2215  *   0 on success, a negative errno value otherwise and rte_errno is set.
2216  */
2217 static int
2218 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2219                                   uint64_t action_flags,
2220                                   const struct rte_flow_item_vlan *vlan_m,
2221                                   const struct rte_flow_action *action,
2222                                   const struct rte_flow_attr *attr,
2223                                   struct rte_flow_error *error)
2224 {
2225         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2226         const struct mlx5_priv *priv = dev->data->dev_private;
2227
2228         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2229             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2230                 return rte_flow_error_set(error, EINVAL,
2231                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2232                                           "invalid vlan ethertype");
2233         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2234                 return rte_flow_error_set(error, EINVAL,
2235                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2236                                           "wrong action order, port_id should "
2237                                           "be after push VLAN");
2238         if (!attr->transfer && priv->representor)
2239                 return rte_flow_error_set(error, ENOTSUP,
2240                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2241                                           "push vlan action for VF representor "
2242                                           "not supported on NIC table");
2243         if (vlan_m &&
2244             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2245             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2246                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2247             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2248             !(mlx5_flow_find_action
2249                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2250                 return rte_flow_error_set(error, EINVAL,
2251                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2252                                           "not full match mask on VLAN PCP and "
2253                                           "there is no of_set_vlan_pcp action, "
2254                                           "push VLAN action cannot figure out "
2255                                           "PCP value");
2256         if (vlan_m &&
2257             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2258             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2259                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2260             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2261             !(mlx5_flow_find_action
2262                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2263                 return rte_flow_error_set(error, EINVAL,
2264                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2265                                           "not full match mask on VLAN VID and "
2266                                           "there is no of_set_vlan_vid action, "
2267                                           "push VLAN action cannot figure out "
2268                                           "VID value");
2270         return 0;
2271 }
2272
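/*
 * Illustrative sketch, not part of the driver: pushing a VLAN header
 * with the PCP and VID set explicitly afterwards, so the checks above
 * can resolve both values; PCP 3 and VID 100 are arbitrary:
 *
 *      const struct rte_flow_action_of_push_vlan push = {
 *              .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *      };
 *      const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *      const struct rte_flow_action_of_set_vlan_vid vid = {
 *              .vlan_vid = RTE_BE16(100),
 *      };
 *      const struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *              { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *              { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */
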
2273 /**
2274  * Validate the set VLAN PCP.
2275  *
2276  * @param[in] action_flags
2277  *   Holds the actions detected until now.
2278  * @param[in] actions
2279  *   Pointer to the list of actions remaining in the flow rule.
2280  * @param[out] error
2281  *   Pointer to error structure.
2282  *
2283  * @return
2284  *   0 on success, a negative errno value otherwise and rte_errno is set.
2285  */
2286 static int
2287 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2288                                      const struct rte_flow_action actions[],
2289                                      struct rte_flow_error *error)
2290 {
2291         const struct rte_flow_action *action = actions;
2292         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2293
2294         if (conf->vlan_pcp > 7)
2295                 return rte_flow_error_set(error, EINVAL,
2296                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2297                                           "VLAN PCP value is too big");
2298         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2299                 return rte_flow_error_set(error, ENOTSUP,
2300                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2301                                           "set VLAN PCP action must follow "
2302                                           "the push VLAN action");
2303         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2304                 return rte_flow_error_set(error, ENOTSUP,
2305                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2306                                           "Multiple VLAN PCP modifications are "
2307                                           "not supported");
2308         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2309                 return rte_flow_error_set(error, EINVAL,
2310                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2311                                           "wrong action order, port_id should "
2312                                           "be after set VLAN PCP");
2313         return 0;
2314 }
2315
2316 /**
2317  * Validate the set VLAN VID.
2318  *
2319  * @param[in] item_flags
2320  *   Holds the items detected in this rule.
2321  * @param[in] action_flags
2322  *   Holds the actions detected until now.
2323  * @param[in] actions
2324  *   Pointer to the list of actions remaining in the flow rule.
2325  * @param[out] error
2326  *   Pointer to error structure.
2327  *
2328  * @return
2329  *   0 on success, a negative errno value otherwise and rte_errno is set.
2330  */
2331 static int
2332 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2333                                      uint64_t action_flags,
2334                                      const struct rte_flow_action actions[],
2335                                      struct rte_flow_error *error)
2336 {
2337         const struct rte_flow_action *action = actions;
2338         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2339
2340         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2341                 return rte_flow_error_set(error, EINVAL,
2342                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2343                                           "VLAN VID value is too big");
2344         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2345             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2346                 return rte_flow_error_set(error, ENOTSUP,
2347                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2348                                           "set VLAN VID action must follow push"
2349                                           " VLAN action or match on VLAN item");
2350         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2351                 return rte_flow_error_set(error, ENOTSUP,
2352                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2353                                           "Multiple VLAN VID modifications are "
2354                                           "not supported");
2355         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2356                 return rte_flow_error_set(error, EINVAL,
2357                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2358                                           "wrong action order, port_id should "
2359                                           "be after set VLAN VID");
2360         return 0;
2361 }
2362
2363 /**
2364  * Validate the FLAG action.
2365  *
2366  * @param[in] dev
2367  *   Pointer to the rte_eth_dev structure.
2368  * @param[in] action_flags
2369  *   Holds the actions detected until now.
2370  * @param[in] attr
2371  *   Pointer to flow attributes
2372  * @param[out] error
2373  *   Pointer to error structure.
2374  *
2375  * @return
2376  *   0 on success, a negative errno value otherwise and rte_errno is set.
2377  */
2378 static int
2379 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2380                              uint64_t action_flags,
2381                              const struct rte_flow_attr *attr,
2382                              struct rte_flow_error *error)
2383 {
2384         struct mlx5_priv *priv = dev->data->dev_private;
2385         struct mlx5_dev_config *config = &priv->config;
2386         int ret;
2387
2388         /* Fall back if no extended metadata register support. */
2389         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2390                 return mlx5_flow_validate_action_flag(action_flags, attr,
2391                                                       error);
2392         /* Extensive metadata mode requires registers. */
2393         if (!mlx5_flow_ext_mreg_supported(dev))
2394                 return rte_flow_error_set(error, ENOTSUP,
2395                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2396                                           "no metadata registers "
2397                                           "to support flag action");
2398         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2399                 return rte_flow_error_set(error, ENOTSUP,
2400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2401                                           "extended metadata register"
2402                                           " isn't available");
2403         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2404         if (ret < 0)
2405                 return ret;
2406         MLX5_ASSERT(ret > 0);
2407         if (action_flags & MLX5_FLOW_ACTION_MARK)
2408                 return rte_flow_error_set(error, EINVAL,
2409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2410                                           "can't mark and flag in same flow");
2411         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2412                 return rte_flow_error_set(error, EINVAL,
2413                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2414                                           "can't have 2 flag"
2415                                           " actions in same flow");
2416         return 0;
2417 }
2418
2419 /**
2420  * Validate MARK action.
2421  *
2422  * @param[in] dev
2423  *   Pointer to the rte_eth_dev structure.
2424  * @param[in] action
2425  *   Pointer to action.
2426  * @param[in] action_flags
2427  *   Holds the actions detected until now.
2428  * @param[in] attr
2429  *   Pointer to flow attributes
2430  * @param[out] error
2431  *   Pointer to error structure.
2432  *
2433  * @return
2434  *   0 on success, a negative errno value otherwise and rte_errno is set.
2435  */
2436 static int
2437 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2438                              const struct rte_flow_action *action,
2439                              uint64_t action_flags,
2440                              const struct rte_flow_attr *attr,
2441                              struct rte_flow_error *error)
2442 {
2443         struct mlx5_priv *priv = dev->data->dev_private;
2444         struct mlx5_dev_config *config = &priv->config;
2445         const struct rte_flow_action_mark *mark = action->conf;
2446         int ret;
2447
2448         /* Fall back if no extended metadata register support. */
2449         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2450                 return mlx5_flow_validate_action_mark(action, action_flags,
2451                                                       attr, error);
2452         /* Extensive metadata mode requires registers. */
2453         if (!mlx5_flow_ext_mreg_supported(dev))
2454                 return rte_flow_error_set(error, ENOTSUP,
2455                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2456                                           "no metadata registers "
2457                                           "to support mark action");
2458         if (!priv->sh->dv_mark_mask)
2459                 return rte_flow_error_set(error, ENOTSUP,
2460                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2461                                           "extended metadata register"
2462                                           " isn't available");
2463         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2464         if (ret < 0)
2465                 return ret;
2466         MLX5_ASSERT(ret > 0);
2467         if (!mark)
2468                 return rte_flow_error_set(error, EINVAL,
2469                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2470                                           "configuration cannot be null");
2471         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2472                 return rte_flow_error_set(error, EINVAL,
2473                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2474                                           &mark->id,
2475                                           "mark id exceeds the limit");
2476         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2477                 return rte_flow_error_set(error, EINVAL,
2478                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2479                                           "can't flag and mark in same flow");
2480         if (action_flags & MLX5_FLOW_ACTION_MARK)
2481                 return rte_flow_error_set(error, EINVAL,
2482                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2483                                           "can't have 2 mark actions in same"
2484                                           " flow");
2485         return 0;
2486 }
2487
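/*
 * Illustrative sketch, not part of the driver: a MARK action; the id
 * (arbitrary here) must fit both MLX5_FLOW_MARK_MAX and the register
 * mask, and it cannot be combined with a FLAG action in one flow:
 *
 *      const struct rte_flow_action_mark mark = { .id = 0x2a };
 *      const struct rte_flow_action action = {
 *              .type = RTE_FLOW_ACTION_TYPE_MARK,
 *              .conf = &mark,
 *      };
 */
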
2488 /**
2489  * Validate SET_META action.
2490  *
2491  * @param[in] dev
2492  *   Pointer to the rte_eth_dev structure.
2493  * @param[in] action
2494  *   Pointer to the action structure.
2495  * @param[in] action_flags
2496  *   Holds the actions detected until now.
2497  * @param[in] attr
2498  *   Pointer to flow attributes
2499  * @param[out] error
2500  *   Pointer to error structure.
2501  *
2502  * @return
2503  *   0 on success, a negative errno value otherwise and rte_errno is set.
2504  */
2505 static int
2506 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2507                                  const struct rte_flow_action *action,
2508                                  uint64_t action_flags __rte_unused,
2509                                  const struct rte_flow_attr *attr,
2510                                  struct rte_flow_error *error)
2511 {
2512         const struct rte_flow_action_set_meta *conf;
2513         uint32_t nic_mask = UINT32_MAX;
2514         int reg;
2515
2516         if (!mlx5_flow_ext_mreg_supported(dev))
2517                 return rte_flow_error_set(error, ENOTSUP,
2518                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2519                                           "extended metadata register"
2520                                           " isn't supported");
2521         reg = flow_dv_get_metadata_reg(dev, attr, error);
2522         if (reg < 0)
2523                 return reg;
2524         if (reg == REG_NON)
2525                 return rte_flow_error_set(error, ENOTSUP,
2526                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2527                                           "unavailable extended metadata register");
2528         if (reg != REG_A && reg != REG_B) {
2529                 struct mlx5_priv *priv = dev->data->dev_private;
2530
2531                 nic_mask = priv->sh->dv_meta_mask;
2532         }
2533         if (!(action->conf))
2534                 return rte_flow_error_set(error, EINVAL,
2535                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2536                                           "configuration cannot be null");
2537         conf = (const struct rte_flow_action_set_meta *)action->conf;
2538         if (!conf->mask)
2539                 return rte_flow_error_set(error, EINVAL,
2540                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2541                                           "zero mask doesn't have any effect");
2542         if (conf->mask & ~nic_mask)
2543                 return rte_flow_error_set(error, EINVAL,
2544                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2545                                           "metadata must be within reg C0");
2546         return 0;
2547 }
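
/*
 * Illustrative sketch (hypothetical values): a SET_META action rewriting
 * only the low 16 bits of the metadata register. A zero mask is rejected
 * above, as are mask bits outside the register mask when the metadata
 * lands in reg C0.
 *
 *     struct rte_flow_action_set_meta meta = {
 *             .data = 0x1234,
 *             .mask = 0xffff,
 *     };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *             .conf = &meta,
 *     };
 */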
2548
2549 /**
2550  * Validate SET_TAG action.
2551  *
2552  * @param[in] dev
2553  *   Pointer to the rte_eth_dev structure.
2554  * @param[in] action
2555  *   Pointer to the action structure.
2556  * @param[in] action_flags
2557  *   Holds the actions detected until now.
2558  * @param[in] attr
2559  *   Pointer to flow attributes
2560  * @param[out] error
2561  *   Pointer to error structure.
2562  *
2563  * @return
2564  *   0 on success, a negative errno value otherwise and rte_errno is set.
2565  */
2566 static int
2567 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2568                                 const struct rte_flow_action *action,
2569                                 uint64_t action_flags,
2570                                 const struct rte_flow_attr *attr,
2571                                 struct rte_flow_error *error)
2572 {
2573         const struct rte_flow_action_set_tag *conf;
2574         const uint64_t terminal_action_flags =
2575                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2576                 MLX5_FLOW_ACTION_RSS;
2577         int ret;
2578
2579         if (!mlx5_flow_ext_mreg_supported(dev))
2580                 return rte_flow_error_set(error, ENOTSUP,
2581                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2582                                           "extensive metadata register"
2583                                           " isn't supported");
2584         if (!(action->conf))
2585                 return rte_flow_error_set(error, EINVAL,
2586                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2587                                           "configuration cannot be null");
2588         conf = (const struct rte_flow_action_set_tag *)action->conf;
2589         if (!conf->mask)
2590                 return rte_flow_error_set(error, EINVAL,
2591                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2592                                           "zero mask doesn't have any effect");
2593         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2594         if (ret < 0)
2595                 return ret;
2596         if (!attr->transfer && attr->ingress &&
2597             (action_flags & terminal_action_flags))
2598                 return rte_flow_error_set(error, EINVAL,
2599                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2600                                           "set_tag has no effect"
2601                                           " with terminal actions");
2602         return 0;
2603 }
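
/*
 * Illustrative sketch (hypothetical values): a SET_TAG action writing an
 * application tag register. Note the check above: on a non-transfer
 * ingress flow the tag write must precede terminal actions such as
 * QUEUE/RSS/DROP, otherwise it has no effect.
 *
 *     struct rte_flow_action_set_tag tag = {
 *             .data = 0x00dd0000,
 *             .mask = 0x00ff0000,
 *             .index = 0,     /* Application tag register index. */
 *     };
 */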
2604
2605 /**
2606  * Validate count action.
2607  *
2608  * @param[in] dev
2609  *   Pointer to rte_eth_dev structure.
2610  * @param[out] error
2611  *   Pointer to error structure.
2612  *
2613  * @return
2614  *   0 on success, a negative errno value otherwise and rte_errno is set.
2615  */
2616 static int
2617 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2618                               struct rte_flow_error *error)
2619 {
2620         struct mlx5_priv *priv = dev->data->dev_private;
2621
2622         if (!priv->config.devx)
2623                 goto notsup_err;
2624 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2625         return 0;
2626 #endif
2627 notsup_err:
2628         return rte_flow_error_set
2629                       (error, ENOTSUP,
2630                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2631                        NULL,
2632                        "count action not supported");
2633 }
2634
2635 /**
2636  * Validate the L2 encap action.
2637  *
2638  * @param[in] dev
2639  *   Pointer to the rte_eth_dev structure.
2640  * @param[in] action_flags
2641  *   Holds the actions detected until now.
2642  * @param[in] action
2643  *   Pointer to the action structure.
2644  * @param[in] attr
2645  *   Pointer to flow attributes.
2646  * @param[out] error
2647  *   Pointer to error structure.
2648  *
2649  * @return
2650  *   0 on success, a negative errno value otherwise and rte_errno is set.
2651  */
2652 static int
2653 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2654                                  uint64_t action_flags,
2655                                  const struct rte_flow_action *action,
2656                                  const struct rte_flow_attr *attr,
2657                                  struct rte_flow_error *error)
2658 {
2659         const struct mlx5_priv *priv = dev->data->dev_private;
2660
2661         if (!(action->conf))
2662                 return rte_flow_error_set(error, EINVAL,
2663                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2664                                           "configuration cannot be null");
2665         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2666                 return rte_flow_error_set(error, EINVAL,
2667                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2668                                           "can only have a single encap action "
2669                                           "in a flow");
2670         if (!attr->transfer && priv->representor)
2671                 return rte_flow_error_set(error, ENOTSUP,
2672                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2673                                           "encap action for VF representor "
2674                                           "not supported on NIC table");
2675         return 0;
2676 }
2677
2678 /**
2679  * Validate a decap action.
2680  *
2681  * @param[in] dev
2682  *   Pointer to the rte_eth_dev structure.
2683  * @param[in] action_flags
2684  *   Holds the actions detected until now.
2685  * @param[in] action
2686  *   Pointer to the action structure.
2687  * @param[in] item_flags
2688  *   Holds the items detected.
2689  * @param[in] attr
2690  *   Pointer to flow attributes
2691  * @param[out] error
2692  *   Pointer to error structure.
2693  *
2694  * @return
2695  *   0 on success, a negative errno value otherwise and rte_errno is set.
2696  */
2697 static int
2698 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2699                               uint64_t action_flags,
2700                               const struct rte_flow_action *action,
2701                               const uint64_t item_flags,
2702                               const struct rte_flow_attr *attr,
2703                               struct rte_flow_error *error)
2704 {
2705         const struct mlx5_priv *priv = dev->data->dev_private;
2706
2707         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2708             !priv->config.decap_en)
2709                 return rte_flow_error_set(error, ENOTSUP,
2710                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2711                                           "decap is not enabled");
2712         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2713                 return rte_flow_error_set(error, ENOTSUP,
2714                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2715                                           action_flags &
2716                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2717                                           "have a single decap action" : "decap "
2718                                           "after encap is not supported");
2719         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2720                 return rte_flow_error_set(error, EINVAL,
2721                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2722                                           "can't have decap action after"
2723                                           " modify action");
2724         if (attr->egress)
2725                 return rte_flow_error_set(error, ENOTSUP,
2726                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2727                                           NULL,
2728                                           "decap action not supported for "
2729                                           "egress");
2730         if (!attr->transfer && priv->representor)
2731                 return rte_flow_error_set(error, ENOTSUP,
2732                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2733                                           "decap action for VF representor "
2734                                           "not supported on NIC table");
2735         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
2736             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
2737                 return rte_flow_error_set(error, ENOTSUP,
2738                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2739                                 "VXLAN item should be present for VXLAN decap");
2740         return 0;
2741 }
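
/*
 * Illustrative sketch (hypothetical flow): the VXLAN decap check above
 * requires a matching VXLAN item in the pattern, e.g.:
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */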
2742
2743 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2744
2745 /**
2746  * Validate the raw encap and decap actions.
2747  *
2748  * @param[in] dev
2749  *   Pointer to the rte_eth_dev structure.
2750  * @param[in] decap
2751  *   Pointer to the decap action.
2752  * @param[in] encap
2753  *   Pointer to the encap action.
2754  * @param[in] attr
2755  *   Pointer to flow attributes
2756  * @param[in/out] action_flags
2757  *   Holds the actions detected until now.
2758  * @param[out] actions_n
2759  *   pointer to the number of actions counter.
2760  * @param[in] action
2761  *   Pointer to the action structure.
2762  * @param[in] item_flags
2763  *   Holds the items detected.
2764  * @param[out] error
2765  *   Pointer to error structure.
2766  *
2767  * @return
2768  *   0 on success, a negative errno value otherwise and rte_errno is set.
2769  */
2770 static int
2771 flow_dv_validate_action_raw_encap_decap
2772         (struct rte_eth_dev *dev,
2773          const struct rte_flow_action_raw_decap *decap,
2774          const struct rte_flow_action_raw_encap *encap,
2775          const struct rte_flow_attr *attr, uint64_t *action_flags,
2776          int *actions_n, const struct rte_flow_action *action,
2777          uint64_t item_flags, struct rte_flow_error *error)
2778 {
2779         const struct mlx5_priv *priv = dev->data->dev_private;
2780         int ret;
2781
2782         if (encap && (!encap->size || !encap->data))
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2785                                           "raw encap data cannot be empty");
2786         if (decap && encap) {
2787                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2788                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2789                         /* L3 encap. */
2790                         decap = NULL;
2791                 else if (encap->size <=
2792                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2793                            decap->size >
2794                            MLX5_ENCAPSULATION_DECISION_SIZE)
2795                         /* L3 decap. */
2796                         encap = NULL;
2797                 else if (encap->size >
2798                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2799                            decap->size >
2800                            MLX5_ENCAPSULATION_DECISION_SIZE)
2801                         /* 2 L2 actions: encap and decap. */
2802                         ;
2803                 else
2804                         return rte_flow_error_set(error,
2805                                 ENOTSUP,
2806                                 RTE_FLOW_ERROR_TYPE_ACTION,
2807                                 NULL, "unsupported combination:"
2808                                 " raw decap and raw encap sizes"
2809                                 " are both too small");
2810         }
2811         if (decap) {
2812                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
2813                                                     item_flags, attr, error);
2814                 if (ret < 0)
2815                         return ret;
2816                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2817                 ++(*actions_n);
2818         }
2819         if (encap) {
2820                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2821                         return rte_flow_error_set(error, ENOTSUP,
2822                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2823                                                   NULL,
2824                                                   "raw encap size is too small");
2825                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2826                         return rte_flow_error_set(error, EINVAL,
2827                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2828                                                   NULL,
2829                                                   "more than one encap action");
2830                 if (!attr->transfer && priv->representor)
2831                         return rte_flow_error_set
2832                                         (error, ENOTSUP,
2833                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2834                                          "encap action for VF representor "
2835                                          "not supported on NIC table");
2836                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2837                 ++(*actions_n);
2838         }
2839         return 0;
2840 }
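
/*
 * Illustrative sketch of the L3 classification above (hypothetical sizes,
 * assuming MLX5_ENCAPSULATION_DECISION_SIZE covers an L2 + IPv4 header):
 * a raw decap of just the 14-byte Ethernet header combined with a larger
 * raw encap is treated as a single L3 (L2-to-L3 tunnel) encap, so the
 * decap pointer is dropped and only the encap action is counted.
 *
 *     uint8_t l3_tunnel_hdr[50];   (e.g. an ETH + IPv4 + UDP + VXLAN stack)
 *     struct rte_flow_action_raw_decap decap = { .size = 14 };
 *     struct rte_flow_action_raw_encap encap = {
 *             .data = l3_tunnel_hdr,
 *             .size = sizeof(l3_tunnel_hdr),
 *     };
 */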
2841
2842 /**
2843  * Match encap_decap resource.
2844  *
2845  * @param list
2846  *   Pointer to the hash list.
2847  * @param entry
2848  *   Pointer to the existing resource entry object.
2849  * @param key
2850  *   Key of the new entry.
2851  * @param cb_ctx
2852  *   Pointer to the new encap_decap resource.
2853  *
2854  * @return
2855  *   0 on matching, non-zero otherwise.
2856  */
2857 int
2858 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2859                              struct mlx5_hlist_entry *entry,
2860                              uint64_t key __rte_unused, void *cb_ctx)
2861 {
2862         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2863         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2864         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2865
2866         cache_resource = container_of(entry,
2867                                       struct mlx5_flow_dv_encap_decap_resource,
2868                                       entry);
2869         if (resource->reformat_type == cache_resource->reformat_type &&
2870             resource->ft_type == cache_resource->ft_type &&
2871             resource->flags == cache_resource->flags &&
2872             resource->size == cache_resource->size &&
2873             !memcmp((const void *)resource->buf,
2874                     (const void *)cache_resource->buf,
2875                     resource->size))
2876                 return 0;
2877         return -1;
2878 }
2879
2880 /**
2881  * Allocate encap_decap resource.
2882  *
2883  * @param list
2884  *   Pointer to the hash list.
2885  * @param key
2886  *   Key of the new entry.
2887  * @param cb_ctx
2888  *   Pointer to the new encap_decap resource.
2889  *
2890  * @return
2891  *   Pointer to the created entry on success, NULL otherwise.
2892  */
2893 struct mlx5_hlist_entry *
2894 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2895                               uint64_t key __rte_unused,
2896                               void *cb_ctx)
2897 {
2898         struct mlx5_dev_ctx_shared *sh = list->ctx;
2899         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2900         struct mlx5dv_dr_domain *domain;
2901         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2902         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2903         uint32_t idx;
2904         int ret;
2905
2906         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2907                 domain = sh->fdb_domain;
2908         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2909                 domain = sh->rx_domain;
2910         else
2911                 domain = sh->tx_domain;
2912         /* Register new encap/decap resource. */
2913         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2914                                        &idx);
2915         if (!cache_resource) {
2916                 rte_flow_error_set(ctx->error, ENOMEM,
2917                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2918                                    "cannot allocate resource memory");
2919                 return NULL;
2920         }
2921         *cache_resource = *resource;
2922         cache_resource->idx = idx;
2923         ret = mlx5_flow_os_create_flow_action_packet_reformat
2924                                         (sh->ctx, domain, cache_resource,
2925                                          &cache_resource->action);
2926         if (ret) {
2927                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2928                 rte_flow_error_set(ctx->error, ENOMEM,
2929                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2930                                    NULL, "cannot create action");
2931                 return NULL;
2932         }
2933
2934         return &cache_resource->entry;
2935 }
2936
2937 /**
2938  * Find existing encap/decap resource or create and register a new one.
2939  *
2940  * @param[in, out] dev
2941  *   Pointer to rte_eth_dev structure.
2942  * @param[in, out] resource
2943  *   Pointer to encap/decap resource.
2944  * @param[in, out] dev_flow
2945  *   Pointer to the dev_flow.
2946  * @param[out] error
2947  *   Pointer to error structure.
2948  *
2949  * @return
2950  *   0 on success, a negative errno value otherwise and rte_errno is set.
2951  */
2952 static int
2953 flow_dv_encap_decap_resource_register
2954                         (struct rte_eth_dev *dev,
2955                          struct mlx5_flow_dv_encap_decap_resource *resource,
2956                          struct mlx5_flow *dev_flow,
2957                          struct rte_flow_error *error)
2958 {
2959         struct mlx5_priv *priv = dev->data->dev_private;
2960         struct mlx5_dev_ctx_shared *sh = priv->sh;
2961         struct mlx5_hlist_entry *entry;
2962         union {
2963                 struct {
2964                         uint32_t ft_type:8;
2965                         uint32_t refmt_type:8;
2966                         /*
2967                          * Header reformat actions can be shared between
2968                           * non-root tables; one bit tells whether the
2969                           * action is created on a non-root table.
2970                          */
2971                         uint32_t is_root:1;
2972                         uint32_t reserve:15;
2973                 };
2974                 uint32_t v32;
2975         } encap_decap_key = {
2976                 {
2977                         .ft_type = resource->ft_type,
2978                         .refmt_type = resource->reformat_type,
2979                         .is_root = !!dev_flow->dv.group,
2980                         .reserve = 0,
2981                 }
2982         };
2983         struct mlx5_flow_cb_ctx ctx = {
2984                 .error = error,
2985                 .data = resource,
2986         };
2987         uint64_t key64;
2988
2989         resource->flags = dev_flow->dv.group ? 0 : 1;
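        /*
         * Hash key: a checksum of the packed {ft_type, refmt_type, is_root}
         * word, folded with a checksum of the reformat buffer (when one is
         * present), so that encap actions carrying different headers land
         * on different hash list entries.
         */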
2990         key64 = __rte_raw_cksum(&encap_decap_key.v32,
2991                                  sizeof(encap_decap_key.v32), 0);
2992         if (resource->reformat_type !=
2993             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
2994             resource->size)
2995                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
2996         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
2997         if (!entry)
2998                 return -rte_errno;
2999         resource = container_of(entry, typeof(*resource), entry);
3000         dev_flow->dv.encap_decap = resource;
3001         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
3002         return 0;
3003 }
3004
3005 /**
3006  * Find existing table jump resource or create and register a new one.
3007  *
3008  * @param[in, out] dev
3009  *   Pointer to rte_eth_dev structure.
3010  * @param[in, out] tbl
3011  *   Pointer to flow table resource.
3012  * @param[in, out] dev_flow
3013  *   Pointer to the dev_flow.
3014  * @param[out] error
3015  *   Pointer to error structure.
3016  *
3017  * @return
3018  *   0 on success, a negative errno value otherwise and rte_errno is set.
3019  */
3020 static int
3021 flow_dv_jump_tbl_resource_register
3022                         (struct rte_eth_dev *dev __rte_unused,
3023                          struct mlx5_flow_tbl_resource *tbl,
3024                          struct mlx5_flow *dev_flow,
3025                          struct rte_flow_error *error __rte_unused)
3026 {
3027         struct mlx5_flow_tbl_data_entry *tbl_data =
3028                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3029
3030         MLX5_ASSERT(tbl);
3031         MLX5_ASSERT(tbl_data->jump.action);
3032         dev_flow->handle->rix_jump = tbl_data->idx;
3033         dev_flow->dv.jump = &tbl_data->jump;
3034         return 0;
3035 }
3036
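/**
 * Match callback for the port ID action cache list: compare the requested
 * port ID with the one held by an existing entry.
 *
 * @return
 *   0 on matching, non-zero otherwise.
 */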
3037 int
3038 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3039                          struct mlx5_cache_entry *entry, void *cb_ctx)
3040 {
3041         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3042         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3043         struct mlx5_flow_dv_port_id_action_resource *res =
3044                         container_of(entry, typeof(*res), entry);
3045
3046         return ref->port_id != res->port_id;
3047 }
3048
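/**
 * Create callback for the port ID action cache list: allocate a new
 * resource from the indexed pool and create the destination port action
 * on the FDB domain.
 *
 * @return
 *   Pointer to the created entry on success, NULL otherwise.
 */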
3049 struct mlx5_cache_entry *
3050 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3051                           struct mlx5_cache_entry *entry __rte_unused,
3052                           void *cb_ctx)
3053 {
3054         struct mlx5_dev_ctx_shared *sh = list->ctx;
3055         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3056         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3057         struct mlx5_flow_dv_port_id_action_resource *cache;
3058         uint32_t idx;
3059         int ret;
3060
3061         /* Register new port id action resource. */
3062         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3063         if (!cache) {
3064                 rte_flow_error_set(ctx->error, ENOMEM,
3065                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3066                                    "cannot allocate port_id action cache memory");
3067                 return NULL;
3068         }
3069         *cache = *ref;
3070         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3071                                                         ref->port_id,
3072                                                         &cache->action);
3073         if (ret) {
3074                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3075                 rte_flow_error_set(ctx->error, ENOMEM,
3076                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3077                                    "cannot create action");
3078                 return NULL;
3079         }
3080         return &cache->entry;
3081 }
3082
3083 /**
3084  * Find existing table port ID resource or create and register a new one.
3085  *
3086  * @param[in, out] dev
3087  *   Pointer to rte_eth_dev structure.
3088  * @param[in, out] resource
3089  *   Pointer to port ID action resource.
3090  * @param[in, out] dev_flow
3091  *   Pointer to the dev_flow.
3092  * @param[out] error
3093  *   Pointer to error structure.
3094  *
3095  * @return
3096  *   0 on success, a negative errno value otherwise and rte_errno is set.
3097  */
3098 static int
3099 flow_dv_port_id_action_resource_register
3100                         (struct rte_eth_dev *dev,
3101                          struct mlx5_flow_dv_port_id_action_resource *resource,
3102                          struct mlx5_flow *dev_flow,
3103                          struct rte_flow_error *error)
3104 {
3105         struct mlx5_priv *priv = dev->data->dev_private;
3106         struct mlx5_cache_entry *entry;
3107         struct mlx5_flow_dv_port_id_action_resource *cache;
3108         struct mlx5_flow_cb_ctx ctx = {
3109                 .error = error,
3110                 .data = resource,
3111         };
3112
3113         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3114         if (!entry)
3115                 return -rte_errno;
3116         cache = container_of(entry, typeof(*cache), entry);
3117         dev_flow->dv.port_id_action = cache;
3118         dev_flow->handle->rix_port_id_action = cache->idx;
3119         return 0;
3120 }
3121
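/**
 * Match callback for the push VLAN action cache list: compare the VLAN tag
 * and flow table type with those held by an existing entry.
 *
 * @return
 *   0 on matching, non-zero otherwise.
 */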
3122 int
3123 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3124                          struct mlx5_cache_entry *entry, void *cb_ctx)
3125 {
3126         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3127         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3128         struct mlx5_flow_dv_push_vlan_action_resource *res =
3129                         container_of(entry, typeof(*res), entry);
3130
3131         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3132 }
3133
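/**
 * Create callback for the push VLAN action cache list: allocate a new
 * resource from the indexed pool and create the push VLAN action on the
 * matching domain.
 *
 * @return
 *   Pointer to the created entry on success, NULL otherwise.
 */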
3134 struct mlx5_cache_entry *
3135 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3136                           struct mlx5_cache_entry *entry __rte_unused,
3137                           void *cb_ctx)
3138 {
3139         struct mlx5_dev_ctx_shared *sh = list->ctx;
3140         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3141         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3142         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3143         struct mlx5dv_dr_domain *domain;
3144         uint32_t idx;
3145         int ret;
3146
3147         /* Register new push VLAN action resource. */
3148         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3149         if (!cache) {
3150                 rte_flow_error_set(ctx->error, ENOMEM,
3151                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3152                                    "cannot allocate push_vlan action cache memory");
3153                 return NULL;
3154         }
3155         *cache = *ref;
3156         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3157                 domain = sh->fdb_domain;
3158         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3159                 domain = sh->rx_domain;
3160         else
3161                 domain = sh->tx_domain;
3162         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3163                                                         &cache->action);
3164         if (ret) {
3165                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3166                 rte_flow_error_set(ctx->error, ENOMEM,
3167                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3168                                    "cannot create push vlan action");
3169                 return NULL;
3170         }
3171         return &cache->entry;
3172 }
3173
3174 /**
3175  * Find existing push vlan resource or create and register a new one.
3176  *
3177  * @param[in, out] dev
3178  *   Pointer to rte_eth_dev structure.
3179  * @param[in, out] resource
3180  *   Pointer to push VLAN action resource.
3181  * @param[in, out] dev_flow
3182  *   Pointer to the dev_flow.
3183  * @param[out] error
3184  *   Pointer to error structure.
3185  *
3186  * @return
3187  *   0 on success, a negative errno value otherwise and rte_errno is set.
3188  */
3189 static int
3190 flow_dv_push_vlan_action_resource_register
3191                        (struct rte_eth_dev *dev,
3192                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3193                         struct mlx5_flow *dev_flow,
3194                         struct rte_flow_error *error)
3195 {
3196         struct mlx5_priv *priv = dev->data->dev_private;
3197         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3198         struct mlx5_cache_entry *entry;
3199         struct mlx5_flow_cb_ctx ctx = {
3200                 .error = error,
3201                 .data = resource,
3202         };
3203
3204         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3205         if (!entry)
3206                 return -rte_errno;
3207         cache = container_of(entry, typeof(*cache), entry);
3208
3209         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3210         dev_flow->dv.push_vlan_res = cache;
3211         return 0;
3212 }
3213
3214 /**
3215  * Get the header size of a specific rte_flow_item_type.
3216  *
3217  * @param[in] item_type
3218  *   Tested rte_flow_item_type.
3219  *
3220  * @return
3221  *   Size of the item type's header, 0 if void or irrelevant.
3222  */
3223 static size_t
3224 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3225 {
3226         size_t retval;
3227
3228         switch (item_type) {
3229         case RTE_FLOW_ITEM_TYPE_ETH:
3230                 retval = sizeof(struct rte_ether_hdr);
3231                 break;
3232         case RTE_FLOW_ITEM_TYPE_VLAN:
3233                 retval = sizeof(struct rte_vlan_hdr);
3234                 break;
3235         case RTE_FLOW_ITEM_TYPE_IPV4:
3236                 retval = sizeof(struct rte_ipv4_hdr);
3237                 break;
3238         case RTE_FLOW_ITEM_TYPE_IPV6:
3239                 retval = sizeof(struct rte_ipv6_hdr);
3240                 break;
3241         case RTE_FLOW_ITEM_TYPE_UDP:
3242                 retval = sizeof(struct rte_udp_hdr);
3243                 break;
3244         case RTE_FLOW_ITEM_TYPE_TCP:
3245                 retval = sizeof(struct rte_tcp_hdr);
3246                 break;
3247         case RTE_FLOW_ITEM_TYPE_VXLAN:
3248         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3249                 retval = sizeof(struct rte_vxlan_hdr);
3250                 break;
3251         case RTE_FLOW_ITEM_TYPE_GRE:
3252         case RTE_FLOW_ITEM_TYPE_NVGRE:
3253                 retval = sizeof(struct rte_gre_hdr);
3254                 break;
3255         case RTE_FLOW_ITEM_TYPE_MPLS:
3256                 retval = sizeof(struct rte_mpls_hdr);
3257                 break;
3258         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3259         default:
3260                 retval = 0;
3261                 break;
3262         }
3263         return retval;
3264 }
3265
3266 #define MLX5_ENCAP_IPV4_VERSION         0x40
3267 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3268 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3269 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3270 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3271 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3272 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3273
3274 /**
3275  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
3276  *
3277  * @param[in] items
3278  *   Pointer to rte_flow_item objects list.
3279  * @param[out] buf
3280  *   Pointer to the output buffer.
3281  * @param[out] size
3282  *   Pointer to the output buffer size.
3283  * @param[out] error
3284  *   Pointer to the error structure.
3285  *
3286  * @return
3287  *   0 on success, a negative errno value otherwise and rte_errno is set.
3288  */
3289 static int
3290 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3291                            size_t *size, struct rte_flow_error *error)
3292 {
3293         struct rte_ether_hdr *eth = NULL;
3294         struct rte_vlan_hdr *vlan = NULL;
3295         struct rte_ipv4_hdr *ipv4 = NULL;
3296         struct rte_ipv6_hdr *ipv6 = NULL;
3297         struct rte_udp_hdr *udp = NULL;
3298         struct rte_vxlan_hdr *vxlan = NULL;
3299         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3300         struct rte_gre_hdr *gre = NULL;
3301         size_t len;
3302         size_t temp_size = 0;
3303
3304         if (!items)
3305                 return rte_flow_error_set(error, EINVAL,
3306                                           RTE_FLOW_ERROR_TYPE_ACTION,
3307                                           NULL, "invalid empty data");
3308         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3309                 len = flow_dv_get_item_hdr_len(items->type);
3310                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3311                         return rte_flow_error_set(error, EINVAL,
3312                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3313                                                   (void *)items->type,
3314                                                   "items total size is too big"
3315                                                   " for encap action");
3316                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3317                 switch (items->type) {
3318                 case RTE_FLOW_ITEM_TYPE_ETH:
3319                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3320                         break;
3321                 case RTE_FLOW_ITEM_TYPE_VLAN:
3322                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3323                         if (!eth)
3324                                 return rte_flow_error_set(error, EINVAL,
3325                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3326                                                 (void *)items->type,
3327                                                 "eth header not found");
3328                         if (!eth->ether_type)
3329                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3330                         break;
3331                 case RTE_FLOW_ITEM_TYPE_IPV4:
3332                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3333                         if (!vlan && !eth)
3334                                 return rte_flow_error_set(error, EINVAL,
3335                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3336                                                 (void *)items->type,
3337                                                 "neither eth nor vlan"
3338                                                 " header found");
3339                         if (vlan && !vlan->eth_proto)
3340                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3341                         else if (eth && !eth->ether_type)
3342                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3343                         if (!ipv4->version_ihl)
3344                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3345                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3346                         if (!ipv4->time_to_live)
3347                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3348                         break;
3349                 case RTE_FLOW_ITEM_TYPE_IPV6:
3350                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3351                         if (!vlan && !eth)
3352                                 return rte_flow_error_set(error, EINVAL,
3353                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3354                                                 (void *)items->type,
3355                                                 "neither eth nor vlan"
3356                                                 " header found");
3357                         if (vlan && !vlan->eth_proto)
3358                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3359                         else if (eth && !eth->ether_type)
3360                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3361                         if (!ipv6->vtc_flow)
3362                                 ipv6->vtc_flow =
3363                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3364                         if (!ipv6->hop_limits)
3365                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3366                         break;
3367                 case RTE_FLOW_ITEM_TYPE_UDP:
3368                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3369                         if (!ipv4 && !ipv6)
3370                                 return rte_flow_error_set(error, EINVAL,
3371                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3372                                                 (void *)items->type,
3373                                                 "ip header not found");
3374                         if (ipv4 && !ipv4->next_proto_id)
3375                                 ipv4->next_proto_id = IPPROTO_UDP;
3376                         else if (ipv6 && !ipv6->proto)
3377                                 ipv6->proto = IPPROTO_UDP;
3378                         break;
3379                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3380                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3381                         if (!udp)
3382                                 return rte_flow_error_set(error, EINVAL,
3383                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3384                                                 (void *)items->type,
3385                                                 "udp header not found");
3386                         if (!udp->dst_port)
3387                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3388                         if (!vxlan->vx_flags)
3389                                 vxlan->vx_flags =
3390                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3391                         break;
3392                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3393                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3394                         if (!udp)
3395                                 return rte_flow_error_set(error, EINVAL,
3396                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3397                                                 (void *)items->type,
3398                                                 "udp header not found");
3399                         if (!vxlan_gpe->proto)
3400                                 return rte_flow_error_set(error, EINVAL,
3401                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3402                                                 (void *)items->type,
3403                                                 "next protocol not found");
3404                         if (!udp->dst_port)
3405                                 udp->dst_port =
3406                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3407                         if (!vxlan_gpe->vx_flags)
3408                                 vxlan_gpe->vx_flags =
3409                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3410                         break;
3411                 case RTE_FLOW_ITEM_TYPE_GRE:
3412                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3413                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3414                         if (!gre->proto)
3415                                 return rte_flow_error_set(error, EINVAL,
3416                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3417                                                 (void *)items->type,
3418                                                 "next protocol not found");
3419                         if (!ipv4 && !ipv6)
3420                                 return rte_flow_error_set(error, EINVAL,
3421                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3422                                                 (void *)items->type,
3423                                                 "ip header not found");
3424                         if (ipv4 && !ipv4->next_proto_id)
3425                                 ipv4->next_proto_id = IPPROTO_GRE;
3426                         else if (ipv6 && !ipv6->proto)
3427                                 ipv6->proto = IPPROTO_GRE;
3428                         break;
3429                 case RTE_FLOW_ITEM_TYPE_VOID:
3430                         break;
3431                 default:
3432                         return rte_flow_error_set(error, EINVAL,
3433                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3434                                                   (void *)items->type,
3435                                                   "unsupported item type");
3436                         break;
3437                 }
3438                 temp_size += len;
3439         }
3440         *size = temp_size;
3441         return 0;
3442 }
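
/*
 * Illustrative input sketch (hypothetical spec variables): a VXLAN
 * encapsulation definition. Fields left at zero in the specs are filled
 * with defaults by the loop above (EtherType, IPv4 version/IHL and TTL,
 * UDP destination port, VXLAN flags), and the resulting raw buffer size
 * for this stack is 14 + 20 + 8 + 8 = 50 bytes.
 *
 *     const struct rte_flow_item defs[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *     size_t size;
 *
 *     if (flow_dv_convert_encap_data(defs, buf, &size, &err))
 *             return -rte_errno;
 */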
3443
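/**
 * Zero the outer UDP checksum in prepared encapsulation data.
 *
 * The hardware recomputes the IPv4 header checksum, so IPv4 headers need
 * no adjustment here; for an IPv6/UDP outer header the UDP checksum is
 * cleared to zero instead, and non-IPv4/IPv6 payloads are rejected.
 *
 * @param[in] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet
 *   header (VLAN tags, if any, are skipped).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */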
3444 static int
3445 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3446 {
3447         struct rte_ether_hdr *eth = NULL;
3448         struct rte_vlan_hdr *vlan = NULL;
3449         struct rte_ipv6_hdr *ipv6 = NULL;
3450         struct rte_udp_hdr *udp = NULL;
3451         char *next_hdr;
3452         uint16_t proto;
3453
3454         eth = (struct rte_ether_hdr *)data;
3455         next_hdr = (char *)(eth + 1);
3456         proto = RTE_BE16(eth->ether_type);
3457
3458         /* VLAN skipping */
3459         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3460                 vlan = (struct rte_vlan_hdr *)next_hdr;
3461                 proto = RTE_BE16(vlan->eth_proto);
3462                 next_hdr += sizeof(struct rte_vlan_hdr);
3463         }
3464
3465         /* HW calculates IPv4 csum, no need to proceed. */
3466         if (proto == RTE_ETHER_TYPE_IPV4)
3467                 return 0;
3468
3469         /* A non-IPv4/IPv6 header is not supported. */
3470         if (proto != RTE_ETHER_TYPE_IPV6) {
3471                 return rte_flow_error_set(error, ENOTSUP,
3472                                           RTE_FLOW_ERROR_TYPE_ACTION,
3473                                           NULL, "Cannot offload non IPv4/IPv6");
3474         }
3475
3476         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3477
3478         /* Ignore non-UDP payload. */
3479         if (ipv6->proto != IPPROTO_UDP)
3480                 return 0;
3481
3482         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3483         udp->dgram_cksum = 0;
3484
3485         return 0;
3486 }
3487
3488 /**
3489  * Convert L2 encap action to DV specification.
3490  *
3491  * @param[in] dev
3492  *   Pointer to rte_eth_dev structure.
3493  * @param[in] action
3494  *   Pointer to action structure.
3495  * @param[in, out] dev_flow
3496  *   Pointer to the mlx5_flow.
3497  * @param[in] transfer
3498  *   Mark if the flow is E-Switch flow.
3499  * @param[out] error
3500  *   Pointer to the error structure.
3501  *
3502  * @return
3503  *   0 on success, a negative errno value otherwise and rte_errno is set.
3504  */
3505 static int
3506 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3507                                const struct rte_flow_action *action,
3508                                struct mlx5_flow *dev_flow,
3509                                uint8_t transfer,
3510                                struct rte_flow_error *error)
3511 {
3512         const struct rte_flow_item *encap_data;
3513         const struct rte_flow_action_raw_encap *raw_encap_data;
3514         struct mlx5_flow_dv_encap_decap_resource res = {
3515                 .reformat_type =
3516                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3517                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3518                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3519         };
3520
3521         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3522                 raw_encap_data =
3523                         (const struct rte_flow_action_raw_encap *)action->conf;
3524                 res.size = raw_encap_data->size;
3525                 memcpy(res.buf, raw_encap_data->data, res.size);
3526         } else {
3527                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3528                         encap_data =
3529                                 ((const struct rte_flow_action_vxlan_encap *)
3530                                                 action->conf)->definition;
3531                 else
3532                         encap_data =
3533                                 ((const struct rte_flow_action_nvgre_encap *)
3534                                                 action->conf)->definition;
3535                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3536                                                &res.size, error))
3537                         return -rte_errno;
3538         }
3539         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3540                 return -rte_errno;
3541         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3542                 return rte_flow_error_set(error, EINVAL,
3543                                           RTE_FLOW_ERROR_TYPE_ACTION,
3544                                           NULL, "can't create L2 encap action");
3545         return 0;
3546 }
3547
3548 /**
3549  * Convert L2 decap action to DV specification.
3550  *
3551  * @param[in] dev
3552  *   Pointer to rte_eth_dev structure.
3553  * @param[in, out] dev_flow
3554  *   Pointer to the mlx5_flow.
3555  * @param[in] transfer
3556  *   Mark if the flow is E-Switch flow.
3557  * @param[out] error
3558  *   Pointer to the error structure.
3559  *
3560  * @return
3561  *   0 on success, a negative errno value otherwise and rte_errno is set.
3562  */
3563 static int
3564 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3565                                struct mlx5_flow *dev_flow,
3566                                uint8_t transfer,
3567                                struct rte_flow_error *error)
3568 {
3569         struct mlx5_flow_dv_encap_decap_resource res = {
3570                 .size = 0,
3571                 .reformat_type =
3572                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3573                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3574                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3575         };
3576
3577         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3578                 return rte_flow_error_set(error, EINVAL,
3579                                           RTE_FLOW_ERROR_TYPE_ACTION,
3580                                           NULL, "can't create L2 decap action");
3581         return 0;
3582 }
3583
3584 /**
3585  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3586  *
3587  * @param[in] dev
3588  *   Pointer to rte_eth_dev structure.
3589  * @param[in] action
3590  *   Pointer to action structure.
3591  * @param[in, out] dev_flow
3592  *   Pointer to the mlx5_flow.
3593  * @param[in] attr
3594  *   Pointer to the flow attributes.
3595  * @param[out] error
3596  *   Pointer to the error structure.
3597  *
3598  * @return
3599  *   0 on success, a negative errno value otherwise and rte_errno is set.
3600  */
3601 static int
3602 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3603                                 const struct rte_flow_action *action,
3604                                 struct mlx5_flow *dev_flow,
3605                                 const struct rte_flow_attr *attr,
3606                                 struct rte_flow_error *error)
3607 {
3608         const struct rte_flow_action_raw_encap *encap_data;
3609         struct mlx5_flow_dv_encap_decap_resource res;
3610
3611         memset(&res, 0, sizeof(res));
3612         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3613         res.size = encap_data->size;
3614         memcpy(res.buf, encap_data->data, res.size);
3615         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3616                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3617                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3618         if (attr->transfer)
3619                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3620         else
3621                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3622                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3623         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3624                 return rte_flow_error_set(error, EINVAL,
3625                                           RTE_FLOW_ERROR_TYPE_ACTION,
3626                                           NULL, "can't create encap action");
3627         return 0;
3628 }
3629
3630 /**
3631  * Create action push VLAN.
3632  *
3633  * @param[in] dev
3634  *   Pointer to rte_eth_dev structure.
3635  * @param[in] attr
3636  *   Pointer to the flow attributes.
3637  * @param[in] vlan
3638  *   Pointer to the vlan to push to the Ethernet header.
3639  * @param[in, out] dev_flow
3640  *   Pointer to the mlx5_flow.
3641  * @param[out] error
3642  *   Pointer to the error structure.
3643  *
3644  * @return
3645  *   0 on success, a negative errno value otherwise and rte_errno is set.
3646  */
3647 static int
3648 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3649                                 const struct rte_flow_attr *attr,
3650                                 const struct rte_vlan_hdr *vlan,
3651                                 struct mlx5_flow *dev_flow,
3652                                 struct rte_flow_error *error)
3653 {
3654         struct mlx5_flow_dv_push_vlan_action_resource res;
3655
3656         memset(&res, 0, sizeof(res));
3657         res.vlan_tag =
3658                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3659                                  vlan->vlan_tci);
3660         if (attr->transfer)
3661                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3662         else
3663                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3664                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3665         return flow_dv_push_vlan_action_resource_register
3666                                             (dev, &res, dev_flow, error);
3667 }
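
/*
 * Worked example (illustrative): pushing VLAN ID 100 with PCP 3 under
 * TPID 0x8100 gives vlan_tag = RTE_BE32(0x81006064), because
 * (0x8100 << 16) | ((3 << 13) | 100) == 0x81006064.
 */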
3668
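/*
 * Non-zero when the device cannot combine E-Switch (FDB) mirroring with
 * the actions that follow the sample action in the same flow; consulted
 * below to reject modify-header after mirroring.
 */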
3669 static int fdb_mirror_limit;
3670
3671 /**
3672  * Validate the modify-header actions.
3673  *
3674  * @param[in] action_flags
3675  *   Holds the actions detected until now.
3676  * @param[in] action
3677  *   Pointer to the modify action.
3678  * @param[out] error
3679  *   Pointer to error structure.
3680  *
3681  * @return
3682  *   0 on success, a negative errno value otherwise and rte_errno is set.
3683  */
3684 static int
3685 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3686                                    const struct rte_flow_action *action,
3687                                    struct rte_flow_error *error)
3688 {
3689         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3690                 return rte_flow_error_set(error, EINVAL,
3691                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3692                                           NULL, "action configuration not set");
3693         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3694                 return rte_flow_error_set(error, EINVAL,
3695                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3696                                           "can't have encap action before"
3697                                           " modify action");
3698         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror_limit)
3699                 return rte_flow_error_set(error, EINVAL,
3700                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3701                                           "can't support sample action before"
3702                                           " modify action for E-Switch"
3703                                           " mirroring");
3704         return 0;
3705 }
3706
3707 /**
3708  * Validate the modify-header MAC address actions.
3709  *
3710  * @param[in] action_flags
3711  *   Holds the actions detected until now.
3712  * @param[in] action
3713  *   Pointer to the modify action.
3714  * @param[in] item_flags
3715  *   Holds the items detected.
3716  * @param[out] error
3717  *   Pointer to error structure.
3718  *
3719  * @return
3720  *   0 on success, a negative errno value otherwise and rte_errno is set.
3721  */
3722 static int
3723 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3724                                    const struct rte_flow_action *action,
3725                                    const uint64_t item_flags,
3726                                    struct rte_flow_error *error)
3727 {
3728         int ret = 0;
3729
3730         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3731         if (!ret) {
3732                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3733                         return rte_flow_error_set(error, EINVAL,
3734                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3735                                                   NULL,
3736                                                   "no L2 item in pattern");
3737         }
3738         return ret;
3739 }
3740
3741 /**
3742  * Validate the modify-header IPv4 address actions.
3743  *
3744  * @param[in] action_flags
3745  *   Holds the actions detected until now.
3746  * @param[in] action
3747  *   Pointer to the modify action.
3748  * @param[in] item_flags
3749  *   Holds the items detected.
3750  * @param[out] error
3751  *   Pointer to error structure.
3752  *
3753  * @return
3754  *   0 on success, a negative errno value otherwise and rte_errno is set.
3755  */
3756 static int
3757 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3758                                     const struct rte_flow_action *action,
3759                                     const uint64_t item_flags,
3760                                     struct rte_flow_error *error)
3761 {
3762         int ret = 0;
3763         uint64_t layer;
3764
3765         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3766         if (!ret) {
3767                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3768                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3769                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3770                 if (!(item_flags & layer))
3771                         return rte_flow_error_set(error, EINVAL,
3772                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3773                                                   NULL,
3774                                                   "no ipv4 item in pattern");
3775         }
3776         return ret;
3777 }
3778
3779 /**
3780  * Validate the modify-header IPv6 address actions.
3781  *
3782  * @param[in] action_flags
3783  *   Holds the actions detected until now.
3784  * @param[in] action
3785  *   Pointer to the modify action.
3786  * @param[in] item_flags
3787  *   Holds the items detected.
3788  * @param[out] error
3789  *   Pointer to error structure.
3790  *
3791  * @return
3792  *   0 on success, a negative errno value otherwise and rte_errno is set.
3793  */
3794 static int
3795 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3796                                     const struct rte_flow_action *action,
3797                                     const uint64_t item_flags,
3798                                     struct rte_flow_error *error)
3799 {
3800         int ret = 0;
3801         uint64_t layer;
3802
3803         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3804         if (!ret) {
3805                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3806                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3807                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3808                 if (!(item_flags & layer))
3809                         return rte_flow_error_set(error, EINVAL,
3810                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3811                                                   NULL,
3812                                                   "no ipv6 item in pattern");
3813         }
3814         return ret;
3815 }
3816
3817 /**
3818  * Validate the modify-header TP actions.
3819  *
3820  * @param[in] action_flags
3821  *   Holds the actions detected until now.
3822  * @param[in] action
3823  *   Pointer to the modify action.
3824  * @param[in] item_flags
3825  *   Holds the items detected.
3826  * @param[out] error
3827  *   Pointer to error structure.
3828  *
3829  * @return
3830  *   0 on success, a negative errno value otherwise and rte_errno is set.
3831  */
3832 static int
3833 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3834                                   const struct rte_flow_action *action,
3835                                   const uint64_t item_flags,
3836                                   struct rte_flow_error *error)
3837 {
3838         int ret = 0;
3839         uint64_t layer;
3840
3841         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3842         if (!ret) {
3843                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3844                                  MLX5_FLOW_LAYER_INNER_L4 :
3845                                  MLX5_FLOW_LAYER_OUTER_L4;
3846                 if (!(item_flags & layer))
3847                         return rte_flow_error_set(error, EINVAL,
3848                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3849                                                   NULL, "no transport layer "
3850                                                   "in pattern");
3851         }
3852         return ret;
3853 }
3854
3855 /**
3856  * Validate the modify-header actions of increment/decrement
3857  * TCP Sequence-number.
3858  *
3859  * @param[in] action_flags
3860  *   Holds the actions detected until now.
3861  * @param[in] action
3862  *   Pointer to the modify action.
3863  * @param[in] item_flags
3864  *   Holds the items detected.
3865  * @param[out] error
3866  *   Pointer to error structure.
3867  *
3868  * @return
3869  *   0 on success, a negative errno value otherwise and rte_errno is set.
3870  */
3871 static int
3872 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3873                                        const struct rte_flow_action *action,
3874                                        const uint64_t item_flags,
3875                                        struct rte_flow_error *error)
3876 {
3877         int ret = 0;
3878         uint64_t layer;
3879
3880         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3881         if (!ret) {
3882                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3883                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3884                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3885                 if (!(item_flags & layer))
3886                         return rte_flow_error_set(error, EINVAL,
3887                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3888                                                   NULL, "no TCP item in"
3889                                                   " pattern");
3890                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3891                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3892                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3893                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3894                         return rte_flow_error_set(error, EINVAL,
3895                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3896                                                   NULL,
3897                                                   "cannot decrease and increase"
3898                                                   " TCP sequence number"
3899                                                   " at the same time");
3900         }
3901         return ret;
3902 }
3903
3904 /**
3905  * Validate the modify-header actions of increment/decrement
3906  * TCP Acknowledgment number.
3907  *
3908  * @param[in] action_flags
3909  *   Holds the actions detected until now.
3910  * @param[in] action
3911  *   Pointer to the modify action.
3912  * @param[in] item_flags
3913  *   Holds the items detected.
3914  * @param[out] error
3915  *   Pointer to error structure.
3916  *
3917  * @return
3918  *   0 on success, a negative errno value otherwise and rte_errno is set.
3919  */
3920 static int
3921 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3922                                        const struct rte_flow_action *action,
3923                                        const uint64_t item_flags,
3924                                        struct rte_flow_error *error)
3925 {
3926         int ret = 0;
3927         uint64_t layer;
3928
3929         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3930         if (!ret) {
3931                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3932                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3933                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3934                 if (!(item_flags & layer))
3935                         return rte_flow_error_set(error, EINVAL,
3936                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3937                                                   NULL, "no TCP item in"
3938                                                   " pattern");
3939                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3940                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3941                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3942                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3943                         return rte_flow_error_set(error, EINVAL,
3944                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3945                                                   NULL,
3946                                                   "cannot decrease and increase"
3947                                                   " TCP acknowledgment number"
3948                                                   " at the same time");
3949         }
3950         return ret;
3951 }
3952
3953 /**
3954  * Validate the modify-header TTL actions.
3955  *
3956  * @param[in] action_flags
3957  *   Holds the actions detected until now.
3958  * @param[in] action
3959  *   Pointer to the modify action.
3960  * @param[in] item_flags
3961  *   Holds the items detected.
3962  * @param[out] error
3963  *   Pointer to error structure.
3964  *
3965  * @return
3966  *   0 on success, a negative errno value otherwise and rte_errno is set.
3967  */
3968 static int
3969 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3970                                    const struct rte_flow_action *action,
3971                                    const uint64_t item_flags,
3972                                    struct rte_flow_error *error)
3973 {
3974         int ret = 0;
3975         uint64_t layer;
3976
3977         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3978         if (!ret) {
3979                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3980                                  MLX5_FLOW_LAYER_INNER_L3 :
3981                                  MLX5_FLOW_LAYER_OUTER_L3;
3982                 if (!(item_flags & layer))
3983                         return rte_flow_error_set(error, EINVAL,
3984                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3985                                                   NULL,
3986                                                   "no IP protocol in pattern");
3987         }
3988         return ret;
3989 }
3990
3991 /**
3992  * Validate jump action.
3993  *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context, NULL when the flow does not
 *   use tunnel offload.
3994  * @param[in] action
3995  *   Pointer to the jump action.
3996  * @param[in] action_flags
3997  *   Holds the actions detected until now.
3998  * @param[in] attributes
3999  *   Pointer to flow attributes.
4000  * @param[in] external
4001  *   Whether the action belongs to a flow rule created by a request
 *   external to the PMD.
4002  * @param[out] error
4003  *   Pointer to error structure.
4004  *
4005  * @return
4006  *   0 on success, a negative errno value otherwise and rte_errno is set.
4007  */
4008 static int
4009 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4010                              const struct mlx5_flow_tunnel *tunnel,
4011                              const struct rte_flow_action *action,
4012                              uint64_t action_flags,
4013                              const struct rte_flow_attr *attributes,
4014                              bool external, struct rte_flow_error *error)
4015 {
4016         uint32_t target_group, table;
4017         int ret = 0;
4018         struct flow_grp_info grp_info = {
4019                 .external = !!external,
4020                 .transfer = !!attributes->transfer,
4021                 .fdb_def_rule = 1,
4022                 .std_tbl_fix = 0
4023         };
4024         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4025                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4026                 return rte_flow_error_set(error, EINVAL,
4027                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4028                                           "can't have 2 fate actions in"
4029                                           " the same flow");
4030         if (action_flags & MLX5_FLOW_ACTION_METER)
4031                 return rte_flow_error_set(error, ENOTSUP,
4032                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4033                                           "jump with meter not supported");
4034         if (!action->conf)
4035                 return rte_flow_error_set(error, EINVAL,
4036                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4037                                           NULL, "action configuration not set");
4038         target_group =
4039                 ((const struct rte_flow_action_jump *)action->conf)->group;
4040         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4041                                        &grp_info, error);
4042         if (ret)
4043                 return ret;
4044         if (attributes->group == target_group &&
4045             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4046                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4047                 return rte_flow_error_set(error, EINVAL,
4048                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4049                                           "target group must be other than"
4050                                           " the current flow group");
4051         return 0;
4052 }
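/*
 * Illustrative sketch, not part of the driver: a JUMP configuration that
 * passes flow_dv_validate_action_jump() -- the rule lives in group 0 of the
 * transfer (E-Switch) domain and jumps to group 1, so the target differs
 * from the current group as required. Names and values are hypothetical.
 */
static __rte_unused const struct rte_flow_attr flow_dv_example_jump_attr = {
	.group = 0,
	.transfer = 1,
};
static __rte_unused const struct rte_flow_action_jump
flow_dv_example_jump_conf = {
	.group = 1,
};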
4053
4054 /**
4055  * Validate the port_id action.
4056  *
4057  * @param[in] dev
4058  *   Pointer to rte_eth_dev structure.
4059  * @param[in] action_flags
4060  *   Bit-fields that holds the actions detected until now.
4061  * @param[in] action
4062  *   Port_id RTE action structure.
4063  * @param[in] attr
4064  *   Attributes of flow that includes this action.
4065  * @param[out] error
4066  *   Pointer to error structure.
4067  *
4068  * @return
4069  *   0 on success, a negative errno value otherwise and rte_errno is set.
4070  */
4071 static int
4072 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4073                                 uint64_t action_flags,
4074                                 const struct rte_flow_action *action,
4075                                 const struct rte_flow_attr *attr,
4076                                 struct rte_flow_error *error)
4077 {
4078         const struct rte_flow_action_port_id *port_id;
4079         struct mlx5_priv *act_priv;
4080         struct mlx5_priv *dev_priv;
4081         uint16_t port;
4082
4083         if (!attr->transfer)
4084                 return rte_flow_error_set(error, ENOTSUP,
4085                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4086                                           NULL,
4087                                           "port id action is valid in transfer"
4088                                           " mode only");
4089         if (!action || !action->conf)
4090                 return rte_flow_error_set(error, ENOTSUP,
4091                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4092                                           NULL,
4093                                           "port id action parameters must be"
4094                                           " specified");
4095         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4096                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4097                 return rte_flow_error_set(error, EINVAL,
4098                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4099                                           "can have only one fate action in"
4100                                           " a flow");
4101         dev_priv = mlx5_dev_to_eswitch_info(dev);
4102         if (!dev_priv)
4103                 return rte_flow_error_set(error, rte_errno,
4104                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4105                                           NULL,
4106                                           "failed to obtain E-Switch info");
4107         port_id = action->conf;
4108         port = port_id->original ? dev->data->port_id : port_id->id;
4109         act_priv = mlx5_port_to_eswitch_info(port, false);
4110         if (!act_priv)
4111                 return rte_flow_error_set
4112                                 (error, rte_errno,
4113                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4114                                  "failed to obtain E-Switch port id for port");
4115         if (act_priv->domain_id != dev_priv->domain_id)
4116                 return rte_flow_error_set
4117                                 (error, EINVAL,
4118                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4119                                  "port does not belong to"
4120                                  " E-Switch being configured");
4121         return 0;
4122 }
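/*
 * Illustrative sketch, not part of the driver: a PORT_ID configuration that
 * satisfies the checks above when used with attr->transfer == 1. With
 * .original set, the destination is the flow's own port instead of .id.
 * The port number is hypothetical and must share the E-Switch domain of
 * the configuring device.
 */
static __rte_unused const struct rte_flow_action_port_id
flow_dv_example_port_id_conf = {
	.original = 0,
	.id = 1,
};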
4123
4124 /**
4125  * Get the maximum number of modify header actions.
4126  *
4127  * @param dev
4128  *   Pointer to rte_eth_dev structure.
4129  * @param flags
4130  *   Flags bits to check if root level.
4131  *
4132  * @return
4133  *   Max number of modify header actions device can support.
4134  */
4135 static inline unsigned int
4136 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4137                               uint64_t flags)
4138 {
4139         /*
4140          * There's no way to directly query the max capacity from FW.
4141          * The maximal value on root table should be assumed to be supported.
4142          */
4143         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4144                 return MLX5_MAX_MODIFY_NUM;
4145         else
4146                 return MLX5_ROOT_TBL_MODIFY_NUM;
4147 }
4148
4149 /**
4150  * Validate the meter action.
4151  *
4152  * @param[in] dev
4153  *   Pointer to rte_eth_dev structure.
4154  * @param[in] action_flags
4155  *   Bit-fields that holds the actions detected until now.
4156  * @param[in] action
4157  *   Pointer to the meter action.
4158  * @param[in] attr
4159  *   Attributes of flow that includes this action.
4160  * @param[out] error
4161  *   Pointer to error structure.
4162  *
4163  * @return
4164  *   0 on success, a negative errno value otherwise and rte_errno is set.
4165  */
4166 static int
4167 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4168                                 uint64_t action_flags,
4169                                 const struct rte_flow_action *action,
4170                                 const struct rte_flow_attr *attr,
4171                                 struct rte_flow_error *error)
4172 {
4173         struct mlx5_priv *priv = dev->data->dev_private;
4174         const struct rte_flow_action_meter *am = action->conf;
4175         struct mlx5_flow_meter *fm;
4176
4177         if (!am)
4178                 return rte_flow_error_set(error, EINVAL,
4179                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4180                                           "meter action conf is NULL");
4181
4182         if (action_flags & MLX5_FLOW_ACTION_METER)
4183                 return rte_flow_error_set(error, ENOTSUP,
4184                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4185                                           "meter chaining not supported");
4186         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4187                 return rte_flow_error_set(error, ENOTSUP,
4188                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4189                                           "meter with jump not supported");
4190         if (!priv->mtr_en)
4191                 return rte_flow_error_set(error, ENOTSUP,
4192                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4193                                           NULL,
4194                                           "meter action not supported");
4195         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4196         if (!fm)
4197                 return rte_flow_error_set(error, EINVAL,
4198                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4199                                           "Meter not found");
4200         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4201               (!fm->ingress && !attr->ingress && attr->egress) ||
4202               (!fm->egress && !attr->egress && attr->ingress))))
4203                 return rte_flow_error_set(error, EINVAL,
4204                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4205                                           "Flow attributes are either invalid "
4206                                           "or have a conflict with current "
4207                                           "meter attributes");
4208         return 0;
4209 }
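/*
 * Illustrative sketch, not part of the driver: a meter action configuration
 * accepted by mlx5_flow_validate_action_meter(), provided a meter with this
 * ID was created beforehand (e.g. via the rte_mtr API) and the flow
 * attributes do not conflict with the meter attributes. The ID is
 * hypothetical.
 */
static __rte_unused const struct rte_flow_action_meter
flow_dv_example_meter_conf = {
	.mtr_id = 1,
};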
4210
4211 /**
4212  * Validate the age action.
4213  *
4214  * @param[in] action_flags
4215  *   Holds the actions detected until now.
4216  * @param[in] action
4217  *   Pointer to the age action.
4218  * @param[in] dev
4219  *   Pointer to the Ethernet device structure.
4220  * @param[out] error
4221  *   Pointer to error structure.
4222  *
4223  * @return
4224  *   0 on success, a negative errno value otherwise and rte_errno is set.
4225  */
4226 static int
4227 flow_dv_validate_action_age(uint64_t action_flags,
4228                             const struct rte_flow_action *action,
4229                             struct rte_eth_dev *dev,
4230                             struct rte_flow_error *error)
4231 {
4232         struct mlx5_priv *priv = dev->data->dev_private;
4233         const struct rte_flow_action_age *age = action->conf;
4234
4235         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4236             !priv->sh->aso_age_mng))
4237                 return rte_flow_error_set(error, ENOTSUP,
4238                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4239                                           NULL,
4240                                           "age action not supported");
4241         if (!(action->conf))
4242                 return rte_flow_error_set(error, EINVAL,
4243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4244                                           "configuration cannot be null");
4245         if (!(age->timeout))
4246                 return rte_flow_error_set(error, EINVAL,
4247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4248                                           "invalid timeout value 0");
4249         if (action_flags & MLX5_FLOW_ACTION_AGE)
4250                 return rte_flow_error_set(error, EINVAL,
4251                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4252                                           "duplicate age actions set");
4253         return 0;
4254 }
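/*
 * Illustrative sketch, not part of the driver: an AGE configuration that
 * passes flow_dv_validate_action_age() -- the timeout is non-zero and the
 * action appears only once per flow. Values are hypothetical.
 */
static __rte_unused const struct rte_flow_action_age
flow_dv_example_age_conf = {
	.timeout = 10, /* in seconds; 0 is rejected above */
	.context = NULL,
};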
4255
4256 /**
4257  * Validate the modify-header IPv4 DSCP actions.
4258  *
4259  * @param[in] action_flags
4260  *   Holds the actions detected until now.
4261  * @param[in] action
4262  *   Pointer to the modify action.
4263  * @param[in] item_flags
4264  *   Holds the items detected.
4265  * @param[out] error
4266  *   Pointer to error structure.
4267  *
4268  * @return
4269  *   0 on success, a negative errno value otherwise and rte_errno is set.
4270  */
4271 static int
4272 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4273                                          const struct rte_flow_action *action,
4274                                          const uint64_t item_flags,
4275                                          struct rte_flow_error *error)
4276 {
4277         int ret = 0;
4278
4279         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4280         if (!ret) {
4281                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4282                         return rte_flow_error_set(error, EINVAL,
4283                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4284                                                   NULL,
4285                                                   "no ipv4 item in pattern");
4286         }
4287         return ret;
4288 }
4289
4290 /**
4291  * Validate the modify-header IPv6 DSCP actions.
4292  *
4293  * @param[in] action_flags
4294  *   Holds the actions detected until now.
4295  * @param[in] action
4296  *   Pointer to the modify action.
4297  * @param[in] item_flags
4298  *   Holds the items detected.
4299  * @param[out] error
4300  *   Pointer to error structure.
4301  *
4302  * @return
4303  *   0 on success, a negative errno value otherwise and rte_errno is set.
4304  */
4305 static int
4306 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4307                                          const struct rte_flow_action *action,
4308                                          const uint64_t item_flags,
4309                                          struct rte_flow_error *error)
4310 {
4311         int ret = 0;
4312
4313         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4314         if (!ret) {
4315                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4316                         return rte_flow_error_set(error, EINVAL,
4317                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4318                                                   NULL,
4319                                                   "no ipv6 item in pattern");
4320         }
4321         return ret;
4322 }
4323
4324 /**
4325  * Match modify-header resource.
4326  *
4327  * @param list
4328  *   Pointer to the hash list.
4329  * @param entry
4330  *   Pointer to exist resource entry object.
4331  * @param key
4332  *   Key of the new entry.
4333  * @param ctx
4334  *   Pointer to new modify-header resource.
4335  *
4336  * @return
4337  *   0 on matching, non-zero otherwise.
4338  */
4339 int
4340 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4341                         struct mlx5_hlist_entry *entry,
4342                         uint64_t key __rte_unused, void *cb_ctx)
4343 {
4344         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4345         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4346         struct mlx5_flow_dv_modify_hdr_resource *resource =
4347                         container_of(entry, typeof(*resource), entry);
4348         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4349
4350         key_len += ref->actions_num * sizeof(ref->actions[0]);
4351         return ref->actions_num != resource->actions_num ||
4352                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4353 }
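/*
 * Illustrative sketch, not part of the driver: the number of bytes the
 * matcher above compares -- everything from the ft_type field to the end
 * of the structure plus the variable-length tail of modification actions.
 * flow_dv_modify_hdr_resource_register() below hashes the same span.
 */
static __rte_unused size_t
flow_dv_example_modify_key_len(const struct mlx5_flow_dv_modify_hdr_resource
			       *ref)
{
	return sizeof(*ref) - offsetof(typeof(*ref), ft_type) +
	       ref->actions_num * sizeof(ref->actions[0]);
}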
4354
4355 struct mlx5_hlist_entry *
4356 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4357                          void *cb_ctx)
4358 {
4359         struct mlx5_dev_ctx_shared *sh = list->ctx;
4360         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4361         struct mlx5dv_dr_domain *ns;
4362         struct mlx5_flow_dv_modify_hdr_resource *entry;
4363         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4364         int ret;
4365         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4366         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4367
4368         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4369                             SOCKET_ID_ANY);
4370         if (!entry) {
4371                 rte_flow_error_set(ctx->error, ENOMEM,
4372                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4373                                    "cannot allocate resource memory");
4374                 return NULL;
4375         }
4376         rte_memcpy(&entry->ft_type,
4377                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4378                    key_len + data_len);
4379         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4380                 ns = sh->fdb_domain;
4381         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4382                 ns = sh->tx_domain;
4383         else
4384                 ns = sh->rx_domain;
4385         ret = mlx5_flow_os_create_flow_action_modify_header
4386                                         (sh->ctx, ns, entry,
4387                                          data_len, &entry->action);
4388         if (ret) {
4389                 mlx5_free(entry);
4390                 rte_flow_error_set(ctx->error, ENOMEM,
4391                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4392                                    NULL, "cannot create modification action");
4393                 return NULL;
4394         }
4395         return &entry->entry;
4396 }
4397
4398 /**
4399  * Validate the sample action.
4400  *
4401  * @param[in] action_flags
4402  *   Holds the actions detected until now.
4403  * @param[in] action
4404  *   Pointer to the sample action.
4405  * @param[in] dev
4406  *   Pointer to the Ethernet device structure.
4407  * @param[in] attr
4408  *   Attributes of flow that includes this action.
4409  * @param[in] item_flags
4410  *   Holds the items detected.
4411  * @param[in] rss
4412  *   Pointer to the RSS action.
4413  * @param[out] sample_rss
4414  *   Pointer to the RSS action in sample action list.
4415  * @param[out] error
4416  *   Pointer to error structure.
4417  *
4418  * @return
4419  *   0 on success, a negative errno value otherwise and rte_errno is set.
4420  */
4421 static int
4422 flow_dv_validate_action_sample(uint64_t action_flags,
4423                                const struct rte_flow_action *action,
4424                                struct rte_eth_dev *dev,
4425                                const struct rte_flow_attr *attr,
4426                                uint64_t item_flags,
4427                                const struct rte_flow_action_rss *rss,
4428                                const struct rte_flow_action_rss **sample_rss,
4429                                struct rte_flow_error *error)
4430 {
4431         struct mlx5_priv *priv = dev->data->dev_private;
4432         struct mlx5_dev_config *dev_conf = &priv->config;
4433         const struct rte_flow_action_sample *sample = action->conf;
4434         const struct rte_flow_action *act;
4435         uint64_t sub_action_flags = 0;
4436         uint16_t queue_index = 0xFFFF;
4437         int actions_n = 0;
4438         int ret;
4439
4440         fdb_mirror_limit = 0;
4441         if (!sample)
4442                 return rte_flow_error_set(error, EINVAL,
4443                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4444                                           "configuration cannot be NULL");
4445         if (sample->ratio == 0)
4446                 return rte_flow_error_set(error, EINVAL,
4447                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4448                                           "ratio value starts from 1");
4449         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4450                 return rte_flow_error_set(error, ENOTSUP,
4451                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4452                                           NULL,
4453                                           "sample action not supported");
4454         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4455                 return rte_flow_error_set(error, EINVAL,
4456                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4457                                           "Multiple sample actions not "
4458                                           "supported");
4459         if (action_flags & MLX5_FLOW_ACTION_METER)
4460                 return rte_flow_error_set(error, EINVAL,
4461                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4462                                           "wrong action order, meter should "
4463                                           "be after sample action");
4464         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4465                 return rte_flow_error_set(error, EINVAL,
4466                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4467                                           "wrong action order, jump should "
4468                                           "be after sample action");
4469         act = sample->actions;
4470         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4471                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4472                         return rte_flow_error_set(error, ENOTSUP,
4473                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4474                                                   act, "too many actions");
4475                 switch (act->type) {
4476                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4477                         ret = mlx5_flow_validate_action_queue(act,
4478                                                               sub_action_flags,
4479                                                               dev,
4480                                                               attr, error);
4481                         if (ret < 0)
4482                                 return ret;
4483                         queue_index = ((const struct rte_flow_action_queue *)
4484                                                         (act->conf))->index;
4485                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4486                         ++actions_n;
4487                         break;
4488                 case RTE_FLOW_ACTION_TYPE_RSS:
4489                         *sample_rss = act->conf;
4490                         ret = mlx5_flow_validate_action_rss(act,
4491                                                             sub_action_flags,
4492                                                             dev, attr,
4493                                                             item_flags,
4494                                                             error);
4495                         if (ret < 0)
4496                                 return ret;
4497                         if (rss && *sample_rss &&
4498                             ((*sample_rss)->level != rss->level ||
4499                             (*sample_rss)->types != rss->types))
4500                                 return rte_flow_error_set(error, ENOTSUP,
4501                                         RTE_FLOW_ERROR_TYPE_ACTION,
4502                                         NULL,
4503                                         "Can't use different RSS types "
4504                                         "or levels in the same flow");
4505                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
4506                                 queue_index = (*sample_rss)->queue[0];
4507                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
4508                         ++actions_n;
4509                         break;
4510                 case RTE_FLOW_ACTION_TYPE_MARK:
4511                         ret = flow_dv_validate_action_mark(dev, act,
4512                                                            sub_action_flags,
4513                                                            attr, error);
4514                         if (ret < 0)
4515                                 return ret;
4516                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4517                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4518                                                 MLX5_FLOW_ACTION_MARK_EXT;
4519                         else
4520                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4521                         ++actions_n;
4522                         break;
4523                 case RTE_FLOW_ACTION_TYPE_COUNT:
4524                         ret = flow_dv_validate_action_count(dev, error);
4525                         if (ret < 0)
4526                                 return ret;
4527                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4528                         ++actions_n;
4529                         break;
4530                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4531                         ret = flow_dv_validate_action_port_id(dev,
4532                                                               sub_action_flags,
4533                                                               act,
4534                                                               attr,
4535                                                               error);
4536                         if (ret)
4537                                 return ret;
4538                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4539                         ++actions_n;
4540                         break;
4541                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4542                         ret = flow_dv_validate_action_raw_encap_decap
4543                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4544                                  &actions_n, action, item_flags, error);
4545                         if (ret < 0)
4546                                 return ret;
4547                         ++actions_n;
4548                         break;
4549                 default:
4550                         return rte_flow_error_set(error, ENOTSUP,
4551                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4552                                                   NULL,
4553                                                   "Doesn't support optional "
4554                                                   "action");
4555                 }
4556         }
4557         if (attr->ingress && !attr->transfer) {
4558                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
4559                                           MLX5_FLOW_ACTION_RSS)))
4560                         return rte_flow_error_set(error, EINVAL,
4561                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4562                                                   NULL,
4563                                                   "Ingress must have a dest "
4564                                                   "QUEUE for Sample");
4565         } else if (attr->egress && !attr->transfer) {
4566                 return rte_flow_error_set(error, ENOTSUP,
4567                                           RTE_FLOW_ERROR_TYPE_ACTION,
4568                                           NULL,
4569                                           "Sample only supports Ingress "
4570                                           "or E-Switch");
4571         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4572                 MLX5_ASSERT(attr->transfer);
4573                 if (sample->ratio > 1)
4574                         return rte_flow_error_set(error, ENOTSUP,
4575                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4576                                                   NULL,
4577                                                   "E-Switch doesn't support "
4578                                                   "any optional action "
4579                                                   "for sampling");
4580                 fdb_mirror_limit = 1;
4581                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4582                         return rte_flow_error_set(error, ENOTSUP,
4583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4584                                                   NULL,
4585                                                   "unsupported action QUEUE");
4586                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
4587                         return rte_flow_error_set(error, ENOTSUP,
4588                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4589                                                   NULL,
4590                                                   "unsupported action RSS");
4591                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4592                         return rte_flow_error_set(error, EINVAL,
4593                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4594                                                   NULL,
4595                                                   "E-Switch must have a dest "
4596                                                   "port for mirroring");
4597         }
4598         /* Continue validation for Xcap actions. */
4599         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4600             (queue_index == 0xFFFF ||
4601              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4602                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4603                      MLX5_FLOW_XCAP_ACTIONS)
4604                         return rte_flow_error_set(error, ENOTSUP,
4605                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4606                                                   NULL, "encap and decap "
4607                                                   "combination isn't "
4608                                                   "supported");
4609                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4610                                                         MLX5_FLOW_ACTION_ENCAP))
4611                         return rte_flow_error_set(error, ENOTSUP,
4612                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4613                                                   NULL, "encap is not supported"
4614                                                   " for ingress traffic");
4615         }
4616         return 0;
4617 }
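/*
 * Illustrative sketch, not part of the driver: an E-Switch mirroring setup
 * that passes flow_dv_validate_action_sample(). ratio == 1 duplicates every
 * packet to the sample path, whose only sub-action is a destination port;
 * the main path of the rule may then continue with a JUMP, the combination
 * this validation (via fdb_mirror_limit) keeps apart from modify-header
 * actions. All names and port numbers are hypothetical.
 */
static __rte_unused const struct rte_flow_action_port_id
flow_dv_example_mirror_port = {
	.id = 1,
};
static __rte_unused const struct rte_flow_action
flow_dv_example_mirror_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
		.conf = &flow_dv_example_mirror_port,
	},
	{
		.type = RTE_FLOW_ACTION_TYPE_END,
	},
};
static __rte_unused const struct rte_flow_action_sample
flow_dv_example_sample_conf = {
	.ratio = 1, /* mirror all packets */
	.actions = flow_dv_example_mirror_actions,
};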
4618
4619 /**
4620  * Find existing modify-header resource or create and register a new one.
4621  *
4622  * @param[in, out] dev
4623  *   Pointer to rte_eth_dev structure.
4624  * @param[in, out] resource
4625  *   Pointer to modify-header resource.
4626  * @param[in, out] dev_flow
4627  *   Pointer to the dev_flow.
4628  * @param[out] error
4629  *   Pointer to error structure.
4630  *
4631  * @return
4632  *   0 on success, a negative errno value otherwise and rte_errno is set.
4633  */
4634 static int
4635 flow_dv_modify_hdr_resource_register
4636                         (struct rte_eth_dev *dev,
4637                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4638                          struct mlx5_flow *dev_flow,
4639                          struct rte_flow_error *error)
4640 {
4641         struct mlx5_priv *priv = dev->data->dev_private;
4642         struct mlx5_dev_ctx_shared *sh = priv->sh;
4643         uint32_t key_len = sizeof(*resource) -
4644                            offsetof(typeof(*resource), ft_type) +
4645                            resource->actions_num * sizeof(resource->actions[0]);
4646         struct mlx5_hlist_entry *entry;
4647         struct mlx5_flow_cb_ctx ctx = {
4648                 .error = error,
4649                 .data = resource,
4650         };
4651         uint64_t key64;
4652
4653         resource->flags = dev_flow->dv.group ? 0 :
4654                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4655         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4656                                     resource->flags))
4657                 return rte_flow_error_set(error, EOVERFLOW,
4658                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4659                                           "too many modify header items");
4660         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4661         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
4662         if (!entry)
4663                 return -rte_errno;
4664         resource = container_of(entry, typeof(*resource), entry);
4665         dev_flow->handle->dvh.modify_hdr = resource;
4666         return 0;
4667 }
4668
4669 /**
4670  * Get DV flow counter by index.
4671  *
4672  * @param[in] dev
4673  *   Pointer to the Ethernet device structure.
4674  * @param[in] idx
4675  *   mlx5 flow counter index in the container.
4676  * @param[out] ppool
4677  *   mlx5 flow counter pool in the container,
4678  *   mlx5 flow counter pool in the container.
4679  * @return
4680  *   Pointer to the counter, NULL otherwise.
4681  */
4682 static struct mlx5_flow_counter *
4683 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4684                            uint32_t idx,
4685                            struct mlx5_flow_counter_pool **ppool)
4686 {
4687         struct mlx5_priv *priv = dev->data->dev_private;
4688         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4689         struct mlx5_flow_counter_pool *pool;
4690
4691         /* Decrease to original index and clear shared bit. */
4692         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4693         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4694         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4695         MLX5_ASSERT(pool);
4696         if (ppool)
4697                 *ppool = pool;
4698         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4699 }
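/*
 * Illustrative sketch, not part of the driver: how a counter index decodes
 * into a pool number and an in-pool offset, mirroring the arithmetic above.
 * Index 0 means "no counter", hence the "- 1"; masking with
 * MLX5_CNT_SHARED_OFFSET - 1 strips the shared-counter flag bit.
 */
static __rte_unused void
flow_dv_example_decode_cnt_idx(uint32_t idx, uint32_t *pool_no,
			       uint32_t *offset)
{
	idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
	*pool_no = idx / MLX5_COUNTERS_PER_POOL;
	*offset = idx % MLX5_COUNTERS_PER_POOL;
}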
4700
4701 /**
4702  * Check the devx counter belongs to the pool.
4703  *
4704  * @param[in] pool
4705  *   Pointer to the counter pool.
4706  * @param[in] id
4707  *   The counter devx ID.
4708  *
4709  * @return
4710  *   True if counter belongs to the pool, false otherwise.
4711  */
4712 static bool
4713 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4714 {
4715         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4716                    MLX5_COUNTERS_PER_POOL;
4717
4718         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4719                 return true;
4720         return false;
4721 }
4722
4723 /**
4724  * Get a pool by devx counter ID.
4725  *
4726  * @param[in] cmng
4727  *   Pointer to the counter management.
4728  * @param[in] id
4729  *   The counter devx ID.
4730  *
4731  * @return
4732  *   The counter pool pointer if it exists, NULL otherwise.
4733  */
4734 static struct mlx5_flow_counter_pool *
4735 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4736 {
4737         uint32_t i;
4738         struct mlx5_flow_counter_pool *pool = NULL;
4739
4740         rte_spinlock_lock(&cmng->pool_update_sl);
4741         /* Check last used pool. */
4742         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4743             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4744                 pool = cmng->pools[cmng->last_pool_idx];
4745                 goto out;
4746         }
4747         /* ID out of range means no suitable pool in the container. */
4748         if (id > cmng->max_id || id < cmng->min_id)
4749                 goto out;
4750         /*
4751          * Search the container from the end: counter IDs are mostly
4752          * allocated in increasing order, so the last pool is the most
4753          * likely match.
4754          */
4755         i = cmng->n_valid;
4756         while (i--) {
4757                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4758
4759                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4760                         pool = pool_tmp;
4761                         break;
4762                 }
4763         }
4764 out:
4765         rte_spinlock_unlock(&cmng->pool_update_sl);
4766         return pool;
4767 }
4768
4769 /**
4770  * Resize a counter container.
4771  *
4772  * @param[in] dev
4773  *   Pointer to the Ethernet device structure.
4774  *
4775  * @return
4776  *   0 on success, otherwise negative errno value and rte_errno is set.
4777  */
4778 static int
4779 flow_dv_container_resize(struct rte_eth_dev *dev)
4780 {
4781         struct mlx5_priv *priv = dev->data->dev_private;
4782         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4783         void *old_pools = cmng->pools;
4784         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4785         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4786         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4787
4788         if (!pools) {
4789                 rte_errno = ENOMEM;
4790                 return -ENOMEM;
4791         }
4792         if (old_pools)
4793                 memcpy(pools, old_pools, cmng->n *
4794                                        sizeof(struct mlx5_flow_counter_pool *));
4795         cmng->n = resize;
4796         cmng->pools = pools;
4797         if (old_pools)
4798                 mlx5_free(old_pools);
4799         return 0;
4800 }
4801
4802 /**
4803  * Query a devx flow counter.
4804  *
4805  * @param[in] dev
4806  *   Pointer to the Ethernet device structure.
4807  * @param[in] counter
4808  *   Index to the flow counter.
4809  * @param[out] pkts
4810  *   The statistics value of packets.
4811  * @param[out] bytes
4812  *   The statistics value of bytes.
4813  *
4814  * @return
4815  *   0 on success, otherwise a negative errno value and rte_errno is set.
4816  */
4817 static inline int
4818 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4819                      uint64_t *bytes)
4820 {
4821         struct mlx5_priv *priv = dev->data->dev_private;
4822         struct mlx5_flow_counter_pool *pool = NULL;
4823         struct mlx5_flow_counter *cnt;
4824         int offset;
4825
4826         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4827         MLX5_ASSERT(pool);
4828         if (priv->sh->cmng.counter_fallback)
4829                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4830                                         0, pkts, bytes, 0, NULL, NULL, 0);
4831         rte_spinlock_lock(&pool->sl);
4832         if (!pool->raw) {
4833                 *pkts = 0;
4834                 *bytes = 0;
4835         } else {
4836                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4837                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4838                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4839         }
4840         rte_spinlock_unlock(&pool->sl);
4841         return 0;
4842 }
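/*
 * Illustrative sketch, not part of the driver: computing per-flow deltas
 * from the absolute values returned by _flow_dv_query_count(). While a
 * counter is active, cnt->hits and cnt->bytes hold the reset-time readings
 * saved by flow_dv_counter_alloc() below, so subtracting them yields the
 * traffic seen since the counter was attached.
 */
static __rte_unused int
flow_dv_example_query_delta(struct rte_eth_dev *dev, uint32_t counter,
			    uint64_t *d_pkts, uint64_t *d_bytes)
{
	struct mlx5_flow_counter *cnt;
	uint64_t pkts, bytes;
	int ret;

	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
	ret = _flow_dv_query_count(dev, counter, &pkts, &bytes);
	if (ret)
		return ret;
	*d_pkts = pkts - cnt->hits;
	*d_bytes = bytes - cnt->bytes;
	return 0;
}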
4843
4844 /**
4845  * Create and initialize a new counter pool.
4846  *
4847  * @param[in] dev
4848  *   Pointer to the Ethernet device structure.
4849  * @param[out] dcs
4850  *   The devX counter handle.
4851  * @param[in] age
4852  *   Whether the pool is for counter that was allocated for aging.
4855  *
4856  * @return
4857  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4858  */
4859 static struct mlx5_flow_counter_pool *
4860 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4861                     uint32_t age)
4862 {
4863         struct mlx5_priv *priv = dev->data->dev_private;
4864         struct mlx5_flow_counter_pool *pool;
4865         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4866         bool fallback = priv->sh->cmng.counter_fallback;
4867         uint32_t size = sizeof(*pool);
4868
4869         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4870         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4871         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4872         if (!pool) {
4873                 rte_errno = ENOMEM;
4874                 return NULL;
4875         }
4876         pool->raw = NULL;
4877         pool->is_aged = !!age;
4878         pool->query_gen = 0;
4879         pool->min_dcs = dcs;
4880         rte_spinlock_init(&pool->sl);
4881         rte_spinlock_init(&pool->csl);
4882         TAILQ_INIT(&pool->counters[0]);
4883         TAILQ_INIT(&pool->counters[1]);
4884         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4885         rte_spinlock_lock(&cmng->pool_update_sl);
4886         pool->index = cmng->n_valid;
4887         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4888                 mlx5_free(pool);
4889                 rte_spinlock_unlock(&cmng->pool_update_sl);
4890                 return NULL;
4891         }
4892         cmng->pools[pool->index] = pool;
4893         cmng->n_valid++;
4894         if (unlikely(fallback)) {
4895                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4896
4897                 if (base < cmng->min_id)
4898                         cmng->min_id = base;
4899                 if (base > cmng->max_id)
4900                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4901                 cmng->last_pool_idx = pool->index;
4902         }
4903         rte_spinlock_unlock(&cmng->pool_update_sl);
4904         return pool;
4905 }
4906
4907 /**
4908  * Prepare a new counter and/or a new counter pool.
4909  *
4910  * @param[in] dev
4911  *   Pointer to the Ethernet device structure.
4912  * @param[out] cnt_free
4913  *   Where to put the pointer of a new counter.
4914  * @param[in] age
4915  *   Whether the pool is for counter that was allocated for aging.
4916  *
4917  * @return
4918  *   The counter pool pointer and @p cnt_free is set on success,
4919  *   NULL otherwise and rte_errno is set.
4920  */
4921 static struct mlx5_flow_counter_pool *
4922 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4923                              struct mlx5_flow_counter **cnt_free,
4924                              uint32_t age)
4925 {
4926         struct mlx5_priv *priv = dev->data->dev_private;
4927         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4928         struct mlx5_flow_counter_pool *pool;
4929         struct mlx5_counters tmp_tq;
4930         struct mlx5_devx_obj *dcs = NULL;
4931         struct mlx5_flow_counter *cnt;
4932         enum mlx5_counter_type cnt_type =
4933                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4934         bool fallback = priv->sh->cmng.counter_fallback;
4935         uint32_t i;
4936
4937         if (fallback) {
4938                 /* bulk_bitmap must be 0 for single counter allocation. */
4939                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4940                 if (!dcs)
4941                         return NULL;
4942                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4943                 if (!pool) {
4944                         pool = flow_dv_pool_create(dev, dcs, age);
4945                         if (!pool) {
4946                                 mlx5_devx_cmd_destroy(dcs);
4947                                 return NULL;
4948                         }
4949                 }
4950                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4951                 cnt = MLX5_POOL_GET_CNT(pool, i);
4952                 cnt->pool = pool;
4953                 cnt->dcs_when_free = dcs;
4954                 *cnt_free = cnt;
4955                 return pool;
4956         }
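        /*
         * Bulk allocation: each unit of the bulk bitmap represents 128
         * counters, so 0x4 requests 4 * 128 = 512 counters, i.e. one full
         * pool (MLX5_COUNTERS_PER_POOL) carved from a single devx object.
         */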
4957         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4958         if (!dcs) {
4959                 rte_errno = ENODATA;
4960                 return NULL;
4961         }
4962         pool = flow_dv_pool_create(dev, dcs, age);
4963         if (!pool) {
4964                 mlx5_devx_cmd_destroy(dcs);
4965                 return NULL;
4966         }
4967         TAILQ_INIT(&tmp_tq);
4968         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4969                 cnt = MLX5_POOL_GET_CNT(pool, i);
4970                 cnt->pool = pool;
4971                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4972         }
4973         rte_spinlock_lock(&cmng->csl[cnt_type]);
4974         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4975         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4976         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4977         (*cnt_free)->pool = pool;
4978         return pool;
4979 }
4980
4981 /**
4982  * Allocate a flow counter.
4983  *
4984  * @param[in] dev
4985  *   Pointer to the Ethernet device structure.
4986  * @param[in] age
4987  *   Whether the counter was allocated for aging.
4988  *
4989  * @return
4990  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4991  */
4992 static uint32_t
4993 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4994 {
4995         struct mlx5_priv *priv = dev->data->dev_private;
4996         struct mlx5_flow_counter_pool *pool = NULL;
4997         struct mlx5_flow_counter *cnt_free = NULL;
4998         bool fallback = priv->sh->cmng.counter_fallback;
4999         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5000         enum mlx5_counter_type cnt_type =
5001                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5002         uint32_t cnt_idx;
5003
5004         if (!priv->config.devx) {
5005                 rte_errno = ENOTSUP;
5006                 return 0;
5007         }
5008         /* Try to get a free counter from the per-type container list. */
5009         rte_spinlock_lock(&cmng->csl[cnt_type]);
5010         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5011         if (cnt_free)
5012                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5013         rte_spinlock_unlock(&cmng->csl[cnt_type]);
5014         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5015                 goto err;
5016         pool = cnt_free->pool;
5017         if (fallback)
5018                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5019         /* Create a DV counter action only on first-time usage. */
5020         if (!cnt_free->action) {
5021                 uint16_t offset;
5022                 struct mlx5_devx_obj *dcs;
5023                 int ret;
5024
5025                 if (!fallback) {
5026                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5027                         dcs = pool->min_dcs;
5028                 } else {
5029                         offset = 0;
5030                         dcs = cnt_free->dcs_when_free;
5031                 }
5032                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5033                                                             &cnt_free->action);
5034                 if (ret) {
5035                         rte_errno = errno;
5036                         goto err;
5037                 }
5038         }
5039         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5040                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
5041         /* Update the counter reset values. */
5042         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5043                                  &cnt_free->bytes))
5044                 goto err;
5045         if (!fallback && !priv->sh->cmng.query_thread_on)
5046                 /* Start the asynchronous batch query by the host thread. */
5047                 mlx5_set_query_alarm(priv->sh);
5048         return cnt_idx;
5049 err:
5050         if (cnt_free) {
5051                 cnt_free->pool = pool;
5052                 if (fallback)
5053                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5054                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5055                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5056                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5057         }
5058         return 0;
5059 }
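
     /*
      * A minimal usage sketch for the allocator above (illustrative
      * only): the returned index encodes the pool and the in-pool
      * offset via MLX5_MAKE_CNT_IDX(); 0 means failure and rte_errno
      * is set.
      *
      *    uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
      *    if (!cnt_idx)
      *            return -rte_errno;
      *    ...
      *    flow_dv_counter_free(dev, cnt_idx);
      */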
5060
5061 /**
5062  * Allocate a shared flow counter.
5063  *
5064  * @param[in] ctx
5065  *   Pointer to the shared counter configuration.
5066  * @param[out] data
5067  *   Pointer to save the allocated counter index.
5068  *
5069  * @return
5070  *   0 on success, a negative errno value otherwise.
5071  */
5073 static int32_t
5074 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5075 {
5076         struct mlx5_shared_counter_conf *conf = ctx;
5077         struct rte_eth_dev *dev = conf->dev;
5078         struct mlx5_flow_counter *cnt;
5079
5080         data->dword = flow_dv_counter_alloc(dev, 0);
             if (!data->dword)
                     return -rte_errno;
5081         data->dword |= MLX5_CNT_SHARED_OFFSET;
5082         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5083         cnt->shared_info.id = conf->id;
5084         return 0;
5085 }
5086
5087 /**
5088  * Get a shared flow counter.
5089  *
5090  * @param[in] dev
5091  *   Pointer to the Ethernet device structure.
5092  * @param[in] id
5093  *   Counter identifier.
5094  *
5095  * @return
5096  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5097  */
5098 static uint32_t
5099 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5100 {
5101         struct mlx5_priv *priv = dev->data->dev_private;
5102         struct mlx5_shared_counter_conf conf = {
5103                 .dev = dev,
5104                 .id = id,
5105         };
5106         union mlx5_l3t_data data = {
5107                 .dword = 0,
5108         };
5109
5110         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5111                                flow_dv_counter_alloc_shared_cb, &conf);
5112         return data.dword;
5113 }
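
     /*
      * Sharing semantics sketch (illustrative; the id 42 is arbitrary):
      * two calls with the same application-provided id resolve to the
      * same counter index, since mlx5_l3t_prepare_entry() invokes the
      * allocation callback only for the first reference.
      *
      *    uint32_t a = flow_dv_counter_get_shared(dev, 42);
      *    uint32_t b = flow_dv_counter_get_shared(dev, 42);
      *    MLX5_ASSERT(a == b);
      */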
5114
5115 /**
5116  * Get age param from counter index.
5117  *
5118  * @param[in] dev
5119  *   Pointer to the Ethernet device structure.
5120  * @param[in] counter
5121  *   Index to the counter handler.
5122  *
5123  * @return
5124  *   The aging parameter specified for the counter index.
5125  */
5126 static struct mlx5_age_param *
5127 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5128                                 uint32_t counter)
5129 {
5130         struct mlx5_flow_counter *cnt;
5131         struct mlx5_flow_counter_pool *pool = NULL;
5132
5133         flow_dv_counter_get_by_idx(dev, counter, &pool);
5134         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5135         cnt = MLX5_POOL_GET_CNT(pool, counter);
5136         return MLX5_CNT_TO_AGE(cnt);
5137 }
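
     /*
      * Worked example for the decoding above, assuming the default
      * MLX5_COUNTERS_PER_POOL of 512: DV counter indices are 1-based,
      * so index 514 maps to in-pool offset (514 - 1) % 512 = 1, the
      * second counter of its pool.
      */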
5138
5139 /**
5140  * Remove a flow counter from aged counter list.
5141  *
5142  * @param[in] dev
5143  *   Pointer to the Ethernet device structure.
5144  * @param[in] counter
5145  *   Index to the counter handler.
5146  * @param[in] cnt
5147  *   Pointer to the counter handler.
5148  */
5149 static void
5150 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5151                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5152 {
5153         struct mlx5_age_info *age_info;
5154         struct mlx5_age_param *age_param;
5155         struct mlx5_priv *priv = dev->data->dev_private;
5156         uint16_t expected = AGE_CANDIDATE;
5157
5158         age_info = GET_PORT_AGE_INFO(priv);
5159         age_param = flow_dv_counter_idx_get_age(dev, counter);
5160         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5161                                          AGE_FREE, false, __ATOMIC_RELAXED,
5162                                          __ATOMIC_RELAXED)) {
5163                 /*
5164                  * The lock is needed even on age timeout, since the
5165                  * counter may still be handled by the aging process.
5166                  */
5167                 rte_spinlock_lock(&age_info->aged_sl);
5168                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5169                 rte_spinlock_unlock(&age_info->aged_sl);
5170                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5171         }
5172 }
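
     /*
      * Age-state sketch for the removal above (descriptive only): a
      * successful CAS means the counter was still AGE_CANDIDATE and is
      * simply reset to AGE_FREE; a failed CAS means the aging process
      * already timed it out and queued it on aged_counters, so it must
      * be unlinked under the aged_sl lock before being marked AGE_FREE.
      */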
5173
5174 /**
5175  * Release a flow counter.
5176  *
5177  * @param[in] dev
5178  *   Pointer to the Ethernet device structure.
5179  * @param[in] counter
5180  *   Index to the counter handler.
5181  */
5182 static void
5183 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5184 {
5185         struct mlx5_priv *priv = dev->data->dev_private;
5186         struct mlx5_flow_counter_pool *pool = NULL;
5187         struct mlx5_flow_counter *cnt;
5188         enum mlx5_counter_type cnt_type;
5189
5190         if (!counter)
5191                 return;
5192         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5193         MLX5_ASSERT(pool);
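             /* A shared counter is freed only on its last reference. */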
5194         if (IS_SHARED_CNT(counter) &&
5195             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5196                 return;
5197         if (pool->is_aged)
5198                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5199         cnt->pool = pool;
5200         /*
5201          * In non-fallback mode, put the counter back to the list to be
5202          * updated. Two lists are used alternately: while one is being
5203          * queried, freed counters are added to the other one, selected
5204          * by the pool query_gen value. After the query finishes, that
5205          * list is appended to the global container counter list. The
5206          * lists are swapped when a query starts, so no lock is needed:
5207          * the query callback and this release function operate on
5208          * different lists.
5209          */
5210         if (!priv->sh->cmng.counter_fallback) {
5211                 rte_spinlock_lock(&pool->csl);
5212                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5213                 rte_spinlock_unlock(&pool->csl);
5214         } else {
5215                 cnt->dcs_when_free = cnt->dcs_when_active;
5216                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5217                                            MLX5_COUNTER_TYPE_ORIGIN;
5218                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5219                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5220                                   cnt, next);
5221                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5222         }
5223 }
5224
5225 /**
5226  * Verify that the @p attributes will be correctly understood by the NIC
5227  * and are valid for the flow to be created.
5228  *
5229  * @param[in] dev
5230  *   Pointer to dev struct.
5231  * @param[in] tunnel
5232  *   Pointer to the tunnel offload structure, NULL if not a tunnel rule.
5233  * @param[in] attributes
5234  *   Pointer to flow attributes.
      * @param[in] grp_info
      *   Pointer to group translation attributes.
5235  * @param[out] error
5236  *   Pointer to error structure.
5237  *
5238  * @return
5239  *   - 0 on success and a non-root table.
5240  *   - 1 on success and a root table.
5241  *   - a negative errno value otherwise and rte_errno is set.
5242  */
5243 static int
5244 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5245                             const struct mlx5_flow_tunnel *tunnel,
5246                             const struct rte_flow_attr *attributes,
5247                             const struct flow_grp_info *grp_info,
5248                             struct rte_flow_error *error)
5249 {
5250         struct mlx5_priv *priv = dev->data->dev_private;
5251         uint32_t priority_max = priv->config.flow_prio - 1;
5252         int ret = 0;
5253
5254 #ifndef HAVE_MLX5DV_DR
5255         RTE_SET_USED(tunnel);
5256         RTE_SET_USED(grp_info);
5257         if (attributes->group)
5258                 return rte_flow_error_set(error, ENOTSUP,
5259                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5260                                           NULL,
5261                                           "groups are not supported");
5262 #else
5263         uint32_t table = 0;
5264
5265         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5266                                        grp_info, error);
5267         if (ret)
5268                 return ret;
5269         if (!table)
5270                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5271 #endif
5272         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5273             attributes->priority >= priority_max)
5274                 return rte_flow_error_set(error, ENOTSUP,
5275                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5276                                           NULL,
5277                                           "priority out of range");
5278         if (attributes->transfer) {
5279                 if (!priv->config.dv_esw_en)
5280                         return rte_flow_error_set
5281                                 (error, ENOTSUP,
5282                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5283                                  "E-Switch dr is not supported");
5284                 if (!(priv->representor || priv->master))
5285                         return rte_flow_error_set
5286                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5287                                  NULL, "E-Switch configuration can only be"
5288                                  " done by a master or a representor device");
5289                 if (attributes->egress)
5290                         return rte_flow_error_set
5291                                 (error, ENOTSUP,
5292                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5293                                  "egress is not supported");
5294         }
5295         if (!(attributes->egress ^ attributes->ingress))
5296                 return rte_flow_error_set(error, ENOTSUP,
5297                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5298                                           "must specify exactly one of "
5299                                           "ingress or egress");
5300         return ret;
5301 }
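
     /*
      * Illustrative attribute sets for the checks above (sketch only,
      * using hypothetical values):
      *
      *    struct rte_flow_attr ok = { .group = 1, .ingress = 1 };
      *    struct rte_flow_attr bad = { .ingress = 1, .egress = 1 };
      *
      * The first would typically be accepted on a non-root table; the
      * second is rejected because exactly one of ingress/egress must
      * be set.
      */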
5302
5303 /**
5304  * Internal validation function, for validating both actions and items.
5305  *
5306  * @param[in] dev
5307  *   Pointer to the rte_eth_dev structure.
5308  * @param[in] attr
5309  *   Pointer to the flow attributes.
5310  * @param[in] items
5311  *   Pointer to the list of items.
5312  * @param[in] actions
5313  *   Pointer to the list of actions.
5314  * @param[in] external
5315  *   This flow rule is created by request external to PMD.
5316  * @param[in] hairpin
5317  *   Number of hairpin TX actions, 0 means classic flow.
5318  * @param[out] error
5319  *   Pointer to the error structure.
5320  *
5321  * @return
5322  *   0 on success, a negative errno value otherwise and rte_errno is set.
5323  */
5324 static int
5325 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5326                  const struct rte_flow_item items[],
5327                  const struct rte_flow_action actions[],
5328                  bool external, int hairpin, struct rte_flow_error *error)
5329 {
5330         int ret;
5331         uint64_t action_flags = 0;
5332         uint64_t item_flags = 0;
5333         uint64_t last_item = 0;
5334         uint8_t next_protocol = 0xff;
5335         uint16_t ether_type = 0;
5336         int actions_n = 0;
5337         uint8_t item_ipv6_proto = 0;
5338         const struct rte_flow_item *geneve_item = NULL;
5339         const struct rte_flow_item *gre_item = NULL;
5340         const struct rte_flow_item *gtp_item = NULL;
5341         const struct rte_flow_action_raw_decap *decap;
5342         const struct rte_flow_action_raw_encap *encap;
5343         const struct rte_flow_action_rss *rss = NULL;
5344         const struct rte_flow_action_rss *sample_rss = NULL;
5345         const struct rte_flow_item_tcp nic_tcp_mask = {
5346                 .hdr = {
5347                         .tcp_flags = 0xFF,
5348                         .src_port = RTE_BE16(UINT16_MAX),
5349                         .dst_port = RTE_BE16(UINT16_MAX),
5350                 }
5351         };
5352         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5353                 .hdr = {
5354                         .src_addr =
5355                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5356                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5357                         .dst_addr =
5358                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5359                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5360                         .vtc_flow = RTE_BE32(0xffffffff),
5361                         .proto = 0xff,
5362                         .hop_limits = 0xff,
5363                 },
5364                 .has_frag_ext = 1,
5365         };
5366         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5367                 .hdr = {
5368                         .common = {
5369                                 .u32 =
5370                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5371                                         .type = 0xFF,
5372                                         }).u32),
5373                         },
5374                         .dummy[0] = 0xffffffff,
5375                 },
5376         };
5377         struct mlx5_priv *priv = dev->data->dev_private;
5378         struct mlx5_dev_config *dev_conf = &priv->config;
5379         uint16_t queue_index = 0xFFFF;
5380         const struct rte_flow_item_vlan *vlan_m = NULL;
5381         int16_t rw_act_num = 0;
5382         uint64_t is_root;
5383         const struct mlx5_flow_tunnel *tunnel;
5384         struct flow_grp_info grp_info = {
5385                 .external = !!external,
5386                 .transfer = !!attr->transfer,
5387                 .fdb_def_rule = !!priv->fdb_def_rule,
5388         };
5389         const struct rte_eth_hairpin_conf *conf;
5390
5391         if (items == NULL)
5392                 return -1;
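             /*
              * Classify tunnel-offload rules up front: a tunnel "match"
              * rule implies an implicit decap, while a tunnel "steer"
              * rule only sets the tunnel context.
              */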
5393         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5394                 tunnel = flow_items_to_tunnel(items);
5395                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5396                                 MLX5_FLOW_ACTION_DECAP;
5397         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5398                 tunnel = flow_actions_to_tunnel(actions);
5399                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5400         } else {
5401                 tunnel = NULL;
5402         }
5403         if (tunnel && priv->representor)
5404                 return rte_flow_error_set(error, ENOTSUP,
5405                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5406                                           "decap not supported "
5407                                           "for VF representor");
5408         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5409                                 (dev, tunnel, attr, items, actions);
5410         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
5411         if (ret < 0)
5412                 return ret;
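             /* flow_dv_validate_attributes() returns 1 for the root table. */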
5413         is_root = (uint64_t)ret;
5414         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5415                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
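                     /* Note: shadows the outer "tunnel" pointer in this loop. */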
5416                 int type = items->type;
5417
5418                 if (!mlx5_flow_os_item_supported(type))
5419                         return rte_flow_error_set(error, ENOTSUP,
5420                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5421                                                   NULL, "item not supported");
5422                 switch (type) {
5423                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5424                         if (items[0].type != (typeof(items[0].type))
5425                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5426                                 return rte_flow_error_set
5427                                                 (error, EINVAL,
5428                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5429                                                 NULL, "MLX5 private items "
5430                                                 "must be the first");
5431                         break;
5432                 case RTE_FLOW_ITEM_TYPE_VOID:
5433                         break;
5434                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5435                         ret = flow_dv_validate_item_port_id
5436                                         (dev, items, attr, item_flags, error);
5437                         if (ret < 0)
5438                                 return ret;
5439                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5440                         break;
5441                 case RTE_FLOW_ITEM_TYPE_ETH:
5442                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5443                                                           true, error);
5444                         if (ret < 0)
5445                                 return ret;
5446                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5447                                              MLX5_FLOW_LAYER_OUTER_L2;
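                             /*
                              * Derive the effective EtherType (spec & mask) so
                              * the following L3 item can be cross-checked.
                              */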
5448                         if (items->mask != NULL && items->spec != NULL) {
5449                                 ether_type =
5450                                         ((const struct rte_flow_item_eth *)
5451                                          items->spec)->type;
5452                                 ether_type &=
5453                                         ((const struct rte_flow_item_eth *)
5454                                          items->mask)->type;
5455                                 ether_type = rte_be_to_cpu_16(ether_type);
5456                         } else {
5457                                 ether_type = 0;
5458                         }
5459                         break;
5460                 case RTE_FLOW_ITEM_TYPE_VLAN:
5461                         ret = flow_dv_validate_item_vlan(items, item_flags,
5462                                                          dev, error);
5463                         if (ret < 0)
5464                                 return ret;
5465                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5466                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5467                         if (items->mask != NULL && items->spec != NULL) {
5468                                 ether_type =
5469                                         ((const struct rte_flow_item_vlan *)
5470                                          items->spec)->inner_type;
5471                                 ether_type &=
5472                                         ((const struct rte_flow_item_vlan *)
5473                                          items->mask)->inner_type;
5474                                 ether_type = rte_be_to_cpu_16(ether_type);
5475                         } else {
5476                                 ether_type = 0;
5477                         }
5478                         /* Store outer VLAN mask for of_push_vlan action. */
5479                         if (!tunnel)
5480                                 vlan_m = items->mask;
5481                         break;
5482                 case RTE_FLOW_ITEM_TYPE_IPV4:
5483                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5484                                                   &item_flags, &tunnel);
5485                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5486                                                          last_item, ether_type,
5487                                                          error);
5488                         if (ret < 0)
5489                                 return ret;
5490                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5491                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5492                         if (items->mask != NULL &&
5493                             ((const struct rte_flow_item_ipv4 *)
5494                              items->mask)->hdr.next_proto_id) {
5495                                 next_protocol =
5496                                         ((const struct rte_flow_item_ipv4 *)
5497                                          (items->spec))->hdr.next_proto_id;
5498                                 next_protocol &=
5499                                         ((const struct rte_flow_item_ipv4 *)
5500                                          (items->mask))->hdr.next_proto_id;
5501                         } else {
5502                                 /* Reset for inner layer. */
5503                                 next_protocol = 0xff;
5504                         }
5505                         break;
5506                 case RTE_FLOW_ITEM_TYPE_IPV6:
5507                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5508                                                   &item_flags, &tunnel);
5509                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5510                                                            last_item,
5511                                                            ether_type,
5512                                                            &nic_ipv6_mask,
5513                                                            error);
5514                         if (ret < 0)
5515                                 return ret;
5516                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5517                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5518                         if (items->mask != NULL &&
5519                             ((const struct rte_flow_item_ipv6 *)
5520                              items->mask)->hdr.proto) {
5521                                 item_ipv6_proto =
5522                                         ((const struct rte_flow_item_ipv6 *)
5523                                          items->spec)->hdr.proto;
5524                                 next_protocol =
5525                                         ((const struct rte_flow_item_ipv6 *)
5526                                          items->spec)->hdr.proto;
5527                                 next_protocol &=
5528                                         ((const struct rte_flow_item_ipv6 *)
5529                                          items->mask)->hdr.proto;
5530                         } else {
5531                                 /* Reset for inner layer. */
5532                                 next_protocol = 0xff;
5533                         }
5534                         break;
5535                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5536                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5537                                                                   item_flags,
5538                                                                   error);
5539                         if (ret < 0)
5540                                 return ret;
5541                         last_item = tunnel ?
5542                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5543                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5544                         if (items->mask != NULL &&
5545                             ((const struct rte_flow_item_ipv6_frag_ext *)
5546                              items->mask)->hdr.next_header) {
5547                                 next_protocol =
5548                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5549                                  items->spec)->hdr.next_header;
5550                                 next_protocol &=
5551                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5552                                  items->mask)->hdr.next_header;
5553                         } else {
5554                                 /* Reset for inner layer. */
5555                                 next_protocol = 0xff;
5556                         }
5557                         break;
5558                 case RTE_FLOW_ITEM_TYPE_TCP:
5559                         ret = mlx5_flow_validate_item_tcp
5560                                                 (items, item_flags,
5561                                                  next_protocol,
5562                                                  &nic_tcp_mask,
5563                                                  error);
5564                         if (ret < 0)
5565                                 return ret;
5566                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5567                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5568                         break;
5569                 case RTE_FLOW_ITEM_TYPE_UDP:
5570                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5571                                                           next_protocol,
5572                                                           error);
5573                         if (ret < 0)
5574                                 return ret;
5575                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5576                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5577                         break;
5578                 case RTE_FLOW_ITEM_TYPE_GRE:
5579                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5580                                                           next_protocol, error);
5581                         if (ret < 0)
5582                                 return ret;
5583                         gre_item = items;
5584                         last_item = MLX5_FLOW_LAYER_GRE;
5585                         break;
5586                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5587                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5588                                                             next_protocol,
5589                                                             error);
5590                         if (ret < 0)
5591                                 return ret;
5592                         last_item = MLX5_FLOW_LAYER_NVGRE;
5593                         break;
5594                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5595                         ret = mlx5_flow_validate_item_gre_key
5596                                 (items, item_flags, gre_item, error);
5597                         if (ret < 0)
5598                                 return ret;
5599                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5600                         break;
5601                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5602                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5603                                                             error);
5604                         if (ret < 0)
5605                                 return ret;
5606                         last_item = MLX5_FLOW_LAYER_VXLAN;
5607                         break;
5608                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5609                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5610                                                                 item_flags, dev,
5611                                                                 error);
5612                         if (ret < 0)
5613                                 return ret;
5614                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5615                         break;
5616                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5617                         ret = mlx5_flow_validate_item_geneve(items,
5618                                                              item_flags, dev,
5619                                                              error);
5620                         if (ret < 0)
5621                                 return ret;
5622                         geneve_item = items;
5623                         last_item = MLX5_FLOW_LAYER_GENEVE;
5624                         break;
5625                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
5626                         ret = mlx5_flow_validate_item_geneve_opt(items,
5627                                                                  last_item,
5628                                                                  geneve_item,
5629                                                                  dev,
5630                                                                  error);
5631                         if (ret < 0)
5632                                 return ret;
5633                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
5634                         break;
5635                 case RTE_FLOW_ITEM_TYPE_MPLS:
5636                         ret = mlx5_flow_validate_item_mpls(dev, items,
5637                                                            item_flags,
5638                                                            last_item, error);
5639                         if (ret < 0)
5640                                 return ret;
5641                         last_item = MLX5_FLOW_LAYER_MPLS;
5642                         break;
5644                 case RTE_FLOW_ITEM_TYPE_MARK:
5645                         ret = flow_dv_validate_item_mark(dev, items, attr,
5646                                                          error);
5647                         if (ret < 0)
5648                                 return ret;
5649                         last_item = MLX5_FLOW_ITEM_MARK;
5650                         break;
5651                 case RTE_FLOW_ITEM_TYPE_META:
5652                         ret = flow_dv_validate_item_meta(dev, items, attr,
5653                                                          error);
5654                         if (ret < 0)
5655                                 return ret;
5656                         last_item = MLX5_FLOW_ITEM_METADATA;
5657                         break;
5658                 case RTE_FLOW_ITEM_TYPE_ICMP:
5659                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5660                                                            next_protocol,
5661                                                            error);
5662                         if (ret < 0)
5663                                 return ret;
5664                         last_item = MLX5_FLOW_LAYER_ICMP;
5665                         break;
5666                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5667                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5668                                                             next_protocol,
5669                                                             error);
5670                         if (ret < 0)
5671                                 return ret;
5672                         item_ipv6_proto = IPPROTO_ICMPV6;
5673                         last_item = MLX5_FLOW_LAYER_ICMP6;
5674                         break;
5675                 case RTE_FLOW_ITEM_TYPE_TAG:
5676                         ret = flow_dv_validate_item_tag(dev, items,
5677                                                         attr, error);
5678                         if (ret < 0)
5679                                 return ret;
5680                         last_item = MLX5_FLOW_ITEM_TAG;
5681                         break;
5682                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5683                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5684                         break;
5685                 case RTE_FLOW_ITEM_TYPE_GTP:
5686                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5687                                                         error);
5688                         if (ret < 0)
5689                                 return ret;
5690                         gtp_item = items;
5691                         last_item = MLX5_FLOW_LAYER_GTP;
5692                         break;
5693                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
5694                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
5695                                                             gtp_item, attr,
5696                                                             error);
5697                         if (ret < 0)
5698                                 return ret;
5699                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
5700                         break;
5701                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5702                         /* Capacity will be checked in the translate stage. */
5703                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5704                                                             last_item,
5705                                                             ether_type,
5706                                                             &nic_ecpri_mask,
5707                                                             error);
5708                         if (ret < 0)
5709                                 return ret;
5710                         last_item = MLX5_FLOW_LAYER_ECPRI;
5711                         break;
5712                 default:
5713                         return rte_flow_error_set(error, ENOTSUP,
5714                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5715                                                   NULL, "item not supported");
5716                 }
5717                 item_flags |= last_item;
5718         }
5719         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5720                 int type = actions->type;
5721
5722                 if (!mlx5_flow_os_action_supported(type))
5723                         return rte_flow_error_set(error, ENOTSUP,
5724                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5725                                                   actions,
5726                                                   "action not supported");
5727                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5728                         return rte_flow_error_set(error, ENOTSUP,
5729                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5730                                                   actions, "too many actions");
5731                 switch (type) {
5732                 case RTE_FLOW_ACTION_TYPE_VOID:
5733                         break;
5734                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5735                         ret = flow_dv_validate_action_port_id(dev,
5736                                                               action_flags,
5737                                                               actions,
5738                                                               attr,
5739                                                               error);
5740                         if (ret)
5741                                 return ret;
5742                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5743                         ++actions_n;
5744                         break;
5745                 case RTE_FLOW_ACTION_TYPE_FLAG:
5746                         ret = flow_dv_validate_action_flag(dev, action_flags,
5747                                                            attr, error);
5748                         if (ret < 0)
5749                                 return ret;
5750                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5751                                 /* Count all modify-header actions as one. */
5752                                 if (!(action_flags &
5753                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5754                                         ++actions_n;
5755                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5756                                                 MLX5_FLOW_ACTION_MARK_EXT;
5757                         } else {
5758                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5759                                 ++actions_n;
5760                         }
5761                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5762                         break;
5763                 case RTE_FLOW_ACTION_TYPE_MARK:
5764                         ret = flow_dv_validate_action_mark(dev, actions,
5765                                                            action_flags,
5766                                                            attr, error);
5767                         if (ret < 0)
5768                                 return ret;
5769                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5770                                 /* Count all modify-header actions as one. */
5771                                 if (!(action_flags &
5772                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5773                                         ++actions_n;
5774                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5775                                                 MLX5_FLOW_ACTION_MARK_EXT;
5776                         } else {
5777                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5778                                 ++actions_n;
5779                         }
5780                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5781                         break;
5782                 case RTE_FLOW_ACTION_TYPE_SET_META:
5783                         ret = flow_dv_validate_action_set_meta(dev, actions,
5784                                                                action_flags,
5785                                                                attr, error);
5786                         if (ret < 0)
5787                                 return ret;
5788                         /* Count all modify-header actions as one action. */
5789                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5790                                 ++actions_n;
5791                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5792                         rw_act_num += MLX5_ACT_NUM_SET_META;
5793                         break;
5794                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5795                         ret = flow_dv_validate_action_set_tag(dev, actions,
5796                                                               action_flags,
5797                                                               attr, error);
5798                         if (ret < 0)
5799                                 return ret;
5800                         /* Count all modify-header actions as one action. */
5801                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5802                                 ++actions_n;
5803                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5804                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5805                         break;
5806                 case RTE_FLOW_ACTION_TYPE_DROP:
5807                         ret = mlx5_flow_validate_action_drop(action_flags,
5808                                                              attr, error);
5809                         if (ret < 0)
5810                                 return ret;
5811                         action_flags |= MLX5_FLOW_ACTION_DROP;
5812                         ++actions_n;
5813                         break;
5814                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5815                         ret = mlx5_flow_validate_action_queue(actions,
5816                                                               action_flags, dev,
5817                                                               attr, error);
5818                         if (ret < 0)
5819                                 return ret;
5820                         queue_index = ((const struct rte_flow_action_queue *)
5821                                                         (actions->conf))->index;
5822                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5823                         ++actions_n;
5824                         break;
5825                 case RTE_FLOW_ACTION_TYPE_RSS:
5826                         rss = actions->conf;
5827                         ret = mlx5_flow_validate_action_rss(actions,
5828                                                             action_flags, dev,
5829                                                             attr, item_flags,
5830                                                             error);
5831                         if (ret < 0)
5832                                 return ret;
5833                         if (rss && sample_rss &&
5834                             (sample_rss->level != rss->level ||
5835                             sample_rss->types != rss->types))
5836                                 return rte_flow_error_set(error, ENOTSUP,
5837                                         RTE_FLOW_ERROR_TYPE_ACTION,
5838                                         NULL,
5839                                         "Cannot use different RSS types "
5840                                         "or levels in the same flow");
5841                         if (rss != NULL && rss->queue_num)
5842                                 queue_index = rss->queue[0];
5843                         action_flags |= MLX5_FLOW_ACTION_RSS;
5844                         ++actions_n;
5845                         break;
5846                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5847                         ret =
5848                         mlx5_flow_validate_action_default_miss(action_flags,
5849                                         attr, error);
5850                         if (ret < 0)
5851                                 return ret;
5852                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5853                         ++actions_n;
5854                         break;
5855                 case RTE_FLOW_ACTION_TYPE_COUNT:
5856                         ret = flow_dv_validate_action_count(dev, error);
5857                         if (ret < 0)
5858                                 return ret;
5859                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5860                         ++actions_n;
5861                         break;
5862                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5863                         if (flow_dv_validate_action_pop_vlan(dev,
5864                                                              action_flags,
5865                                                              actions,
5866                                                              item_flags, attr,
5867                                                              error))
5868                                 return -rte_errno;
5869                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5870                         ++actions_n;
5871                         break;
5872                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5873                         ret = flow_dv_validate_action_push_vlan(dev,
5874                                                                 action_flags,
5875                                                                 vlan_m,
5876                                                                 actions, attr,
5877                                                                 error);
5878                         if (ret < 0)
5879                                 return ret;
5880                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5881                         ++actions_n;
5882                         break;
5883                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5884                         ret = flow_dv_validate_action_set_vlan_pcp
5885                                                 (action_flags, actions, error);
5886                         if (ret < 0)
5887                                 return ret;
5888                         /* PCP is counted together with the push_vlan action. */
5889                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5890                         break;
5891                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5892                         ret = flow_dv_validate_action_set_vlan_vid
5893                                                 (item_flags, action_flags,
5894                                                  actions, error);
5895                         if (ret < 0)
5896                                 return ret;
5897                         /* VID is counted together with the push_vlan action. */
5898                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5899                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5900                         break;
5901                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5902                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5903                         ret = flow_dv_validate_action_l2_encap(dev,
5904                                                                action_flags,
5905                                                                actions, attr,
5906                                                                error);
5907                         if (ret < 0)
5908                                 return ret;
5909                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5910                         ++actions_n;
5911                         break;
5912                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5913                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5914                         ret = flow_dv_validate_action_decap(dev, action_flags,
5915                                                             actions, item_flags,
5916                                                             attr, error);
5917                         if (ret < 0)
5918                                 return ret;
5919                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5920                         ++actions_n;
5921                         break;
5922                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5923                         ret = flow_dv_validate_action_raw_encap_decap
5924                                 (dev, NULL, actions->conf, attr, &action_flags,
5925                                  &actions_n, actions, item_flags, error);
5926                         if (ret < 0)
5927                                 return ret;
5928                         break;
5929                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5930                         decap = actions->conf;
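                             /*
                              * Look ahead past VOID actions: a raw_decap
                              * directly followed by raw_encap is validated as
                              * one decap/encap (e.g. L3 tunnel) combination.
                              */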
5931                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5932                                 ;
5933                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5934                                 encap = NULL;
5935                                 actions--;
5936                         } else {
5937                                 encap = actions->conf;
5938                         }
5939                         ret = flow_dv_validate_action_raw_encap_decap
5940                                            (dev,
5941                                             decap ? decap : &empty_decap, encap,
5942                                             attr, &action_flags, &actions_n,
5943                                             actions, item_flags, error);
5944                         if (ret < 0)
5945                                 return ret;
5946                         break;
5947                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5948                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5949                         ret = flow_dv_validate_action_modify_mac(action_flags,
5950                                                                  actions,
5951                                                                  item_flags,
5952                                                                  error);
5953                         if (ret < 0)
5954                                 return ret;
5955                         /* Count all modify-header actions as one action. */
5956                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5957                                 ++actions_n;
5958                         action_flags |= actions->type ==
5959                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5960                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5961                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5962                         /*
5963                          * Even though the source and destination MAC
5964                          * addresses overlap in the 4B-aligned header, the
5965                          * convert function handles them separately and 4
5966                          * SW actions are created, 2 per address, no matter
5967                          * how many bytes of the address are actually set.
5968                          */
5969                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5970                         break;
5971                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5972                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5973                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5974                                                                   actions,
5975                                                                   item_flags,
5976                                                                   error);
5977                         if (ret < 0)
5978                                 return ret;
5979                         /* Count all modify-header actions as one action. */
5980                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5981                                 ++actions_n;
5982                         action_flags |= actions->type ==
5983                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5984                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5985                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5986                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5987                         break;
5988                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5989                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5990                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5991                                                                   actions,
5992                                                                   item_flags,
5993                                                                   error);
5994                         if (ret < 0)
5995                                 return ret;
5996                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5997                                 return rte_flow_error_set(error, ENOTSUP,
5998                                         RTE_FLOW_ERROR_TYPE_ACTION,
5999                                         actions,
6000                                         "Can't change header "
6001                                         "with ICMPv6 proto");
6002                         /* Count all modify-header actions as one action. */
6003                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6004                                 ++actions_n;
6005                         action_flags |= actions->type ==
6006                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6007                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6008                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6009                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6010                         break;
6011                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6012                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6013                         ret = flow_dv_validate_action_modify_tp(action_flags,
6014                                                                 actions,
6015                                                                 item_flags,
6016                                                                 error);
6017                         if (ret < 0)
6018                                 return ret;
6019                         /* Count all modify-header actions as one action. */
6020                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6021                                 ++actions_n;
6022                         action_flags |= actions->type ==
6023                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6024                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6025                                                 MLX5_FLOW_ACTION_SET_TP_DST;
6026                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
6027                         break;
6028                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6029                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
6030                         ret = flow_dv_validate_action_modify_ttl(action_flags,
6031                                                                  actions,
6032                                                                  item_flags,
6033                                                                  error);
6034                         if (ret < 0)
6035                                 return ret;
6036                         /* Count all modify-header actions as one action. */
6037                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6038                                 ++actions_n;
6039                         action_flags |= actions->type ==
6040                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
6041                                                 MLX5_FLOW_ACTION_SET_TTL :
6042                                                 MLX5_FLOW_ACTION_DEC_TTL;
6043                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
6044                         break;
6045                 case RTE_FLOW_ACTION_TYPE_JUMP:
6046                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
6047                                                            action_flags,
6048                                                            attr, external,
6049                                                            error);
6050                         if (ret)
6051                                 return ret;
6052                         ++actions_n;
6053                         action_flags |= MLX5_FLOW_ACTION_JUMP;
6054                         break;
6055                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6056                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6057                         ret = flow_dv_validate_action_modify_tcp_seq
6058                                                                 (action_flags,
6059                                                                  actions,
6060                                                                  item_flags,
6061                                                                  error);
6062                         if (ret < 0)
6063                                 return ret;
6064                         /* Count all modify-header actions as one action. */
6065                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6066                                 ++actions_n;
6067                         action_flags |= actions->type ==
6068                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
6069                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
6070                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6071                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
6072                         break;
6073                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6074                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6075                         ret = flow_dv_validate_action_modify_tcp_ack
6076                                                                 (action_flags,
6077                                                                  actions,
6078                                                                  item_flags,
6079                                                                  error);
6080                         if (ret < 0)
6081                                 return ret;
6082                         /* Count all modify-header actions as one action. */
6083                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6084                                 ++actions_n;
6085                         action_flags |= actions->type ==
6086                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6087                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
6088                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
6089                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
6090                         break;
6091                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
6092                         break;
6093                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6094                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6095                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6096                         break;
6097                 case RTE_FLOW_ACTION_TYPE_METER:
6098                         ret = mlx5_flow_validate_action_meter(dev,
6099                                                               action_flags,
6100                                                               actions, attr,
6101                                                               error);
6102                         if (ret < 0)
6103                                 return ret;
6104                         action_flags |= MLX5_FLOW_ACTION_METER;
6105                         ++actions_n;
6106                         /* Meter action will add one more TAG action. */
6107                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6108                         break;
6109                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
6110                         if (!attr->transfer && !attr->group)
6111                                 return rte_flow_error_set(error, ENOTSUP,
6112                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6113                                                 NULL, "Shared ASO age action "
6114                                                 "is not supported for group 0");
6115                         action_flags |= MLX5_FLOW_ACTION_AGE;
6116                         ++actions_n;
6117                         break;
6118                 case RTE_FLOW_ACTION_TYPE_AGE:
6119                         ret = flow_dv_validate_action_age(action_flags,
6120                                                           actions, dev,
6121                                                           error);
6122                         if (ret < 0)
6123                                 return ret;
6124                         action_flags |= MLX5_FLOW_ACTION_AGE;
6125                         ++actions_n;
6126                         break;
6127                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6128                         ret = flow_dv_validate_action_modify_ipv4_dscp
6129                                                          (action_flags,
6130                                                           actions,
6131                                                           item_flags,
6132                                                           error);
6133                         if (ret < 0)
6134                                 return ret;
6135                         /* Count all modify-header actions as one action. */
6136                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6137                                 ++actions_n;
6138                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6139                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6140                         break;
6141                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6142                         ret = flow_dv_validate_action_modify_ipv6_dscp
6143                                                                 (action_flags,
6144                                                                  actions,
6145                                                                  item_flags,
6146                                                                  error);
6147                         if (ret < 0)
6148                                 return ret;
6149                         /* Count all modify-header actions as one action. */
6150                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6151                                 ++actions_n;
6152                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6153                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6154                         break;
6155                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6156                         ret = flow_dv_validate_action_sample(action_flags,
6157                                                              actions, dev,
6158                                                              attr, item_flags,
6159                                                              rss, &sample_rss,
6160                                                              error);
6161                         if (ret < 0)
6162                                 return ret;
6163                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6164                         ++actions_n;
6165                         break;
6166                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6167                         if (actions[0].type != (typeof(actions[0].type))
6168                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6169                                 return rte_flow_error_set
6170                                                 (error, EINVAL,
6171                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6172                                                 NULL, "MLX5 private action "
6173                                                 "must be the first");
6175                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6176                         break;
6177                 default:
6178                         return rte_flow_error_set(error, ENOTSUP,
6179                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6180                                                   actions,
6181                                                   "action not supported");
6182                 }
6183         }
6184         /*
6185          * Validate actions in tunnel-offload flow rules:
6186          * - Explicit decap action is prohibited by the tunnel offload API.
6187          * - Drop action in a tunnel steer rule is prohibited by the API.
6188          * - Application cannot use MARK action because its value can mask
6189          *   the tunnel default-miss notification.
6190          * - JUMP in a tunnel match rule is not supported by the current
6191          *   PMD implementation.
6192          * - TAG & META are reserved for future use.
6193          */
6194         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6195                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6196                                             MLX5_FLOW_ACTION_MARK     |
6197                                             MLX5_FLOW_ACTION_SET_TAG  |
6198                                             MLX5_FLOW_ACTION_SET_META |
6199                                             MLX5_FLOW_ACTION_DROP;
6200
6201                 if (action_flags & bad_actions_mask)
6202                         return rte_flow_error_set
6203                                         (error, EINVAL,
6204                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6205                                         "Invalid RTE action in tunnel "
6206                                         "set decap rule");
6207                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6208                         return rte_flow_error_set
6209                                         (error, EINVAL,
6210                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6211                                         "tunnel set decap rule must terminate "
6212                                         "with JUMP");
6213                 if (!attr->ingress)
6214                         return rte_flow_error_set
6215                                         (error, EINVAL,
6216                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6217                                         "tunnel flows for ingress traffic only");
6218         }
6219         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6220                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6221                                             MLX5_FLOW_ACTION_MARK    |
6222                                             MLX5_FLOW_ACTION_SET_TAG |
6223                                             MLX5_FLOW_ACTION_SET_META;
6224
6225                 if (action_flags & bad_actions_mask)
6226                         return rte_flow_error_set
6227                                         (error, EINVAL,
6228                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6229                                         "Invalid RTE action in tunnel "
6230                                         "set match rule");
6231         }
6232         /*
6233          * Validate the drop action mutual exclusion with other actions.
6234          * Drop action is mutually-exclusive with any other action, except for
6235          * Count action.
6236          */
6237         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6238             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6239                 return rte_flow_error_set(error, EINVAL,
6240                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6241                                           "Drop action is mutually-exclusive "
6242                                           "with any other action, except for "
6243                                           "Count action");
6244         /* E-Switch has a few restrictions on using items and actions. */
6245         if (attr->transfer) {
6246                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6247                     action_flags & MLX5_FLOW_ACTION_FLAG)
6248                         return rte_flow_error_set(error, ENOTSUP,
6249                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6250                                                   NULL,
6251                                                   "unsupported action FLAG");
6252                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6253                     action_flags & MLX5_FLOW_ACTION_MARK)
6254                         return rte_flow_error_set(error, ENOTSUP,
6255                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6256                                                   NULL,
6257                                                   "unsupported action MARK");
6258                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6259                         return rte_flow_error_set(error, ENOTSUP,
6260                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6261                                                   NULL,
6262                                                   "unsupported action QUEUE");
6263                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6264                         return rte_flow_error_set(error, ENOTSUP,
6265                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6266                                                   NULL,
6267                                                   "unsupported action RSS");
6268                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6269                         return rte_flow_error_set(error, EINVAL,
6270                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6271                                                   actions,
6272                                                   "no fate action is found");
6273         } else {
6274                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6275                         return rte_flow_error_set(error, EINVAL,
6276                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6277                                                   actions,
6278                                                   "no fate action is found");
6279         }
6280         /*
6281          * Continue validation for Xcap and VLAN actions.
6282          * If hairpin works in explicit TX rule mode, there is no action
6283          * splitting, and the validation of a hairpin ingress flow is the
6284          * same as for other standard flows.
6285          */
6286         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6287                              MLX5_FLOW_VLAN_ACTIONS)) &&
6288             (queue_index == 0xFFFF ||
6289              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6290              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6291              conf->tx_explicit != 0))) {
6292                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6293                     MLX5_FLOW_XCAP_ACTIONS)
6294                         return rte_flow_error_set(error, ENOTSUP,
6295                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6296                                                   NULL, "encap and decap "
6297                                                   "combination isn't supported");
6298                 if (!attr->transfer && attr->ingress) {
6299                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6300                                 return rte_flow_error_set
6301                                                 (error, ENOTSUP,
6302                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6303                                                  NULL, "encap is not supported"
6304                                                  " for ingress traffic");
6305                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6306                                 return rte_flow_error_set
6307                                                 (error, ENOTSUP,
6308                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6309                                                  NULL, "push VLAN action not "
6310                                                  "supported for ingress");
6311                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6312                                         MLX5_FLOW_VLAN_ACTIONS)
6313                                 return rte_flow_error_set
6314                                                 (error, ENOTSUP,
6315                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6316                                                  NULL, "no support for "
6317                                                  "multiple VLAN actions");
6318                 }
6319         }
6320         /*
6321          * Hairpin flow will add one more TAG action in TX implicit mode.
6322          * In TX explicit mode, there will be no hairpin flow ID.
6323          */
6324         if (hairpin > 0)
6325                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6326         /* Extra metadata enabled: one more TAG action will be added. */
6327         if (dev_conf->dv_flow_en &&
6328             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6329             mlx5_flow_ext_mreg_supported(dev))
6330                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6331         if ((uint32_t)rw_act_num >
6332                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6333                 return rte_flow_error_set(error, ENOTSUP,
6334                                           RTE_FLOW_ERROR_TYPE_ACTION,
6335                                           NULL, "too many modify-header"
6336                                           " actions to support");
6337         }
6338         return 0;
6339 }
6340
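/*
 * Illustrative sketch, not called by the driver: models how the validation
 * loop above counts all modify-header actions as a single device action
 * (the hardware coalesces them into one modify-header context), while the
 * per-field rewrite budget is still accumulated in rw_act_num and checked
 * against flow_dv_modify_hdr_action_max().
 */
static __rte_unused void
flow_dv_example_count_modify_hdr(int *actions_n, int *rw_act_num)
{
	uint64_t action_flags = 0;

	/* The first modify-header action consumes one action slot. */
	if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
		++*actions_n;
	action_flags |= MLX5_FLOW_ACTION_SET_TTL;
	*rw_act_num += MLX5_ACT_NUM_MDF_TTL;
	/* A second one adds rewrite slots only, no new action slot. */
	if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
		++*actions_n;
	action_flags |= MLX5_FLOW_ACTION_SET_TP_SRC;
	*rw_act_num += MLX5_ACT_NUM_MDF_PORT;
}
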
6341 /**
6342  * Internal preparation function. Allocates the DV flow structure;
6343  * its size is constant.
6344  *
6345  * @param[in] dev
6346  *   Pointer to the rte_eth_dev structure.
6347  * @param[in] attr
6348  *   Pointer to the flow attributes.
6349  * @param[in] items
6350  *   Pointer to the list of items.
6351  * @param[in] actions
6352  *   Pointer to the list of actions.
6353  * @param[out] error
6354  *   Pointer to the error structure.
6355  *
6356  * @return
6357  *   Pointer to mlx5_flow object on success,
6358  *   otherwise NULL and rte_errno is set.
6359  */
6360 static struct mlx5_flow *
6361 flow_dv_prepare(struct rte_eth_dev *dev,
6362                 const struct rte_flow_attr *attr __rte_unused,
6363                 const struct rte_flow_item items[] __rte_unused,
6364                 const struct rte_flow_action actions[] __rte_unused,
6365                 struct rte_flow_error *error)
6366 {
6367         uint32_t handle_idx = 0;
6368         struct mlx5_flow *dev_flow;
6369         struct mlx5_flow_handle *dev_handle;
6370         struct mlx5_priv *priv = dev->data->dev_private;
6371         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6372
6373         MLX5_ASSERT(wks);
6374         /* Sanity check to avoid overflowing the workspace flows array. */
6375         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6376                 rte_flow_error_set(error, ENOSPC,
6377                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6378                                    "no free temporary device flow");
6379                 return NULL;
6380         }
6381         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6382                                    &handle_idx);
6383         if (!dev_handle) {
6384                 rte_flow_error_set(error, ENOMEM,
6385                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6386                                    "not enough memory to create flow handle");
6387                 return NULL;
6388         }
6389         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
6390         dev_flow = &wks->flows[wks->flow_idx++];
6391         memset(dev_flow, 0, sizeof(*dev_flow));
6392         dev_flow->handle = dev_handle;
6393         dev_flow->handle_idx = handle_idx;
6394         /*
6395          * Some old rdma-core releases check the length of the matching
6396          * parameter before continuing, so the length without the misc4
6397          * parameter must be used here. If the flow needs misc4 matching,
6398          * the length is adjusted accordingly later. Each parameter member
6399          * is naturally aligned on a 64B boundary.
6400          */
6401         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6402                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6403         dev_flow->ingress = attr->ingress;
6404         dev_flow->dv.transfer = attr->transfer;
6405         return dev_flow;
6406 }
6407
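/*
 * Usage note (illustrative): flow_dv_prepare() hands out sub-flow
 * descriptors from the per-thread workspace, so at most
 * MLX5_NUM_MAX_DEV_FLOWS sub flows can be under construction at once.
 * Two consecutive calls return distinct entries of wks->flows[]; the
 * caller is assumed to rewind wks->flow_idx once flow creation finishes.
 */
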
6408 #ifdef RTE_LIBRTE_MLX5_DEBUG
6409 /**
6410  * Sanity check for match mask and value. Similar to check_valid_spec() in
6411  * the kernel driver. If an unmasked bit is set in the value, it fails.
6412  *
6413  * @param match_mask
6414  *   Pointer to the match mask buffer.
6415  * @param match_value
6416  *   Pointer to the match value buffer.
6417  *
6418  * @return
6419  *   0 if valid, -EINVAL otherwise.
6420  */
6421 static int
6422 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6423 {
6424         uint8_t *m = match_mask;
6425         uint8_t *v = match_value;
6426         unsigned int i;
6427
6428         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6429                 if (v[i] & ~m[i]) {
6430                         DRV_LOG(ERR,
6431                                 "match_value differs from match_criteria"
6432                                 " %p[%u] != %p[%u]",
6433                                 match_value, i, match_mask, i);
6434                         return -EINVAL;
6435                 }
6436         }
6437         return 0;
6438 }
6439 #endif
6440
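/*
 * Worked example for flow_dv_check_valid_spec() (illustrative): with a
 * mask byte m[i] = 0xf0 and a value byte v[i] = 0x1f, the check fails
 * because value bits are set outside the mask:
 *
 *	v[i] & ~m[i] = 0x1f & 0x0f = 0x0f != 0  ->  -EINVAL
 *
 * This is why every translate function below ANDs the spec with the mask
 * before writing it into the value buffer.
 */
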
6441 /**
6442  * Add match of ip_version.
6443  *
6444  * @param[in] group
6445  *   Flow group.
6446  * @param[in] headers_v
6447  *   Values header pointer.
6448  * @param[in] headers_m
6449  *   Masks header pointer.
6450  * @param[in] ip_version
6451  *   The IP version to set.
6452  */
6453 static inline void
6454 flow_dv_set_match_ip_version(uint32_t group,
6455                              void *headers_v,
6456                              void *headers_m,
6457                              uint8_t ip_version)
6458 {
6459         if (group == 0)
6460                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6461         else
6462                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6463                          ip_version);
6464         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6465         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6466         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6467 }
6468
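/*
 * Illustrative example, assuming the root-table restriction handled
 * above: for an IPv4 match in group 0,
 *
 *	flow_dv_set_match_ip_version(0, headers_v, headers_m, 4);
 *
 * writes mask 0xf with value 4, while in any other group both mask and
 * value are 4. In both cases the ethertype match is cleared, since
 * ip_version supersedes it.
 */
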
6469 /**
6470  * Add Ethernet item to matcher and to the value.
6471  *
6472  * @param[in, out] matcher
6473  *   Flow matcher.
6474  * @param[in, out] key
6475  *   Flow matcher value.
6476  * @param[in] item
6477  *   Flow pattern to translate.
6478  * @param[in] inner
6479  *   Item is inner pattern.
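 * @param[in] group
 *   The group to insert the rule.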
6480  */
6481 static void
6482 flow_dv_translate_item_eth(void *matcher, void *key,
6483                            const struct rte_flow_item *item, int inner,
6484                            uint32_t group)
6485 {
6486         const struct rte_flow_item_eth *eth_m = item->mask;
6487         const struct rte_flow_item_eth *eth_v = item->spec;
6488         const struct rte_flow_item_eth nic_mask = {
6489                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6490                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6491                 .type = RTE_BE16(0xffff),
6492                 .has_vlan = 0,
6493         };
6494         void *hdrs_m;
6495         void *hdrs_v;
6496         char *l24_v;
6497         unsigned int i;
6498
6499         if (!eth_v)
6500                 return;
6501         if (!eth_m)
6502                 eth_m = &nic_mask;
6503         if (inner) {
6504                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6505                                          inner_headers);
6506                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6507         } else {
6508                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6509                                          outer_headers);
6510                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6511         }
6512         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6513                &eth_m->dst, sizeof(eth_m->dst));
6514         /* The value must be in the range of the mask. */
6515         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6516         for (i = 0; i < sizeof(eth_m->dst); ++i)
6517                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6518         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6519                &eth_m->src, sizeof(eth_m->src));
6520         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6521         /* The value must be in the range of the mask. */
6522         for (i = 0; i < sizeof(eth_m->src); ++i)
6523                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6524         /*
6525          * HW supports matching on a single Ethertype, the one following the
6526          * last VLAN tag of the packet (see PRM).
6527          * Set a match on ethertype only if the ETH header is not followed
6528          * by a VLAN. HW is optimized for IPv4/IPv6; in such cases, avoid
6529          * setting ethertype and use the ip_version field instead.
6530          * eCPRI over the Ether layer uses type value 0xAEFE.
6531          */
6532         if (eth_m->type == 0xFFFF) {
6533                 /* Set cvlan_tag mask for any single-, multi- or un-tagged case. */
6534                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6535                 switch (eth_v->type) {
6536                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6537                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6538                         return;
6539                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6540                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6541                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6542                         return;
6543                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6544                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6545                         return;
6546                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6547                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6548                         return;
6549                 default:
6550                         break;
6551                 }
6552         }
6553         if (eth_m->has_vlan) {
6554                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6555                 if (eth_v->has_vlan) {
6556                         /*
6557                          * Here, when the has_more_vlan field in the VLAN item
6558                          * is also not set, only single-tagged packets match.
6559                          */
6560                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6561                         return;
6562                 }
6563         }
6564         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6565                  rte_be_to_cpu_16(eth_m->type));
6566         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6567         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6568 }
6569
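/*
 * Illustrative sketch, not called by the driver: the byte-wise masked
 * copy used for the MAC addresses above, which keeps every value byte
 * within its mask byte.
 */
static __rte_unused void
flow_dv_example_masked_copy(uint8_t *dst, const uint8_t *spec,
			    const uint8_t *mask, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		dst[i] = spec[i] & mask[i]; /* value stays inside the mask */
}
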
6570 /**
6571  * Add VLAN item to matcher and to the value.
6572  *
6573  * @param[in, out] dev_flow
6574  *   Flow descriptor.
6575  * @param[in, out] matcher
6576  *   Flow matcher.
6577  * @param[in, out] key
6578  *   Flow matcher value.
6579  * @param[in] item
6580  *   Flow pattern to translate.
6581  * @param[in] inner
6582  *   Item is inner pattern.
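 * @param[in] group
 *   The group to insert the rule.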
6583  */
6584 static void
6585 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6586                             void *matcher, void *key,
6587                             const struct rte_flow_item *item,
6588                             int inner, uint32_t group)
6589 {
6590         const struct rte_flow_item_vlan *vlan_m = item->mask;
6591         const struct rte_flow_item_vlan *vlan_v = item->spec;
6592         void *hdrs_m;
6593         void *hdrs_v;
6594         uint16_t tci_m;
6595         uint16_t tci_v;
6596
6597         if (inner) {
6598                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6599                                          inner_headers);
6600                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6601         } else {
6602                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6603                                          outer_headers);
6604                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6605                 /*
6606                  * This is a workaround: masks are not supported here,
6607                  * and the value has been pre-validated.
6608                  */
6609                 if (vlan_v)
6610                         dev_flow->handle->vf_vlan.tag =
6611                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6612         }
6613         /*
6614          * When a VLAN item exists in the flow, mark the packet as
6615          * tagged, even if the TCI is not specified.
6616          */
6617         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6618                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6619                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6620         }
6621         if (!vlan_v)
6622                 return;
6623         if (!vlan_m)
6624                 vlan_m = &rte_flow_item_vlan_mask;
6625         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6626         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6627         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6628         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6629         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6630         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6631         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6632         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6633         /*
6634          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6635          * ethertype, and use ip_version field instead.
6636          */
6637         if (vlan_m->inner_type == 0xFFFF) {
6638                 switch (vlan_v->inner_type) {
6639                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6640                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6641                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6642                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6643                         return;
6644                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6645                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6646                         return;
6647                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6648                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6649                         return;
6650                 default:
6651                         break;
6652                 }
6653         }
6654         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6655                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6656                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6657                 /* Only one vlan_tag bit can be set. */
6658                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6659                 return;
6660         }
6661         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6662                  rte_be_to_cpu_16(vlan_m->inner_type));
6663         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6664                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6665 }
6666
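/*
 * Illustrative sketch, not called by the driver: MLX5_SET() truncates the
 * written value to the PRM field width, so the TCI shifts above need no
 * explicit masking. For tci = 0xe07b this yields vid 0x07b, cfi 0, prio 7.
 */
static __rte_unused void
flow_dv_example_set_tci(void *hdrs, uint16_t tci)
{
	MLX5_SET(fte_match_set_lyr_2_4, hdrs, first_vid, tci);        /* 12 bits */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs, first_cfi, tci >> 12);  /* 1 bit */
	MLX5_SET(fte_match_set_lyr_2_4, hdrs, first_prio, tci >> 13); /* 3 bits */
}
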
6667 /**
6668  * Add IPV4 item to matcher and to the value.
6669  *
6670  * @param[in, out] matcher
6671  *   Flow matcher.
6672  * @param[in, out] key
6673  *   Flow matcher value.
6674  * @param[in] item
6675  *   Flow pattern to translate.
6676  * @param[in] inner
6677  *   Item is inner pattern.
6678  * @param[in] group
6679  *   The group to insert the rule.
6680  */
6681 static void
6682 flow_dv_translate_item_ipv4(void *matcher, void *key,
6683                             const struct rte_flow_item *item,
6684                             int inner, uint32_t group)
6685 {
6686         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6687         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6688         const struct rte_flow_item_ipv4 nic_mask = {
6689                 .hdr = {
6690                         .src_addr = RTE_BE32(0xffffffff),
6691                         .dst_addr = RTE_BE32(0xffffffff),
6692                         .type_of_service = 0xff,
6693                         .next_proto_id = 0xff,
6694                         .time_to_live = 0xff,
6695                 },
6696         };
6697         void *headers_m;
6698         void *headers_v;
6699         char *l24_m;
6700         char *l24_v;
6701         uint8_t tos;
6702
6703         if (inner) {
6704                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6705                                          inner_headers);
6706                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6707         } else {
6708                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6709                                          outer_headers);
6710                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6711         }
6712         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6713         if (!ipv4_v)
6714                 return;
6715         if (!ipv4_m)
6716                 ipv4_m = &nic_mask;
6717         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6718                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6719         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6720                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6721         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6722         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6723         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6724                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6725         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6726                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6727         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6728         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6729         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6730         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6731                  ipv4_m->hdr.type_of_service);
6732         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6733         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6734                  ipv4_m->hdr.type_of_service >> 2);
6735         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6736         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6737                  ipv4_m->hdr.next_proto_id);
6738         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6739                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6740         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6741                  ipv4_m->hdr.time_to_live);
6742         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6743                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6744         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6745                  !!(ipv4_m->hdr.fragment_offset));
6746         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6747                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6748 }
6749
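/*
 * Worked example for the TOS split above (illustrative): type_of_service
 * carries DSCP in its upper 6 bits and ECN in its lower 2 bits, and
 * MLX5_SET() truncates to field width, so for tos = 0xb8 (DSCP 46 "EF",
 * ECN 0):
 *
 *	ip_dscp = tos >> 2 = 0x2e = 46
 *	ip_ecn  = tos & 0x3 = 0     (2-bit field, truncated from 0xb8)
 */
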
6750 /**
6751  * Add IPV6 item to matcher and to the value.
6752  *
6753  * @param[in, out] matcher
6754  *   Flow matcher.
6755  * @param[in, out] key
6756  *   Flow matcher value.
6757  * @param[in] item
6758  *   Flow pattern to translate.
6759  * @param[in] inner
6760  *   Item is inner pattern.
6761  * @param[in] group
6762  *   The group to insert the rule.
6763  */
6764 static void
6765 flow_dv_translate_item_ipv6(void *matcher, void *key,
6766                             const struct rte_flow_item *item,
6767                             int inner, uint32_t group)
6768 {
6769         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6770         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6771         const struct rte_flow_item_ipv6 nic_mask = {
6772                 .hdr = {
6773                         .src_addr =
6774                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6775                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6776                         .dst_addr =
6777                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6778                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6779                         .vtc_flow = RTE_BE32(0xffffffff),
6780                         .proto = 0xff,
6781                         .hop_limits = 0xff,
6782                 },
6783         };
6784         void *headers_m;
6785         void *headers_v;
6786         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6787         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6788         char *l24_m;
6789         char *l24_v;
6790         uint32_t vtc_m;
6791         uint32_t vtc_v;
6792         int i;
6793         int size;
6794
6795         if (inner) {
6796                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6797                                          inner_headers);
6798                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6799         } else {
6800                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6801                                          outer_headers);
6802                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6803         }
6804         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6805         if (!ipv6_v)
6806                 return;
6807         if (!ipv6_m)
6808                 ipv6_m = &nic_mask;
6809         size = sizeof(ipv6_m->hdr.dst_addr);
6810         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6811                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6812         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6813                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6814         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6815         for (i = 0; i < size; ++i)
6816                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6817         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6818                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6819         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6820                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6821         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6822         for (i = 0; i < size; ++i)
6823                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6824         /* TOS. */
6825         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6826         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6827         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6828         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6829         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6830         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6831         /* Label. */
6832         if (inner) {
6833                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6834                          vtc_m);
6835                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6836                          vtc_v);
6837         } else {
6838                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6839                          vtc_m);
6840                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6841                          vtc_v);
6842         }
6843         /* Protocol. */
6844         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6845                  ipv6_m->hdr.proto);
6846         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6847                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6848         /* Hop limit. */
6849         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6850                  ipv6_m->hdr.hop_limits);
6851         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6852                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6853         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6854                  !!(ipv6_m->has_frag_ext));
6855         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6856                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6857 }
6858
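/*
 * Worked example for the vtc_flow split above (illustrative): the 32-bit
 * field packs version (bits 31:28), traffic class (27:20) and flow label
 * (19:0). With MLX5_SET() truncating to field width:
 *
 *	ip_ecn     = (vtc >> 20) & 0x3    ECN bits of the traffic class
 *	ip_dscp    = (vtc >> 22) & 0x3f   DSCP bits of the traffic class
 *	flow label = vtc & 0xfffff        20-bit *_ipv6_flow_label field
 */
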
6859 /**
6860  * Add IPV6 fragment extension item to matcher and to the value.
6861  *
6862  * @param[in, out] matcher
6863  *   Flow matcher.
6864  * @param[in, out] key
6865  *   Flow matcher value.
6866  * @param[in] item
6867  *   Flow pattern to translate.
6868  * @param[in] inner
6869  *   Item is inner pattern.
6870  */
6871 static void
6872 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6873                                      const struct rte_flow_item *item,
6874                                      int inner)
6875 {
6876         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6877         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6878         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6879                 .hdr = {
6880                         .next_header = 0xff,
6881                         .frag_data = RTE_BE16(0xffff),
6882                 },
6883         };
6884         void *headers_m;
6885         void *headers_v;
6886
6887         if (inner) {
6888                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6889                                          inner_headers);
6890                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6891         } else {
6892                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6893                                          outer_headers);
6894                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6895         }
6896         /* An IPv6 fragment extension item exists, so the packet is a fragment. */
6897         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6898         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6899         if (!ipv6_frag_ext_v)
6900                 return;
6901         if (!ipv6_frag_ext_m)
6902                 ipv6_frag_ext_m = &nic_mask;
6903         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6904                  ipv6_frag_ext_m->hdr.next_header);
6905         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6906                  ipv6_frag_ext_v->hdr.next_header &
6907                  ipv6_frag_ext_m->hdr.next_header);
6908 }
6909
6910 /**
6911  * Add TCP item to matcher and to the value.
6912  *
6913  * @param[in, out] matcher
6914  *   Flow matcher.
6915  * @param[in, out] key
6916  *   Flow matcher value.
6917  * @param[in] item
6918  *   Flow pattern to translate.
6919  * @param[in] inner
6920  *   Item is inner pattern.
6921  */
6922 static void
6923 flow_dv_translate_item_tcp(void *matcher, void *key,
6924                            const struct rte_flow_item *item,
6925                            int inner)
6926 {
6927         const struct rte_flow_item_tcp *tcp_m = item->mask;
6928         const struct rte_flow_item_tcp *tcp_v = item->spec;
6929         void *headers_m;
6930         void *headers_v;
6931
6932         if (inner) {
6933                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6934                                          inner_headers);
6935                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6936         } else {
6937                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6938                                          outer_headers);
6939                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6940         }
6941         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6942         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6943         if (!tcp_v)
6944                 return;
6945         if (!tcp_m)
6946                 tcp_m = &rte_flow_item_tcp_mask;
6947         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6948                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6949         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6950                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6951         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6952                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6953         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6954                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6955         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6956                  tcp_m->hdr.tcp_flags);
6957         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6958                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6959 }
6960
6961 /**
6962  * Add UDP item to matcher and to the value.
6963  *
6964  * @param[in, out] matcher
6965  *   Flow matcher.
6966  * @param[in, out] key
6967  *   Flow matcher value.
6968  * @param[in] item
6969  *   Flow pattern to translate.
6970  * @param[in] inner
6971  *   Item is inner pattern.
6972  */
6973 static void
6974 flow_dv_translate_item_udp(void *matcher, void *key,
6975                            const struct rte_flow_item *item,
6976                            int inner)
6977 {
6978         const struct rte_flow_item_udp *udp_m = item->mask;
6979         const struct rte_flow_item_udp *udp_v = item->spec;
6980         void *headers_m;
6981         void *headers_v;
6982
6983         if (inner) {
6984                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6985                                          inner_headers);
6986                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6987         } else {
6988                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6989                                          outer_headers);
6990                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6991         }
6992         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6993         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6994         if (!udp_v)
6995                 return;
6996         if (!udp_m)
6997                 udp_m = &rte_flow_item_udp_mask;
6998         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6999                  rte_be_to_cpu_16(udp_m->hdr.src_port));
7000         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
7001                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
7002         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
7003                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
7004         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7005                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
7006 }
7007
7008 /**
7009  * Add GRE optional Key item to matcher and to the value.
7010  *
7011  * @param[in, out] matcher
7012  *   Flow matcher.
7013  * @param[in, out] key
7014  *   Flow matcher value.
7015  * @param[in] item
7016  *   Flow pattern to translate.
7019  */
7020 static void
7021 flow_dv_translate_item_gre_key(void *matcher, void *key,
7022                                    const struct rte_flow_item *item)
7023 {
7024         const rte_be32_t *key_m = item->mask;
7025         const rte_be32_t *key_v = item->spec;
7026         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7027         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7028         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7029
7030         /* The GRE K bit must be on and should already be validated. */
7031         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
7032         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
7033         if (!key_v)
7034                 return;
7035         if (!key_m)
7036                 key_m = &gre_key_default_mask;
7037         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
7038                  rte_be_to_cpu_32(*key_m) >> 8);
7039         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
7040                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
7041         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
7042                  rte_be_to_cpu_32(*key_m) & 0xFF);
7043         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
7044                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
7045 }
7046
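/*
 * Worked example for the GRE key split above (illustrative): the 32-bit
 * key is stored as a 24-bit high part and an 8-bit low part, so for
 * key = 0xaabbccdd:
 *
 *	gre_key_h = key >> 8   = 0xaabbcc
 *	gre_key_l = key & 0xff = 0xdd
 */
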
7047 /**
7048  * Add GRE item to matcher and to the value.
7049  *
7050  * @param[in, out] matcher
7051  *   Flow matcher.
7052  * @param[in, out] key
7053  *   Flow matcher value.
7054  * @param[in] item
7055  *   Flow pattern to translate.
7056  * @param[in] inner
7057  *   Item is inner pattern.
7058  */
7059 static void
7060 flow_dv_translate_item_gre(void *matcher, void *key,
7061                            const struct rte_flow_item *item,
7062                            int inner)
7063 {
7064         const struct rte_flow_item_gre *gre_m = item->mask;
7065         const struct rte_flow_item_gre *gre_v = item->spec;
7066         void *headers_m;
7067         void *headers_v;
7068         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7069         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7070         struct {
7071                 union {
7072                         __extension__
7073                         struct {
7074                                 uint16_t version:3;
7075                                 uint16_t rsvd0:9;
7076                                 uint16_t s_present:1;
7077                                 uint16_t k_present:1;
7078                                 uint16_t rsvd_bit1:1;
7079                                 uint16_t c_present:1;
7080                         };
7081                         uint16_t value;
7082                 };
7083         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
7084
7085         if (inner) {
7086                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7087                                          inner_headers);
7088                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7089         } else {
7090                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7091                                          outer_headers);
7092                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7093         }
7094         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7095         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
7096         if (!gre_v)
7097                 return;
7098         if (!gre_m)
7099                 gre_m = &rte_flow_item_gre_mask;
7100         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
7101                  rte_be_to_cpu_16(gre_m->protocol));
7102         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7103                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
7104         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
7105         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
7106         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
7107                  gre_crks_rsvd0_ver_m.c_present);
7108         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
7109                  gre_crks_rsvd0_ver_v.c_present &
7110                  gre_crks_rsvd0_ver_m.c_present);
7111         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
7112                  gre_crks_rsvd0_ver_m.k_present);
7113         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
7114                  gre_crks_rsvd0_ver_v.k_present &
7115                  gre_crks_rsvd0_ver_m.k_present);
7116         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
7117                  gre_crks_rsvd0_ver_m.s_present);
7118         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
7119                  gre_crks_rsvd0_ver_v.s_present &
7120                  gre_crks_rsvd0_ver_m.s_present);
7121 }
7122
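/*
 * Illustrative note: after rte_be_to_cpu_16(), the GRE c_rsvd0_ver word
 * carries C in bit 15, K in bit 13, S in bit 12 and the version in bits
 * 2:0, which is what the bitfield union above decodes on little-endian
 * hosts. E.g. an NVGRE spec of 0x2000 sets only k_present, and the
 * 0xb000 mask covers the C, K and S bits.
 */
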
7123 /**
7124  * Add NVGRE item to matcher and to the value.
7125  *
7126  * @param[in, out] matcher
7127  *   Flow matcher.
7128  * @param[in, out] key
7129  *   Flow matcher value.
7130  * @param[in] item
7131  *   Flow pattern to translate.
7132  * @param[in] inner
7133  *   Item is inner pattern.
7134  */
7135 static void
7136 flow_dv_translate_item_nvgre(void *matcher, void *key,
7137                              const struct rte_flow_item *item,
7138                              int inner)
7139 {
7140         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
7141         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
7142         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7143         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7144         const char *tni_flow_id_m;
7145         const char *tni_flow_id_v;
7146         char *gre_key_m;
7147         char *gre_key_v;
7148         int size;
7149         int i;
7150
7151         /* For NVGRE, GRE header fields must be set with defined values. */
7152         const struct rte_flow_item_gre gre_spec = {
7153                 .c_rsvd0_ver = RTE_BE16(0x2000),
7154                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7155         };
7156         const struct rte_flow_item_gre gre_mask = {
7157                 .c_rsvd0_ver = RTE_BE16(0xB000),
7158                 .protocol = RTE_BE16(UINT16_MAX),
7159         };
7160         const struct rte_flow_item gre_item = {
7161                 .spec = &gre_spec,
7162                 .mask = &gre_mask,
7163                 .last = NULL,
7164         };
7165         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7166         if (!nvgre_v)
7167                 return;
7168         if (!nvgre_m)
7169                 nvgre_m = &rte_flow_item_nvgre_mask;
7170         tni_flow_id_m = (const char *)nvgre_m->tni;
7171         tni_flow_id_v = (const char *)nvgre_v->tni;
7172         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7173         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7174         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7175         memcpy(gre_key_m, tni_flow_id_m, size);
7176         for (i = 0; i < size; ++i)
7177                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7178 }
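/*
 * Hypothetical caller-side pattern for the translation above: the
 * 24-bit TNI plus the 8-bit flow_id share the 32 bits of the GRE key,
 * which is why both are copied into gre_key_h as one block.
 */
static __rte_unused void
nvgre_item_example(struct rte_flow_item *item)
{
	static const struct rte_flow_item_nvgre nvgre = {
		.tni = { 0x00, 0x12, 0x34 }, /* example TNI 0x001234 */
		.flow_id = 0,
	};

	item->type = RTE_FLOW_ITEM_TYPE_NVGRE;
	item->spec = &nvgre;
	item->mask = &rte_flow_item_nvgre_mask; /* default: TNI fully masked */
}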
7179
7180 /**
7181  * Add VXLAN item to matcher and to the value.
7182  *
7183  * @param[in, out] matcher
7184  *   Flow matcher.
7185  * @param[in, out] key
7186  *   Flow matcher value.
7187  * @param[in] item
7188  *   Flow pattern to translate.
7189  * @param[in] inner
7190  *   Item is inner pattern.
7191  */
7192 static void
7193 flow_dv_translate_item_vxlan(void *matcher, void *key,
7194                              const struct rte_flow_item *item,
7195                              int inner)
7196 {
7197         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7198         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7199         void *headers_m;
7200         void *headers_v;
7201         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7202         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7203         char *vni_m;
7204         char *vni_v;
7205         uint16_t dport;
7206         int size;
7207         int i;
7208
7209         if (inner) {
7210                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7211                                          inner_headers);
7212                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7213         } else {
7214                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7215                                          outer_headers);
7216                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7217         }
7218         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7219                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7220         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7221                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7222                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7223         }
7224         if (!vxlan_v)
7225                 return;
7226         if (!vxlan_m)
7227                 vxlan_m = &rte_flow_item_vxlan_mask;
7228         size = sizeof(vxlan_m->vni);
7229         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7230         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7231         memcpy(vni_m, vxlan_m->vni, size);
7232         for (i = 0; i < size; ++i)
7233                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7234 }
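/*
 * Illustrative partial-VNI match (assumed values): because the VNI is
 * ANDed byte-wise above, masking only the two high bytes matches the
 * whole 0x123400..0x1234ff range of VNIs with a single rule.
 */
static __rte_unused void
vxlan_item_example(struct rte_flow_item *item)
{
	static const struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x12, 0x34, 0x00 },
	};
	static const struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xff, 0xff, 0x00 },
	};

	item->type = RTE_FLOW_ITEM_TYPE_VXLAN;
	item->spec = &vxlan_spec;
	item->mask = &vxlan_mask;
}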
7235
7236 /**
7237  * Add VXLAN-GPE item to matcher and to the value.
7238  *
7239  * @param[in, out] matcher
7240  *   Flow matcher.
7241  * @param[in, out] key
7242  *   Flow matcher value.
7243  * @param[in] item
7244  *   Flow pattern to translate.
7245  * @param[in] inner
7246  *   Item is inner pattern.
7247  */
7248
7249 static void
7250 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7251                                  const struct rte_flow_item *item, int inner)
7252 {
7253         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7254         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7255         void *headers_m;
7256         void *headers_v;
7257         void *misc_m =
7258                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7259         void *misc_v =
7260                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7261         char *vni_m;
7262         char *vni_v;
7263         uint16_t dport;
7264         int size;
7265         int i;
7266         uint8_t flags_m = 0xff;
7267         uint8_t flags_v = 0xc;
7268
7269         if (inner) {
7270                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7271                                          inner_headers);
7272                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7273         } else {
7274                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7275                                          outer_headers);
7276                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7277         }
7278         /* This translator only serves VXLAN-GPE items. */
7279         dport = MLX5_UDP_PORT_VXLAN_GPE;
7280         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7281                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7282                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7283         }
7284         if (!vxlan_v)
7285                 return;
7286         if (!vxlan_m)
7287                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7288         size = sizeof(vxlan_m->vni);
7289         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7290         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7291         memcpy(vni_m, vxlan_m->vni, size);
7292         for (i = 0; i < size; ++i)
7293                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7294         if (vxlan_m->flags) {
7295                 flags_m = vxlan_m->flags;
7296                 flags_v = vxlan_v->flags;
7297         }
7298         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7299         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7300         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7301                  vxlan_m->protocol);
7302         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7303                  vxlan_v->protocol);
7304 }
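/*
 * Hypothetical caller-side item for the translation above: match VNI
 * 0x00002a carrying an IPv4 payload. The default mask covers the VNI
 * only, so an explicit mask is given to match the next protocol too.
 */
static __rte_unused void
vxlan_gpe_item_example(struct rte_flow_item *item)
{
	static const struct rte_flow_item_vxlan_gpe gpe_spec = {
		.vni = { 0x00, 0x00, 0x2a },
		.protocol = RTE_VXLAN_GPE_TYPE_IPV4,
	};
	static const struct rte_flow_item_vxlan_gpe gpe_mask = {
		.vni = { 0xff, 0xff, 0xff },
		.protocol = 0xff,
	};

	item->type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE;
	item->spec = &gpe_spec;
	item->mask = &gpe_mask;
}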
7305
7306 /**
7307  * Add Geneve item to matcher and to the value.
7308  *
7309  * @param[in, out] matcher
7310  *   Flow matcher.
7311  * @param[in, out] key
7312  *   Flow matcher value.
7313  * @param[in] item
7314  *   Flow pattern to translate.
7315  * @param[in] inner
7316  *   Item is inner pattern.
7317  */
7318
7319 static void
7320 flow_dv_translate_item_geneve(void *matcher, void *key,
7321                               const struct rte_flow_item *item, int inner)
7322 {
7323         const struct rte_flow_item_geneve *geneve_m = item->mask;
7324         const struct rte_flow_item_geneve *geneve_v = item->spec;
7325         void *headers_m;
7326         void *headers_v;
7327         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7328         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7329         uint16_t dport;
7330         uint16_t gbhdr_m;
7331         uint16_t gbhdr_v;
7332         char *vni_m;
7333         char *vni_v;
7334         size_t size, i;
7335
7336         if (inner) {
7337                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7338                                          inner_headers);
7339                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7340         } else {
7341                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7342                                          outer_headers);
7343                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7344         }
7345         dport = MLX5_UDP_PORT_GENEVE;
7346         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7347                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7348                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7349         }
7350         if (!geneve_v)
7351                 return;
7352         if (!geneve_m)
7353                 geneve_m = &rte_flow_item_geneve_mask;
7354         size = sizeof(geneve_m->vni);
7355         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7356         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7357         memcpy(vni_m, geneve_m->vni, size);
7358         for (i = 0; i < size; ++i)
7359                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7360         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7361                  rte_be_to_cpu_16(geneve_m->protocol));
7362         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7363                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7364         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7365         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7366         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7367                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7368         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7369                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7370         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7371                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7372         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7373                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7374                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7375 }
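/*
 * Sketch of the first GENEVE word decoding above: after the byte swap,
 * the MLX5_GENEVE_OPTLEN_VAL() accessor yields the 6-bit option length
 * (counted in 4-byte words) and MLX5_GENEVE_OAMF_VAL() the OAM flag.
 */
static __rte_unused uint16_t
geneve_optlen_example(rte_be16_t ver_opt_len_o_c_rsvd0)
{
	uint16_t w = rte_be_to_cpu_16(ver_opt_len_o_c_rsvd0);

	return MLX5_GENEVE_OPTLEN_VAL(w);
}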
7376
7377 /**
7378  * Create Geneve TLV option resource.
7379  *
7380  * @param[in, out] dev
7381  *   Pointer to rte_eth_dev structure.
7382  * @param[in] item
7383  *   Flow pattern to translate; the GENEVE TLV option
7384  *   class, type and length are taken from the item spec.
7385  * @param[out] error
7386  *   Pointer to error structure.
7387  *
7388  * @return
7389  *   0 on success, otherwise a negative errno value and
7390  *   rte_errno is set.
7391  */
7392
7393 int
7394 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
7395                                              const struct rte_flow_item *item,
7396                                              struct rte_flow_error *error)
7397 {
7398         struct mlx5_priv *priv = dev->data->dev_private;
7399         struct mlx5_dev_ctx_shared *sh = priv->sh;
7400         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
7401                         sh->geneve_tlv_option_resource;
7402         struct mlx5_devx_obj *obj;
7403         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
7404         int ret = 0;
7405
7406         if (!geneve_opt_v)
7407                 return -1;
7408         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
7409         if (geneve_opt_resource != NULL) {
7410                 if (geneve_opt_resource->option_class ==
7411                         geneve_opt_v->option_class &&
7412                         geneve_opt_resource->option_type ==
7413                         geneve_opt_v->option_type &&
7414                         geneve_opt_resource->length ==
7415                         geneve_opt_v->option_len) {
7416                         /* We already have GENEVE TLV option obj allocated. */
7417                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
7418                                            __ATOMIC_RELAXED);
7419                 } else {
7420                         ret = rte_flow_error_set(error, ENOMEM,
7421                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7422                                 "Only one GENEVE TLV option supported");
7423                         goto exit;
7424                 }
7425         } else {
7426                 /* Create a GENEVE TLV object and resource. */
7427                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
7428                                 geneve_opt_v->option_class,
7429                                 geneve_opt_v->option_type,
7430                                 geneve_opt_v->option_len);
7431                 if (!obj) {
7432                         ret = rte_flow_error_set(error, ENODATA,
7433                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7434                                 "Failed to create GENEVE TLV Devx object");
7435                         goto exit;
7436                 }
7437                 sh->geneve_tlv_option_resource =
7438                                 mlx5_malloc(MLX5_MEM_ZERO,
7439                                                 sizeof(*geneve_opt_resource),
7440                                                 0, SOCKET_ID_ANY);
7441                 if (!sh->geneve_tlv_option_resource) {
7442                         claim_zero(mlx5_devx_cmd_destroy(obj));
7443                         ret = rte_flow_error_set(error, ENOMEM,
7444                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7445                                 "GENEVE TLV object memory allocation failed");
7446                         goto exit;
7447                 }
7448                 geneve_opt_resource = sh->geneve_tlv_option_resource;
7449                 geneve_opt_resource->obj = obj;
7450                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
7451                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
7452                 geneve_opt_resource->length = geneve_opt_v->option_len;
7453                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
7454                                 __ATOMIC_RELAXED);
7455         }
7456 exit:
7457         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
7458         return ret;
7459 }
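/*
 * Hypothetical usage sketch: only a single class/type/length
 * combination is supported device-wide, so registering the same
 * option again merely increments the refcount of the shared object.
 */
static __rte_unused int
geneve_opt_register_example(struct rte_eth_dev *dev,
			    struct rte_flow_error *error)
{
	static const struct rte_flow_item_geneve_opt opt = {
		.option_class = RTE_BE16(0x0102), /* example class */
		.option_type = 0x11,              /* example type */
		.option_len = 1,                  /* length in 4-byte words */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_GENEVE_OPT,
		.spec = &opt,
	};

	return flow_dev_geneve_tlv_option_resource_register(dev, &item,
							    error);
}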
7460
7461 /**
7462  * Add Geneve TLV option item to matcher.
7463  *
7464  * @param[in, out] dev
7465  *   Pointer to rte_eth_dev structure.
7466  * @param[in, out] matcher
7467  *   Flow matcher.
7468  * @param[in, out] key
7469  *   Flow matcher value.
7470  * @param[in] item
7471  *   Flow pattern to translate.
7472  * @param[out] error
7473  *   Pointer to error structure.
7474  */
7475 static int
7476 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
7477                                   void *key, const struct rte_flow_item *item,
7478                                   struct rte_flow_error *error)
7479 {
7480         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
7481         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
7482         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7483         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7484         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7485                         misc_parameters_3);
7486         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7487         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
7488         int ret = 0;
7489
7490         if (!geneve_opt_v)
7491                 return -1;
7492         if (!geneve_opt_m)
7493                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
7494         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
7495                                                            error);
7496         if (ret) {
7497                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
7498                 return ret;
7499         }
7500         /*
7501          * Set the option length in GENEVE header if not requested.
7502          * The GENEVE TLV option length is expressed by the option length field
7503          * in the GENEVE header.
7504          * If the option length was not requested but the GENEVE TLV option item
7505          * is present we set the option length field implicitly.
7506          */
7507         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
7508                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7509                          MLX5_GENEVE_OPTLEN_MASK);
7510                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7511                          geneve_opt_v->option_len + 1);
7512         }
7513         /* Set the data. */
7514         if (geneve_opt_v->data) {
7515                 memcpy(&opt_data_key, geneve_opt_v->data,
7516                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
7517                                 sizeof(opt_data_key)));
7518                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
7519                                 sizeof(opt_data_key));
7520                 memcpy(&opt_data_mask, geneve_opt_m->data,
7521                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
7522                                 sizeof(opt_data_mask)));
7523                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
7524                                 sizeof(opt_data_mask));
7525                 MLX5_SET(fte_match_set_misc3, misc3_m,
7526                                 geneve_tlv_option_0_data,
7527                                 rte_be_to_cpu_32(opt_data_mask));
7528                 MLX5_SET(fte_match_set_misc3, misc3_v,
7529                                 geneve_tlv_option_0_data,
7530                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
7531         }
7532         return ret;
7533 }
7534
7535 /**
7536  * Add MPLS item to matcher and to the value.
7537  *
7538  * @param[in, out] matcher
7539  *   Flow matcher.
7540  * @param[in, out] key
7541  *   Flow matcher value.
7542  * @param[in] item
7543  *   Flow pattern to translate.
7544  * @param[in] prev_layer
7545  *   The protocol layer indicated in previous item.
7546  * @param[in] inner
7547  *   Item is inner pattern.
7548  */
7549 static void
7550 flow_dv_translate_item_mpls(void *matcher, void *key,
7551                             const struct rte_flow_item *item,
7552                             uint64_t prev_layer,
7553                             int inner)
7554 {
7555         const uint32_t *in_mpls_m = item->mask;
7556         const uint32_t *in_mpls_v = item->spec;
7557         uint32_t *out_mpls_m = NULL;
7558         uint32_t *out_mpls_v = NULL;
7559         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7560         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7561         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7562                                      misc_parameters_2);
7563         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7564         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7565         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7566
7567         switch (prev_layer) {
7568         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7569                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7570                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7571                          MLX5_UDP_PORT_MPLS);
7572                 break;
7573         case MLX5_FLOW_LAYER_GRE:
7574                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7575                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7576                          RTE_ETHER_TYPE_MPLS);
7577                 break;
7578         default:
7579                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7580                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7581                          IPPROTO_MPLS);
7582                 break;
7583         }
7584         if (!in_mpls_v)
7585                 return;
7586         if (!in_mpls_m)
7587                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7588         switch (prev_layer) {
7589         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7590                 out_mpls_m =
7591                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7592                                                  outer_first_mpls_over_udp);
7593                 out_mpls_v =
7594                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7595                                                  outer_first_mpls_over_udp);
7596                 break;
7597         case MLX5_FLOW_LAYER_GRE:
7598                 out_mpls_m =
7599                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7600                                                  outer_first_mpls_over_gre);
7601                 out_mpls_v =
7602                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7603                                                  outer_first_mpls_over_gre);
7604                 break;
7605         default:
7606                 /* Inner MPLS not over GRE is not supported. */
7607                 if (!inner) {
7608                         out_mpls_m =
7609                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7610                                                          misc2_m,
7611                                                          outer_first_mpls);
7612                         out_mpls_v =
7613                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7614                                                          misc2_v,
7615                                                          outer_first_mpls);
7616                 }
7617                 break;
7618         }
7619         if (out_mpls_m && out_mpls_v) {
7620                 *out_mpls_m = *in_mpls_m;
7621                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7622         }
7623 }
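/*
 * Illustrative item for the translation above (assumed values): which
 * PRM field receives the label depends on prev_layer, e.g. MPLS over
 * UDP lands in outer_first_mpls_over_udp and MPLS over GRE in
 * outer_first_mpls_over_gre.
 */
static __rte_unused void
mpls_item_example(struct rte_flow_item *item)
{
	/* Label 100 shifted left by 4 over the TC/S bits. */
	static const struct rte_flow_item_mpls mpls = {
		.label_tc_s = { 0x00, 0x06, 0x40 },
	};

	item->type = RTE_FLOW_ITEM_TYPE_MPLS;
	item->spec = &mpls;
	item->mask = &rte_flow_item_mpls_mask; /* default: label bits only */
}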
7624
7625 /**
7626  * Add metadata register item to matcher
7627  *
7628  * @param[in, out] matcher
7629  *   Flow matcher.
7630  * @param[in, out] key
7631  *   Flow matcher value.
7632  * @param[in] reg_type
7633  *   Type of device metadata register
7634  * @param[in] value
7635  *   Register value
7636  * @param[in] mask
7637  *   Register mask
7638  */
7639 static void
7640 flow_dv_match_meta_reg(void *matcher, void *key,
7641                        enum modify_reg reg_type,
7642                        uint32_t data, uint32_t mask)
7643 {
7644         void *misc2_m =
7645                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7646         void *misc2_v =
7647                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7648         uint32_t temp;
7649
7650         data &= mask;
7651         switch (reg_type) {
7652         case REG_A:
7653                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7654                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7655                 break;
7656         case REG_B:
7657                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7658                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7659                 break;
7660         case REG_C_0:
7661                 /*
7662                  * The metadata register C0 field might be divided into
7663                  * source vport index and META item value; we should set
7664                  * this field according to the specified mask, not as a whole.
7665                  */
7666                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7667                 temp |= mask;
7668                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7669                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7670                 temp &= ~mask;
7671                 temp |= data;
7672                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7673                 break;
7674         case REG_C_1:
7675                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7676                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7677                 break;
7678         case REG_C_2:
7679                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7680                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7681                 break;
7682         case REG_C_3:
7683                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7684                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7685                 break;
7686         case REG_C_4:
7687                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7688                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7689                 break;
7690         case REG_C_5:
7691                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7692                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7693                 break;
7694         case REG_C_6:
7695                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7696                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7697                 break;
7698         case REG_C_7:
7699                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7700                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7701                 break;
7702         default:
7703                 MLX5_ASSERT(false);
7704                 break;
7705         }
7706 }
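/*
 * Minimal standalone sketch of the REG_C_0 read-modify-write above:
 * merging a data/mask pair into a register word that may already
 * carry the source vport bits (names are illustrative only).
 */
static __rte_unused void
regc0_merge_example(uint32_t *reg_mask, uint32_t *reg_value,
		    uint32_t data, uint32_t mask)
{
	data &= mask;
	*reg_mask |= mask;   /* extend the set of matched bits */
	*reg_value &= ~mask; /* clear only the bits we own ... */
	*reg_value |= data;  /* ... then set the new value */
}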
7707
7708 /**
7709  * Add MARK item to matcher
7710  *
7711  * @param[in] dev
7712  *   The device to configure through.
7713  * @param[in, out] matcher
7714  *   Flow matcher.
7715  * @param[in, out] key
7716  *   Flow matcher value.
7717  * @param[in] item
7718  *   Flow pattern to translate.
7719  */
7720 static void
7721 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7722                             void *matcher, void *key,
7723                             const struct rte_flow_item *item)
7724 {
7725         struct mlx5_priv *priv = dev->data->dev_private;
7726         const struct rte_flow_item_mark *mark;
7727         uint32_t value;
7728         uint32_t mask;
7729
7730         mark = item->mask ? (const void *)item->mask :
7731                             &rte_flow_item_mark_mask;
7732         mask = mark->id & priv->sh->dv_mark_mask;
7733         mark = (const void *)item->spec;
7734         MLX5_ASSERT(mark);
7735         value = mark->id & priv->sh->dv_mark_mask & mask;
7736         if (mask) {
7737                 enum modify_reg reg;
7738
7739                 /* Get the metadata register index for the mark. */
7740                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7741                 MLX5_ASSERT(reg > 0);
7742                 if (reg == REG_C_0) {
7743                         struct mlx5_priv *priv = dev->data->dev_private;
7744                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7745                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7746
7747                         mask &= msk_c0;
7748                         mask <<= shl_c0;
7749                         value <<= shl_c0;
7750                 }
7751                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7752         }
7753 }
7754
7755 /**
7756  * Add META item to matcher
7757  *
7758  * @param[in] dev
7759  *   The device to configure through.
7760  * @param[in, out] matcher
7761  *   Flow matcher.
7762  * @param[in, out] key
7763  *   Flow matcher value.
7764  * @param[in] attr
7765  *   Attributes of flow that includes this item.
7766  * @param[in] item
7767  *   Flow pattern to translate.
7768  */
7769 static void
7770 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7771                             void *matcher, void *key,
7772                             const struct rte_flow_attr *attr,
7773                             const struct rte_flow_item *item)
7774 {
7775         const struct rte_flow_item_meta *meta_m;
7776         const struct rte_flow_item_meta *meta_v;
7777
7778         meta_m = (const void *)item->mask;
7779         if (!meta_m)
7780                 meta_m = &rte_flow_item_meta_mask;
7781         meta_v = (const void *)item->spec;
7782         if (meta_v) {
7783                 int reg;
7784                 uint32_t value = meta_v->data;
7785                 uint32_t mask = meta_m->data;
7786
7787                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7788                 if (reg < 0)
7789                         return;
7790                 MLX5_ASSERT(reg != REG_NON);
7791                 /*
7792                  * In datapath code there are no endianness
7793                  * conversions for performance reasons; all
7794                  * pattern conversions are done in rte_flow.
7795                  */
7796                 value = rte_cpu_to_be_32(value);
7797                 mask = rte_cpu_to_be_32(mask);
7798                 if (reg == REG_C_0) {
7799                         struct mlx5_priv *priv = dev->data->dev_private;
7800                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7801                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7802 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7803                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7804
7805                         value >>= shr_c0;
7806                         mask >>= shr_c0;
7807 #endif
7808                         value <<= shl_c0;
7809                         mask <<= shl_c0;
7810                         MLX5_ASSERT(msk_c0);
7811                         MLX5_ASSERT(!(~msk_c0 & mask));
7812                 }
7813                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7814         }
7815 }
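/*
 * Worked sketch (assumed masks) of the REG_C_0 placement above: the
 * META value is converted to big endian, shifted down past the bits
 * REG_C_0 does not expose to META, then shifted up to the sub-field
 * start found by rte_bsf32() of the register mask.
 */
static __rte_unused uint32_t
meta_to_regc0_example(uint32_t value, uint32_t msk_c0, uint32_t meta_mask)
{
	uint32_t shl_c0 = rte_bsf32(msk_c0);

	value = rte_cpu_to_be_32(value);
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	value >>= __builtin_clz(meta_mask);
#endif
	return value << shl_c0;
}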
7816
7817 /**
7818  * Add vport metadata Reg C0 item to matcher
7819  *
7820  * @param[in, out] matcher
7821  *   Flow matcher.
7822  * @param[in, out] key
7823  *   Flow matcher value.
7824  * @param[in] value
7825  *   Vport metadata value to match, masked by @p mask.
7826  */
7827 static void
7828 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7829                                   uint32_t value, uint32_t mask)
7830 {
7831         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7832 }
7833
7834 /**
7835  * Add tag item to matcher
7836  *
7837  * @param[in] dev
7838  *   The device to configure through.
7839  * @param[in, out] matcher
7840  *   Flow matcher.
7841  * @param[in, out] key
7842  *   Flow matcher value.
7843  * @param[in] item
7844  *   Flow pattern to translate.
7845  */
7846 static void
7847 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7848                                 void *matcher, void *key,
7849                                 const struct rte_flow_item *item)
7850 {
7851         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7852         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7853         uint32_t mask, value;
7854
7855         MLX5_ASSERT(tag_v);
7856         value = tag_v->data;
7857         mask = tag_m ? tag_m->data : UINT32_MAX;
7858         if (tag_v->id == REG_C_0) {
7859                 struct mlx5_priv *priv = dev->data->dev_private;
7860                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7861                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7862
7863                 mask &= msk_c0;
7864                 mask <<= shl_c0;
7865                 value <<= shl_c0;
7866         }
7867         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7868 }
7869
7870 /**
7871  * Add TAG item to matcher
7872  *
7873  * @param[in] dev
7874  *   The device to configure through.
7875  * @param[in, out] matcher
7876  *   Flow matcher.
7877  * @param[in, out] key
7878  *   Flow matcher value.
7879  * @param[in] item
7880  *   Flow pattern to translate.
7881  */
7882 static void
7883 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7884                            void *matcher, void *key,
7885                            const struct rte_flow_item *item)
7886 {
7887         const struct rte_flow_item_tag *tag_v = item->spec;
7888         const struct rte_flow_item_tag *tag_m = item->mask;
7889         enum modify_reg reg;
7890
7891         MLX5_ASSERT(tag_v);
7892         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7893         /* Get the metadata register index for the tag. */
7894         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7895         MLX5_ASSERT(reg > 0);
7896         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7897 }
7898
7899 /**
7900  * Add source vport match to the specified matcher.
7901  *
7902  * @param[in, out] matcher
7903  *   Flow matcher.
7904  * @param[in, out] key
7905  *   Flow matcher value.
7906  * @param[in] port
7907  *   Source vport value to match.
7908  * @param[in] mask
7909  *   Mask to apply on the vport value.
7910  */
7911 static void
7912 flow_dv_translate_item_source_vport(void *matcher, void *key,
7913                                     int16_t port, uint16_t mask)
7914 {
7915         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7916         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7917
7918         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7919         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7920 }
7921
7922 /**
7923  * Translate port-id item to eswitch match on port-id.
7924  *
7925  * @param[in] dev
7926  *   The device to configure through.
7927  * @param[in, out] matcher
7928  *   Flow matcher.
7929  * @param[in, out] key
7930  *   Flow matcher value.
7931  * @param[in] item
7932  *   Flow pattern to translate.
7933  * @param[in] attr
7934  *   Flow attributes.
7935  *
7936  * @return
7937  *   0 on success, a negative errno value otherwise.
7938  */
7939 static int
7940 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7941                                void *key, const struct rte_flow_item *item,
7942                                const struct rte_flow_attr *attr)
7943 {
7944         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7945         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7946         struct mlx5_priv *priv;
7947         uint16_t mask, id;
7948
7949         mask = pid_m ? pid_m->id : 0xffff;
7950         id = pid_v ? pid_v->id : dev->data->port_id;
7951         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7952         if (!priv)
7953                 return -rte_errno;
7954         /*
7955          * Translate to vport field or to metadata, depending on mode.
7956          * Kernel can use either misc.source_port or half of C0 metadata
7957          * register.
7958          */
7959         if (priv->vport_meta_mask) {
7960                 /*
7961                  * Provide the hint for SW steering library
7962                  * to insert the flow into ingress domain and
7963                  * save the extra vport match.
7964                  */
7965                 if (mask == 0xffff && priv->vport_id == 0xffff &&
7966                     priv->pf_bond < 0 && attr->transfer)
7967                         flow_dv_translate_item_source_vport
7968                                 (matcher, key, priv->vport_id, mask);
7969                 else
7970                         flow_dv_translate_item_meta_vport
7971                                 (matcher, key,
7972                                  priv->vport_meta_tag,
7973                                  priv->vport_meta_mask);
7974         } else {
7975                 flow_dv_translate_item_source_vport(matcher, key,
7976                                                     priv->vport_id, mask);
7977         }
7978         return 0;
7979 }
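/*
 * Hypothetical caller-side item for the translation above: match
 * traffic coming from DPDK port 0 (a representor, for instance).
 */
static __rte_unused void
port_id_item_example(struct rte_flow_item *item)
{
	static const struct rte_flow_item_port_id pid = { .id = 0 };

	item->type = RTE_FLOW_ITEM_TYPE_PORT_ID;
	item->spec = &pid;
	item->mask = &rte_flow_item_port_id_mask; /* default: full id */
}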
7980
7981 /**
7982  * Add ICMP6 item to matcher and to the value.
7983  *
7984  * @param[in, out] matcher
7985  *   Flow matcher.
7986  * @param[in, out] key
7987  *   Flow matcher value.
7988  * @param[in] item
7989  *   Flow pattern to translate.
7990  * @param[in] inner
7991  *   Item is inner pattern.
7992  */
7993 static void
7994 flow_dv_translate_item_icmp6(void *matcher, void *key,
7995                               const struct rte_flow_item *item,
7996                               int inner)
7997 {
7998         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7999         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
8000         void *headers_m;
8001         void *headers_v;
8002         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8003                                      misc_parameters_3);
8004         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8005         if (inner) {
8006                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8007                                          inner_headers);
8008                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8009         } else {
8010                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8011                                          outer_headers);
8012                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8013         }
8014         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8015         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
8016         if (!icmp6_v)
8017                 return;
8018         if (!icmp6_m)
8019                 icmp6_m = &rte_flow_item_icmp6_mask;
8020         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
8021         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
8022                  icmp6_v->type & icmp6_m->type);
8023         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
8024         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
8025                  icmp6_v->code & icmp6_m->code);
8026 }
8027
8028 /**
8029  * Add ICMP item to matcher and to the value.
8030  *
8031  * @param[in, out] matcher
8032  *   Flow matcher.
8033  * @param[in, out] key
8034  *   Flow matcher value.
8035  * @param[in] item
8036  *   Flow pattern to translate.
8037  * @param[in] inner
8038  *   Item is inner pattern.
8039  */
8040 static void
8041 flow_dv_translate_item_icmp(void *matcher, void *key,
8042                             const struct rte_flow_item *item,
8043                             int inner)
8044 {
8045         const struct rte_flow_item_icmp *icmp_m = item->mask;
8046         const struct rte_flow_item_icmp *icmp_v = item->spec;
8047         uint32_t icmp_header_data_m = 0;
8048         uint32_t icmp_header_data_v = 0;
8049         void *headers_m;
8050         void *headers_v;
8051         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8052                                      misc_parameters_3);
8053         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8054         if (inner) {
8055                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8056                                          inner_headers);
8057                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8058         } else {
8059                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8060                                          outer_headers);
8061                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8062         }
8063         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8064         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
8065         if (!icmp_v)
8066                 return;
8067         if (!icmp_m)
8068                 icmp_m = &rte_flow_item_icmp_mask;
8069         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
8070                  icmp_m->hdr.icmp_type);
8071         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
8072                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
8073         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
8074                  icmp_m->hdr.icmp_code);
8075         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
8076                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
8077         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
8078         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
8079         if (icmp_header_data_m) {
8080                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
8081                 icmp_header_data_v |=
8082                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
8083                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
8084                          icmp_header_data_m);
8085                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
8086                          icmp_header_data_v & icmp_header_data_m);
8087         }
8088 }
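/*
 * Sketch of the icmp_header_data layout used above: the identifier
 * occupies the upper 16 bits of the dword and the sequence number the
 * lower 16, both taken in CPU order.
 */
static __rte_unused uint32_t
icmp_header_data_example(rte_be16_t ident, rte_be16_t seq)
{
	return ((uint32_t)rte_be_to_cpu_16(ident) << 16) |
	       rte_be_to_cpu_16(seq);
}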
8089
8090 /**
8091  * Add GTP item to matcher and to the value.
8092  *
8093  * @param[in, out] matcher
8094  *   Flow matcher.
8095  * @param[in, out] key
8096  *   Flow matcher value.
8097  * @param[in] item
8098  *   Flow pattern to translate.
8099  * @param[in] inner
8100  *   Item is inner pattern.
8101  */
8102 static void
8103 flow_dv_translate_item_gtp(void *matcher, void *key,
8104                            const struct rte_flow_item *item, int inner)
8105 {
8106         const struct rte_flow_item_gtp *gtp_m = item->mask;
8107         const struct rte_flow_item_gtp *gtp_v = item->spec;
8108         void *headers_m;
8109         void *headers_v;
8110         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8111                                      misc_parameters_3);
8112         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8113         uint16_t dport = RTE_GTPU_UDP_PORT;
8114
8115         if (inner) {
8116                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8117                                          inner_headers);
8118                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8119         } else {
8120                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8121                                          outer_headers);
8122                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8123         }
8124         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8125                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8126                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8127         }
8128         if (!gtp_v)
8129                 return;
8130         if (!gtp_m)
8131                 gtp_m = &rte_flow_item_gtp_mask;
8132         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
8133                  gtp_m->v_pt_rsv_flags);
8134         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
8135                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
8136         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
8137         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
8138                  gtp_v->msg_type & gtp_m->msg_type);
8139         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
8140                  rte_be_to_cpu_32(gtp_m->teid));
8141         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
8142                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
8143 }
8144
8145 /**
8146  * Add GTP PSC item to matcher.
8147  *
8148  * @param[in, out] matcher
8149  *   Flow matcher.
8150  * @param[in, out] key
8151  *   Flow matcher value.
8152  * @param[in] item
8153  *   Flow pattern to translate.
8154  */
8155 static int
8156 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
8157                                const struct rte_flow_item *item)
8158 {
8159         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
8160         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
8161         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8162                         misc_parameters_3);
8163         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8164         union {
8165                 uint32_t w32;
8166                 struct {
8167                         uint16_t seq_num;
8168                         uint8_t npdu_num;
8169                         uint8_t next_ext_header_type;
8170                 };
8171         } dw_2;
8172         uint8_t gtp_flags;
8173
8174         /* Always set E-flag match on one, regardless of GTP item settings. */
8175         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
8176         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8177         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
8178         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
8179         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8180         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
8181         /* Set next extension header type. */
8182         dw_2.seq_num = 0;
8183         dw_2.npdu_num = 0;
8184         dw_2.next_ext_header_type = 0xff;
8185         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
8186                  rte_cpu_to_be_32(dw_2.w32));
8187         dw_2.seq_num = 0;
8188         dw_2.npdu_num = 0;
8189         dw_2.next_ext_header_type = 0x85;
8190         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
8191                  rte_cpu_to_be_32(dw_2.w32));
8192         if (gtp_psc_v) {
8193                 union {
8194                         uint32_t w32;
8195                         struct {
8196                                 uint8_t len;
8197                                 uint8_t type_flags;
8198                                 uint8_t qfi;
8199                                 uint8_t reserved;
8200                         };
8201                 } dw_0;
8202
8203                 /* Set extension header PDU type and QoS. */
8204                 if (!gtp_psc_m)
8205                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
8206                 dw_0.w32 = 0;
8207                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
8208                 dw_0.qfi = gtp_psc_m->qfi;
8209                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
8210                          rte_cpu_to_be_32(dw_0.w32));
8211                 dw_0.w32 = 0;
8212                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
8213                                                         gtp_psc_m->pdu_type);
8214                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
8215                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
8216                          rte_cpu_to_be_32(dw_0.w32));
8217         }
8218         return 0;
8219 }
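/*
 * Standalone sketch of the dw_2 packing above: the anonymous-struct
 * union assembles the GTP-U extension fields byte-wise so that they
 * can be written as one 32-bit word (0x85 = PDU session container).
 */
static __rte_unused uint32_t
gtp_dw2_example(void)
{
	union {
		uint32_t w32;
		struct {
			uint16_t seq_num;
			uint8_t npdu_num;
			uint8_t next_ext_header_type;
		};
	} dw_2 = { .w32 = 0 };

	dw_2.next_ext_header_type = 0x85;
	return rte_cpu_to_be_32(dw_2.w32);
}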
8220
8221 /**
8222  * Add eCPRI item to matcher and to the value.
8223  *
8224  * @param[in] dev
8225  *   The device to configure through.
8226  * @param[in, out] matcher
8227  *   Flow matcher.
8228  * @param[in, out] key
8229  *   Flow matcher value.
8230  * @param[in] item
8231  *   Flow pattern to translate.
8232  *
8233  * The eCPRI flex parser sample IDs are taken from the shared device context.
8234  */
8235 static void
8236 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
8237                              void *key, const struct rte_flow_item *item)
8238 {
8239         struct mlx5_priv *priv = dev->data->dev_private;
8240         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
8241         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
8242         struct rte_ecpri_common_hdr common;
8243         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
8244                                      misc_parameters_4);
8245         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
8246         uint32_t *samples;
8247         void *dw_m;
8248         void *dw_v;
8249
8250         if (!ecpri_v)
8251                 return;
8252         if (!ecpri_m)
8253                 ecpri_m = &rte_flow_item_ecpri_mask;
8254         /*
8255          * At most four DW samples are supported in a single matcher now.
8256          * Two are currently used for eCPRI matching:
8257          * 1. Type: one byte, mask should be 0x00ff0000 in network order
8258          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
8259          *    if any.
8260          */
8261         if (!ecpri_m->hdr.common.u32)
8262                 return;
8263         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
8264         /* Need to take the whole DW as the mask to fill the entry. */
8265         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8266                             prog_sample_field_value_0);
8267         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
8268                             prog_sample_field_value_0);
8269         /* Already big endian (network order) in the header. */
8270         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
8271         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
8272         /* Sample#0, used for matching type, offset 0. */
8273         MLX5_SET(fte_match_set_misc4, misc4_m,
8274                  prog_sample_field_id_0, samples[0]);
8275         /* It makes no sense to set the sample ID in the mask field. */
8276         MLX5_SET(fte_match_set_misc4, misc4_v,
8277                  prog_sample_field_id_0, samples[0]);
8278         /*
8279          * Checking if message body part needs to be matched.
8280          * Some wildcard rules only matching type field should be supported.
8281          */
8282         if (ecpri_m->hdr.dummy[0]) {
8283                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
8284                 switch (common.type) {
8285                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
8286                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
8287                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
8288                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8289                                             prog_sample_field_value_1);
8290                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
8291                                             prog_sample_field_value_1);
8292                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
8293                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
8294                                             ecpri_m->hdr.dummy[0];
8295                         /* Sample#1, to match message body, offset 4. */
8296                         MLX5_SET(fte_match_set_misc4, misc4_m,
8297                                  prog_sample_field_id_1, samples[1]);
8298                         MLX5_SET(fte_match_set_misc4, misc4_v,
8299                                  prog_sample_field_id_1, samples[1]);
8300                         break;
8301                 default:
8302                         /* Others, do not match any sample ID. */
8303                         break;
8304                 }
8305         }
8306 }
8307
8308 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
8309
8310 #define HEADER_IS_ZERO(match_criteria, headers)                              \
8311         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
8312                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
8313
8314 /**
8315  * Calculate flow matcher enable bitmap.
8316  *
8317  * @param match_criteria
8318  *   Pointer to flow matcher criteria.
8319  *
8320  * @return
8321  *   Bitmap of enabled fields.
8322  */
8323 static uint8_t
8324 flow_dv_matcher_enable(uint32_t *match_criteria)
8325 {
8326         uint8_t match_criteria_enable;
8327
8328         match_criteria_enable =
8329                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
8330                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
8331         match_criteria_enable |=
8332                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
8333                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
8334         match_criteria_enable |=
8335                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
8336                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
8337         match_criteria_enable |=
8338                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
8339                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8340         match_criteria_enable |=
8341                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
8342                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
8343         match_criteria_enable |=
8344                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
8345                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
8346         return match_criteria_enable;
8347 }
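/*
 * Illustrative sketch: a matcher touching only the outer headers
 * yields a bitmap with just MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT set,
 * telling the firmware which criteria blocks it has to compare.
 */
static __rte_unused uint8_t
matcher_enable_example(void)
{
	uint32_t match[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
	void *headers = MLX5_ADDR_OF(fte_match_param, match, outer_headers);

	MLX5_SET(fte_match_set_lyr_2_4, headers, udp_dport, 0xffff);
	return flow_dv_matcher_enable(match); /* 1 << OUTER_BIT */
}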
8348
8349 struct mlx5_hlist_entry *
8350 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
8351 {
8352         struct mlx5_dev_ctx_shared *sh = list->ctx;
8353         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8354         struct rte_eth_dev *dev = ctx->dev;
8355         struct mlx5_flow_tbl_data_entry *tbl_data;
8356         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
8357         struct rte_flow_error *error = ctx->error;
8358         union mlx5_flow_tbl_key key = { .v64 = key64 };
8359         struct mlx5_flow_tbl_resource *tbl;
8360         void *domain;
8361         uint32_t idx = 0;
8362         int ret;
8363
8364         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
8365         if (!tbl_data) {
8366                 rte_flow_error_set(error, ENOMEM,
8367                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8368                                    NULL,
8369                                    "cannot allocate flow table data entry");
8370                 return NULL;
8371         }
8372         tbl_data->idx = idx;
8373         tbl_data->tunnel = tt_prm->tunnel;
8374         tbl_data->group_id = tt_prm->group_id;
8375         tbl_data->external = !!tt_prm->external;
8376         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
8377         tbl_data->is_egress = !!key.direction;
8378         tbl_data->is_transfer = !!key.domain;
8379         tbl_data->dummy = !!key.dummy;
8380         tbl_data->table_id = key.table_id;
8381         tbl = &tbl_data->tbl;
8382         if (key.dummy)
8383                 return &tbl_data->entry;
8384         if (key.domain)
8385                 domain = sh->fdb_domain;
8386         else if (key.direction)
8387                 domain = sh->tx_domain;
8388         else
8389                 domain = sh->rx_domain;
8390         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
8391         if (ret) {
8392                 rte_flow_error_set(error, ENOMEM,
8393                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8394                                    NULL, "cannot create flow table object");
8395                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8396                 return NULL;
8397         }
8398         if (key.table_id) {
8399                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
8400                                         (tbl->obj, &tbl_data->jump.action);
8401                 if (ret) {
8402                         rte_flow_error_set(error, ENOMEM,
8403                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8404                                            NULL,
8405                                            "cannot create flow jump action");
8406                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8407                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8408                         return NULL;
8409                 }
8410         }
8411         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8412               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8413               key.table_id);
8414         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8415                              flow_dv_matcher_create_cb,
8416                              flow_dv_matcher_match_cb,
8417                              flow_dv_matcher_remove_cb);
8418         return &tbl_data->entry;
8419 }
8420
8421 int
8422 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
8423                      struct mlx5_hlist_entry *entry, uint64_t key64,
8424                      void *cb_ctx __rte_unused)
8425 {
8426         struct mlx5_flow_tbl_data_entry *tbl_data =
8427                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8428         union mlx5_flow_tbl_key key = { .v64 = key64 };
8429
8430         return tbl_data->table_id != key.table_id ||
8431                tbl_data->dummy != key.dummy ||
8432                tbl_data->is_transfer != key.domain ||
8433                tbl_data->is_egress != key.direction;
8434 }
8435
8436 /**
8437  * Get a flow table.
8438  *
8439  * @param[in, out] dev
8440  *   Pointer to rte_eth_dev structure.
8441  * @param[in] table_id
8442  *   Table id to use.
8443  * @param[in] egress
8444  *   Direction of the table.
8445  * @param[in] transfer
8446  *   E-Switch or NIC flow.
8447  * @param[in] dummy
8448  *   Dummy entry for dv API.
8449  * @param[out] error
8450  *   Pointer to error structure.
8451  *
8452  * @return
8453  *   Returns the table resource based on the index, NULL in case of failure.
8454  */
8455 struct mlx5_flow_tbl_resource *
8456 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8457                          uint32_t table_id, uint8_t egress,
8458                          uint8_t transfer,
8459                          bool external,
8460                          const struct mlx5_flow_tunnel *tunnel,
8461                          uint32_t group_id, uint8_t dummy,
8462                          struct rte_flow_error *error)
8463 {
8464         struct mlx5_priv *priv = dev->data->dev_private;
8465         union mlx5_flow_tbl_key table_key = {
8466                 {
8467                         .table_id = table_id,
8468                         .dummy = dummy,
8469                         .domain = !!transfer,
8470                         .direction = !!egress,
8471                 }
8472         };
8473         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8474                 .tunnel = tunnel,
8475                 .group_id = group_id,
8476                 .external = external,
8477         };
8478         struct mlx5_flow_cb_ctx ctx = {
8479                 .dev = dev,
8480                 .error = error,
8481                 .data = &tt_prm,
8482         };
8483         struct mlx5_hlist_entry *entry;
8484         struct mlx5_flow_tbl_data_entry *tbl_data;
8485
8486         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8487         if (!entry) {
8488                 rte_flow_error_set(error, ENOMEM,
8489                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8490                                    "cannot get table");
8491                 return NULL;
8492         }
8493         DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
8494                 table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
8495         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8496         return &tbl_data->tbl;
8497 }
8498
8499 void
8500 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8501                       struct mlx5_hlist_entry *entry)
8502 {
8503         struct mlx5_dev_ctx_shared *sh = list->ctx;
8504         struct mlx5_flow_tbl_data_entry *tbl_data =
8505                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8506
8507         MLX5_ASSERT(entry && sh);
8508         if (tbl_data->jump.action)
8509                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8510         if (tbl_data->tbl.obj)
8511                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8512         if (tbl_data->tunnel_offload && tbl_data->external) {
8513                 struct mlx5_hlist_entry *he;
8514                 struct mlx5_hlist *tunnel_grp_hash;
8515                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8516                 union tunnel_tbl_key tunnel_key = {
8517                         .tunnel_id = tbl_data->tunnel ?
8518                                         tbl_data->tunnel->tunnel_id : 0,
8519                         .group = tbl_data->group_id
8520                 };
8521                 uint32_t table_id = tbl_data->table_id;
8522
8523                 tunnel_grp_hash = tbl_data->tunnel ?
8524                                         tbl_data->tunnel->groups :
8525                                         thub->groups;
8526                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8527                 if (he)
8528                         mlx5_hlist_unregister(tunnel_grp_hash, he);
8529                 DRV_LOG(DEBUG,
8530                         "Table_id %u tunnel %u group %u released.",
8531                         table_id,
8532                         tbl_data->tunnel ?
8533                         tbl_data->tunnel->tunnel_id : 0,
8534                         tbl_data->group_id);
8535         }
8536         mlx5_cache_list_destroy(&tbl_data->matchers);
8537         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8538 }
8539
8540 /**
8541  * Release a flow table.
8542  *
8543  * @param[in] sh
8544  *   Pointer to device shared structure.
8545  * @param[in] tbl
8546  *   Table resource to be released.
8547  *
8548  * @return
 *   Returns 0 if the table was released, 1 otherwise.
8550  */
8551 static int
8552 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8553                              struct mlx5_flow_tbl_resource *tbl)
8554 {
        struct mlx5_flow_tbl_data_entry *tbl_data;

        if (!tbl)
                return 0;
        tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
        return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8561 }
8562
8563 int
8564 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8565                          struct mlx5_cache_entry *entry, void *cb_ctx)
8566 {
8567         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8568         struct mlx5_flow_dv_matcher *ref = ctx->data;
8569         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8570                                                         entry);
8571
8572         return cur->crc != ref->crc ||
8573                cur->priority != ref->priority ||
8574                memcmp((const void *)cur->mask.buf,
8575                       (const void *)ref->mask.buf, ref->mask.size);
8576 }
8577
8578 struct mlx5_cache_entry *
8579 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8580                           struct mlx5_cache_entry *entry __rte_unused,
8581                           void *cb_ctx)
8582 {
8583         struct mlx5_dev_ctx_shared *sh = list->ctx;
8584         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8585         struct mlx5_flow_dv_matcher *ref = ctx->data;
8586         struct mlx5_flow_dv_matcher *cache;
8587         struct mlx5dv_flow_matcher_attr dv_attr = {
8588                 .type = IBV_FLOW_ATTR_NORMAL,
8589                 .match_mask = (void *)&ref->mask,
8590         };
8591         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8592                                                             typeof(*tbl), tbl);
8593         int ret;
8594
8595         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8596         if (!cache) {
8597                 rte_flow_error_set(ctx->error, ENOMEM,
8598                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8599                                    "cannot create matcher");
8600                 return NULL;
8601         }
8602         *cache = *ref;
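        /* Enable only the match parameter blocks with a non-zero mask. */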
8603         dv_attr.match_criteria_enable =
8604                 flow_dv_matcher_enable(cache->mask.buf);
8605         dv_attr.priority = ref->priority;
8606         if (tbl->is_egress)
8607                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8608         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8609                                                &cache->matcher_object);
8610         if (ret) {
8611                 mlx5_free(cache);
8612                 rte_flow_error_set(ctx->error, ENOMEM,
8613                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8614                                    "cannot create matcher");
8615                 return NULL;
8616         }
8617         return &cache->entry;
8618 }
8619
8620 /**
8621  * Register the flow matcher.
8622  *
8623  * @param[in, out] dev
8624  *   Pointer to rte_eth_dev structure.
 * @param[in, out] ref
 *   Pointer to the flow matcher reference.
 * @param[in] key
 *   Pointer to the flow table key.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[in] tunnel
 *   Tunnel offload context, NULL if none.
 * @param[in] group_id
 *   The original flow group id.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
8636  */
8637 static int
8638 flow_dv_matcher_register(struct rte_eth_dev *dev,
8639                          struct mlx5_flow_dv_matcher *ref,
8640                          union mlx5_flow_tbl_key *key,
8641                          struct mlx5_flow *dev_flow,
8642                          const struct mlx5_flow_tunnel *tunnel,
8643                          uint32_t group_id,
8644                          struct rte_flow_error *error)
8645 {
8646         struct mlx5_cache_entry *entry;
8647         struct mlx5_flow_dv_matcher *cache;
8648         struct mlx5_flow_tbl_resource *tbl;
8649         struct mlx5_flow_tbl_data_entry *tbl_data;
8650         struct mlx5_flow_cb_ctx ctx = {
8651                 .error = error,
8652                 .data = ref,
8653         };
8654
        /*
         * The tunnel offload API requires this registration for cases when
         * a tunnel match rule was inserted before the tunnel set rule.
         */
8659         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
8660                                        key->direction, key->domain,
8661                                        dev_flow->external, tunnel,
8662                                        group_id, 0, error);
8663         if (!tbl)
8664                 return -rte_errno;      /* No need to refill the error info */
8665         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8666         ref->tbl = tbl;
8667         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8668         if (!entry) {
8669                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8670                 return rte_flow_error_set(error, ENOMEM,
8671                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8672                                           "cannot allocate ref memory");
8673         }
8674         cache = container_of(entry, typeof(*cache), entry);
8675         dev_flow->handle->dvh.matcher = cache;
8676         return 0;
8677 }
8678
8679 struct mlx5_hlist_entry *
8680 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8681 {
8682         struct mlx5_dev_ctx_shared *sh = list->ctx;
8683         struct rte_flow_error *error = ctx;
8684         struct mlx5_flow_dv_tag_resource *entry;
8685         uint32_t idx = 0;
8686         int ret;
8687
8688         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8689         if (!entry) {
8690                 rte_flow_error_set(error, ENOMEM,
8691                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8692                                    "cannot allocate resource memory");
8693                 return NULL;
8694         }
8695         entry->idx = idx;
8696         entry->tag_id = key;
8697         ret = mlx5_flow_os_create_flow_action_tag(key,
8698                                                   &entry->action);
8699         if (ret) {
8700                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8701                 rte_flow_error_set(error, ENOMEM,
8702                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8703                                    NULL, "cannot create action");
8704                 return NULL;
8705         }
8706         return &entry->entry;
8707 }
8708
8709 int
8710 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
8711                      struct mlx5_hlist_entry *entry, uint64_t key,
8712                      void *cb_ctx __rte_unused)
8713 {
8714         struct mlx5_flow_dv_tag_resource *tag =
8715                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8716
8717         return key != tag->tag_id;
8718 }
8719
8720 /**
8721  * Find existing tag resource or create and register a new one.
8722  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] tag_be24
 *   Tag value in big endian, right-shifted by 8 bits.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
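 *
 * A minimal usage sketch (tag_be24 assumed already converted as described
 * above):
 * @code
 *      if (flow_dv_tag_resource_register(dev, tag_be24, dev_flow, error))
 *              return -rte_errno;
 *      tag_action = dev_flow->dv.tag_resource->action;
 * @endcode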
8734  */
8735 static int
8736 flow_dv_tag_resource_register
8737                         (struct rte_eth_dev *dev,
8738                          uint32_t tag_be24,
8739                          struct mlx5_flow *dev_flow,
8740                          struct rte_flow_error *error)
8741 {
8742         struct mlx5_priv *priv = dev->data->dev_private;
8743         struct mlx5_flow_dv_tag_resource *cache_resource;
8744         struct mlx5_hlist_entry *entry;
8745
8746         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8747         if (entry) {
8748                 cache_resource = container_of
8749                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8750                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8751                 dev_flow->dv.tag_resource = cache_resource;
8752                 return 0;
8753         }
8754         return -rte_errno;
8755 }
8756
8757 void
8758 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8759                       struct mlx5_hlist_entry *entry)
8760 {
8761         struct mlx5_dev_ctx_shared *sh = list->ctx;
8762         struct mlx5_flow_dv_tag_resource *tag =
8763                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8764
8765         MLX5_ASSERT(tag && sh && tag->action);
8766         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8767         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8768         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8769 }
8770
8771 /**
8772  * Release the tag.
8773  *
8774  * @param dev
8775  *   Pointer to Ethernet device.
8776  * @param tag_idx
8777  *   Tag index.
8778  *
8779  * @return
8780  *   1 while a reference on it exists, 0 when freed.
8781  */
8782 static int
8783 flow_dv_tag_release(struct rte_eth_dev *dev,
8784                     uint32_t tag_idx)
8785 {
8786         struct mlx5_priv *priv = dev->data->dev_private;
8787         struct mlx5_flow_dv_tag_resource *tag;
8788
8789         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8790         if (!tag)
8791                 return 0;
8792         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8793                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8794         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8795 }
8796
8797 /**
8798  * Translate port ID action to vport.
8799  *
8800  * @param[in] dev
8801  *   Pointer to rte_eth_dev structure.
8802  * @param[in] action
8803  *   Pointer to the port ID action.
8804  * @param[out] dst_port_id
8805  *   The target port ID.
8806  * @param[out] error
8807  *   Pointer to the error structure.
8808  *
8809  * @return
8810  *   0 on success, a negative errno value otherwise and rte_errno is set.
8811  */
8812 static int
8813 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8814                                  const struct rte_flow_action *action,
8815                                  uint32_t *dst_port_id,
8816                                  struct rte_flow_error *error)
8817 {
8818         uint32_t port;
8819         struct mlx5_priv *priv;
8820         const struct rte_flow_action_port_id *conf =
8821                         (const struct rte_flow_action_port_id *)action->conf;
8822
8823         port = conf->original ? dev->data->port_id : conf->id;
8824         priv = mlx5_port_to_eswitch_info(port, false);
8825         if (!priv)
                return rte_flow_error_set(error, rte_errno,
8827                                           RTE_FLOW_ERROR_TYPE_ACTION,
8828                                           NULL,
8829                                           "No eswitch info was found for port");
8830 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8831         /*
8832          * This parameter is transferred to
8833          * mlx5dv_dr_action_create_dest_ib_port().
8834          */
8835         *dst_port_id = priv->dev_port;
8836 #else
8837         /*
         * Legacy mode, no LAG configuration is supported.
8839          * This parameter is transferred to
8840          * mlx5dv_dr_action_create_dest_vport().
8841          */
8842         *dst_port_id = priv->vport_id;
8843 #endif
8844         return 0;
8845 }
8846
8847 /**
8848  * Create a counter with aging configuration.
8849  *
8850  * @param[in] dev
8851  *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] count
 *   Pointer to the counter action configuration.
8854  * @param[in] age
8855  *   Pointer to the aging action configuration.
8856  *
8857  * @return
8858  *   Index to flow counter on success, 0 otherwise.
8859  */
8860 static uint32_t
8861 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8862                                 struct mlx5_flow *dev_flow,
8863                                 const struct rte_flow_action_count *count,
8864                                 const struct rte_flow_action_age *age)
8865 {
8866         uint32_t counter;
8867         struct mlx5_age_param *age_param;
8868
8869         if (count && count->shared)
8870                 counter = flow_dv_counter_get_shared(dev, count->id);
8871         else
8872                 counter = flow_dv_counter_alloc(dev, !!age);
8873         if (!counter || age == NULL)
8874                 return counter;
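        /* The counter was allocated with aging enabled; bind the age context. */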
        age_param = flow_dv_counter_idx_get_age(dev, counter);
8876         age_param->context = age->context ? age->context :
8877                 (void *)(uintptr_t)(dev_flow->flow_idx);
8878         age_param->timeout = age->timeout;
8879         age_param->port_id = dev->data->port_id;
8880         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8881         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8882         return counter;
8883 }
8884
8885 /**
8886  * Add Tx queue matcher
8887  *
8888  * @param[in] dev
8889  *   Pointer to the dev struct.
8890  * @param[in, out] matcher
8891  *   Flow matcher.
8892  * @param[in, out] key
8893  *   Flow matcher value.
8894  * @param[in] item
8895  *   Flow pattern to translate.
8898  */
8899 static void
8900 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8901                                 void *matcher, void *key,
8902                                 const struct rte_flow_item *item)
8903 {
8904         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8905         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8906         void *misc_m =
8907                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8908         void *misc_v =
8909                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8910         struct mlx5_txq_ctrl *txq;
8911         uint32_t queue;
8912
8914         queue_m = (const void *)item->mask;
8915         if (!queue_m)
8916                 return;
8917         queue_v = (const void *)item->spec;
8918         if (!queue_v)
8919                 return;
8920         txq = mlx5_txq_get(dev, queue_v->queue);
8921         if (!txq)
8922                 return;
8923         queue = txq->obj->sq->id;
8924         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8925         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8926                  queue & queue_m->queue);
8927         mlx5_txq_release(dev, queue_v->queue);
8928 }
8929
8930 /**
8931  * Set the hash fields according to the @p flow information.
8932  *
8933  * @param[in] dev_flow
8934  *   Pointer to the mlx5_flow.
8935  * @param[in] rss_desc
8936  *   Pointer to the mlx5_flow_rss_desc.
8937  */
8938 static void
8939 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8940                        struct mlx5_flow_rss_desc *rss_desc)
8941 {
8942         uint64_t items = dev_flow->handle->layers;
8943         int rss_inner = 0;
8944         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8945
8946         dev_flow->hash_fields = 0;
8947 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8948         if (rss_desc->level >= 2) {
8949                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8950                 rss_inner = 1;
8951         }
8952 #endif
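        /*
         * Select L3/L4 hash fields from the outer headers, or from the inner
         * ones when inner RSS is requested; the *_SRC_ONLY/*_DST_ONLY types
         * restrict hashing to one side of the address/port pair.
         */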
8953         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8954             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8955                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8956                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8957                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8958                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8959                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8960                         else
8961                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8962                 }
8963         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8964                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8965                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8966                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8967                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8968                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8969                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8970                         else
8971                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8972                 }
8973         }
8974         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8975             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8976                 if (rss_types & ETH_RSS_UDP) {
8977                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8978                                 dev_flow->hash_fields |=
8979                                                 IBV_RX_HASH_SRC_PORT_UDP;
8980                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8981                                 dev_flow->hash_fields |=
8982                                                 IBV_RX_HASH_DST_PORT_UDP;
8983                         else
8984                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8985                 }
8986         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8987                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8988                 if (rss_types & ETH_RSS_TCP) {
8989                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8990                                 dev_flow->hash_fields |=
8991                                                 IBV_RX_HASH_SRC_PORT_TCP;
8992                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8993                                 dev_flow->hash_fields |=
8994                                                 IBV_RX_HASH_DST_PORT_TCP;
8995                         else
8996                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8997                 }
8998         }
8999 }
9000
9001 /**
9002  * Prepare an Rx Hash queue.
9003  *
9004  * @param dev
9005  *   Pointer to Ethernet device.
9006  * @param[in] dev_flow
9007  *   Pointer to the mlx5_flow.
9008  * @param[in] rss_desc
9009  *   Pointer to the mlx5_flow_rss_desc.
9010  * @param[out] hrxq_idx
9011  *   Hash Rx queue index.
9012  *
9013  * @return
9014  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
9015  */
9016 static struct mlx5_hrxq *
9017 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
9018                      struct mlx5_flow *dev_flow,
9019                      struct mlx5_flow_rss_desc *rss_desc,
9020                      uint32_t *hrxq_idx)
9021 {
9022         struct mlx5_priv *priv = dev->data->dev_private;
9023         struct mlx5_flow_handle *dh = dev_flow->handle;
9024         struct mlx5_hrxq *hrxq;
9025
9026         MLX5_ASSERT(rss_desc->queue_num);
9027         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
9028         rss_desc->hash_fields = dev_flow->hash_fields;
9029         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
9030         rss_desc->shared_rss = 0;
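        /*
         * mlx5_hrxq_get() returns an existing hash Rx queue matching the
         * description when possible, taking a new reference on it.
         */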
9031         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
9032         if (!*hrxq_idx)
9033                 return NULL;
9034         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9035                               *hrxq_idx);
9036         return hrxq;
9037 }
9038
9039 /**
9040  * Release sample sub action resource.
9041  *
9042  * @param[in, out] dev
9043  *   Pointer to rte_eth_dev structure.
9044  * @param[in] act_res
9045  *   Pointer to sample sub action resource.
9046  */
9047 static void
9048 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
9049                                    struct mlx5_flow_sub_actions_idx *act_res)
9050 {
9051         if (act_res->rix_hrxq) {
9052                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
9053                 act_res->rix_hrxq = 0;
9054         }
9055         if (act_res->rix_encap_decap) {
9056                 flow_dv_encap_decap_resource_release(dev,
9057                                                      act_res->rix_encap_decap);
9058                 act_res->rix_encap_decap = 0;
9059         }
9060         if (act_res->rix_port_id_action) {
9061                 flow_dv_port_id_action_resource_release(dev,
9062                                                 act_res->rix_port_id_action);
9063                 act_res->rix_port_id_action = 0;
9064         }
9065         if (act_res->rix_tag) {
9066                 flow_dv_tag_release(dev, act_res->rix_tag);
9067                 act_res->rix_tag = 0;
9068         }
9069         if (act_res->cnt) {
9070                 flow_dv_counter_free(dev, act_res->cnt);
9071                 act_res->cnt = 0;
9072         }
9073         if (act_res->rix_jump) {
9074                 flow_dv_jump_tbl_resource_release(dev, act_res->rix_jump);
9075                 act_res->rix_jump = 0;
9076         }
9077 }
9078
9079 int
9080 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
9081                         struct mlx5_cache_entry *entry, void *cb_ctx)
9082 {
9083         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9084         struct rte_eth_dev *dev = ctx->dev;
9085         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9086         struct mlx5_flow_dv_sample_resource *cache_resource =
9087                         container_of(entry, typeof(*cache_resource), entry);
9088
9089         if (resource->ratio == cache_resource->ratio &&
9090             resource->ft_type == cache_resource->ft_type &&
9091             resource->ft_id == cache_resource->ft_id &&
9092             resource->set_action == cache_resource->set_action &&
9093             !memcmp((void *)&resource->sample_act,
9094                     (void *)&cache_resource->sample_act,
9095                     sizeof(struct mlx5_flow_sub_actions_list))) {
9096                 /*
                 * An existing sample action matched; release the references
                 * taken by the prepared sub-actions.
9099                  */
9100                 flow_dv_sample_sub_actions_release(dev,
9101                                                 &resource->sample_idx);
9102                 return 0;
9103         }
9104         return 1;
9105 }
9106
9107 struct mlx5_cache_entry *
9108 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
9109                          struct mlx5_cache_entry *entry __rte_unused,
9110                          void *cb_ctx)
9111 {
9112         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9113         struct rte_eth_dev *dev = ctx->dev;
9114         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9115         void **sample_dv_actions = resource->sub_actions;
9116         struct mlx5_flow_dv_sample_resource *cache_resource;
9117         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
9118         struct mlx5_priv *priv = dev->data->dev_private;
9119         struct mlx5_dev_ctx_shared *sh = priv->sh;
9120         struct mlx5_flow_tbl_resource *tbl;
9121         uint32_t idx = 0;
9122         const uint32_t next_ft_step = 1;
9123         uint32_t next_ft_id = resource->ft_id + next_ft_step;
9124         uint8_t is_egress = 0;
9125         uint8_t is_transfer = 0;
        struct rte_flow_error *error = ctx->error;
        int ret;
9127
9128         /* Register new sample resource. */
9129         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
9130         if (!cache_resource) {
9131                 rte_flow_error_set(error, ENOMEM,
9132                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9133                                           NULL,
9134                                           "cannot allocate resource memory");
9135                 return NULL;
9136         }
9137         *cache_resource = *resource;
9138         /* Create normal path table level */
9139         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9140                 is_transfer = 1;
9141         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
9142                 is_egress = 1;
9143         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
9144                                         is_egress, is_transfer,
9145                                         true, NULL, 0, 0, error);
9146         if (!tbl) {
9147                 rte_flow_error_set(error, ENOMEM,
9148                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9149                                           NULL,
9150                                           "fail to create normal path table "
9151                                           "for sample");
9152                 goto error;
9153         }
9156         cache_resource->normal_path_tbl = tbl;
9157         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
9158                 ret = mlx5_flow_os_create_flow_action_default_miss
9159                         (&cache_resource->default_miss);
                if (ret) {
9161                         rte_flow_error_set(error, ENOMEM,
9162                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9163                                                 NULL,
9164                                                 "cannot create default miss "
9165                                                 "action");
9166                         goto error;
9167                 }
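                /*
                 * For the FDB domain, append a default miss action to the
                 * sample actions list so the sampled copy has an explicit
                 * fate.
                 */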
9168                 sample_dv_actions[resource->sample_act.actions_num++] =
9169                                                 cache_resource->default_miss;
9170         }
9171         /* Create a DR sample action */
9172         sampler_attr.sample_ratio = cache_resource->ratio;
9173         sampler_attr.default_next_table = tbl->obj;
9174         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
9175         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
9176                                                         &sample_dv_actions[0];
9177         sampler_attr.action = cache_resource->set_action;
9178         if (mlx5_os_flow_dr_create_flow_action_sampler
9179                         (&sampler_attr, &cache_resource->verbs_action)) {
9180                 rte_flow_error_set(error, ENOMEM,
9181                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9182                                         NULL, "cannot create sample action");
9183                 goto error;
9184         }
9185         cache_resource->idx = idx;
9186         cache_resource->dev = dev;
9187         return &cache_resource->entry;
9188 error:
9189         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
9190             cache_resource->default_miss)
9191                 claim_zero(mlx5_flow_os_destroy_flow_action
9192                                 (cache_resource->default_miss));
9193         else
9194                 flow_dv_sample_sub_actions_release(dev,
9195                                                    &cache_resource->sample_idx);
9196         if (cache_resource->normal_path_tbl)
9197                 flow_dv_tbl_resource_release(MLX5_SH(dev),
9198                                 cache_resource->normal_path_tbl);
9199         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
9200         return NULL;
9202 }
9203
9204 /**
9205  * Find existing sample resource or create and register a new one.
9206  *
9207  * @param[in, out] dev
9208  *   Pointer to rte_eth_dev structure.
9209  * @param[in] resource
9210  *   Pointer to sample resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
9218  */
9219 static int
9220 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
9221                          struct mlx5_flow_dv_sample_resource *resource,
9222                          struct mlx5_flow *dev_flow,
9223                          struct rte_flow_error *error)
9224 {
9225         struct mlx5_flow_dv_sample_resource *cache_resource;
9226         struct mlx5_cache_entry *entry;
9227         struct mlx5_priv *priv = dev->data->dev_private;
9228         struct mlx5_flow_cb_ctx ctx = {
9229                 .dev = dev,
9230                 .error = error,
9231                 .data = resource,
9232         };
9233
9234         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
9235         if (!entry)
9236                 return -rte_errno;
9237         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9238         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
9239         dev_flow->dv.sample_res = cache_resource;
9240         return 0;
9241 }
9242
9243 int
9244 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
9245                             struct mlx5_cache_entry *entry, void *cb_ctx)
9246 {
9247         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9248         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9249         struct rte_eth_dev *dev = ctx->dev;
9250         struct mlx5_flow_dv_dest_array_resource *cache_resource =
9251                         container_of(entry, typeof(*cache_resource), entry);
9252         uint32_t idx = 0;
9253
9254         if (resource->num_of_dest == cache_resource->num_of_dest &&
9255             resource->ft_type == cache_resource->ft_type &&
9256             !memcmp((void *)cache_resource->sample_act,
9257                     (void *)resource->sample_act,
9258                    (resource->num_of_dest *
9259                    sizeof(struct mlx5_flow_sub_actions_list)))) {
9260                 /*
                 * An existing destination array matched; release the
                 * references taken by the prepared sub-actions.
9263                  */
9264                 for (idx = 0; idx < resource->num_of_dest; idx++)
9265                         flow_dv_sample_sub_actions_release(dev,
9266                                         &resource->sample_idx[idx]);
9267                 return 0;
9268         }
9269         return 1;
9270 }
9271
9272 struct mlx5_cache_entry *
9273 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
9274                          struct mlx5_cache_entry *entry __rte_unused,
9275                          void *cb_ctx)
9276 {
9277         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9278         struct rte_eth_dev *dev = ctx->dev;
9279         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9280         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9281         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
9282         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
9283         struct mlx5_priv *priv = dev->data->dev_private;
9284         struct mlx5_dev_ctx_shared *sh = priv->sh;
9285         struct mlx5_flow_sub_actions_list *sample_act;
9286         struct mlx5dv_dr_domain *domain;
9287         uint32_t idx = 0, res_idx = 0;
9288         struct rte_flow_error *error = ctx->error;
9289         uint64_t action_flags;
9290         int ret;
9291
9292         /* Register new destination array resource. */
9293         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
9294                                             &res_idx);
9295         if (!cache_resource) {
9296                 rte_flow_error_set(error, ENOMEM,
9297                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9298                                           NULL,
9299                                           "cannot allocate resource memory");
9300                 return NULL;
9301         }
9302         *cache_resource = *resource;
9303         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9304                 domain = sh->fdb_domain;
9305         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
9306                 domain = sh->rx_domain;
9307         else
9308                 domain = sh->tx_domain;
9309         for (idx = 0; idx < resource->num_of_dest; idx++) {
9310                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
9311                                  mlx5_malloc(MLX5_MEM_ZERO,
9312                                  sizeof(struct mlx5dv_dr_action_dest_attr),
9313                                  0, SOCKET_ID_ANY);
9314                 if (!dest_attr[idx]) {
9315                         rte_flow_error_set(error, ENOMEM,
9316                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9317                                            NULL,
9318                                            "cannot allocate resource memory");
9319                         goto error;
9320                 }
9321                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
9322                 sample_act = &resource->sample_act[idx];
9323                 action_flags = sample_act->action_flags;
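                /*
                 * Each destination entry is a queue, a vport (optionally
                 * with packet reformat), or a jump to another table.
                 */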
9324                 switch (action_flags) {
9325                 case MLX5_FLOW_ACTION_QUEUE:
9326                         dest_attr[idx]->dest = sample_act->dr_queue_action;
9327                         break;
9328                 case (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP):
9329                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
9330                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
9331                         dest_attr[idx]->dest_reformat->reformat =
9332                                         sample_act->dr_encap_action;
9333                         dest_attr[idx]->dest_reformat->dest =
9334                                         sample_act->dr_port_id_action;
9335                         break;
9336                 case MLX5_FLOW_ACTION_PORT_ID:
9337                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
9338                         break;
9339                 case MLX5_FLOW_ACTION_JUMP:
9340                         dest_attr[idx]->dest = sample_act->dr_jump_action;
9341                         break;
9342                 default:
9343                         rte_flow_error_set(error, EINVAL,
9344                                            RTE_FLOW_ERROR_TYPE_ACTION,
9345                                            NULL,
9346                                            "unsupported actions type");
9347                         goto error;
9348                 }
9349         }
        /* Create a destination array action. */
9351         ret = mlx5_os_flow_dr_create_flow_action_dest_array
9352                                                 (domain,
9353                                                  cache_resource->num_of_dest,
9354                                                  dest_attr,
9355                                                  &cache_resource->action);
9356         if (ret) {
9357                 rte_flow_error_set(error, ENOMEM,
9358                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9359                                    NULL,
9360                                    "cannot create destination array action");
9361                 goto error;
9362         }
9363         cache_resource->idx = res_idx;
9364         cache_resource->dev = dev;
9365         for (idx = 0; idx < resource->num_of_dest; idx++)
9366                 mlx5_free(dest_attr[idx]);
9367         return &cache_resource->entry;
9368 error:
9369         for (idx = 0; idx < resource->num_of_dest; idx++) {
9370                 struct mlx5_flow_sub_actions_idx *act_res =
9371                                         &cache_resource->sample_idx[idx];
9372                 if (act_res->rix_hrxq &&
9373                     !mlx5_hrxq_release(dev,
9374                                 act_res->rix_hrxq))
9375                         act_res->rix_hrxq = 0;
9376                 if (act_res->rix_encap_decap &&
9377                         !flow_dv_encap_decap_resource_release(dev,
9378                                 act_res->rix_encap_decap))
9379                         act_res->rix_encap_decap = 0;
9380                 if (act_res->rix_port_id_action &&
9381                         !flow_dv_port_id_action_resource_release(dev,
9382                                 act_res->rix_port_id_action))
9383                         act_res->rix_port_id_action = 0;
9384                 if (act_res->rix_jump &&
9385                         !flow_dv_jump_tbl_resource_release(dev,
9386                                 act_res->rix_jump))
9387                         act_res->rix_jump = 0;
9388                 if (dest_attr[idx])
9389                         mlx5_free(dest_attr[idx]);
9390         }
9391
9392         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
9393         return NULL;
9394 }
9395
9396 /**
9397  * Find existing destination array resource or create and register a new one.
9398  *
9399  * @param[in, out] dev
9400  *   Pointer to rte_eth_dev structure.
9401  * @param[in] resource
9402  *   Pointer to destination array resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
9410  */
9411 static int
9412 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
9413                          struct mlx5_flow_dv_dest_array_resource *resource,
9414                          struct mlx5_flow *dev_flow,
9415                          struct rte_flow_error *error)
9416 {
9417         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9418         struct mlx5_priv *priv = dev->data->dev_private;
9419         struct mlx5_cache_entry *entry;
9420         struct mlx5_flow_cb_ctx ctx = {
9421                 .dev = dev,
9422                 .error = error,
9423                 .data = resource,
9424         };
9425
9426         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
9427         if (!entry)
9428                 return -rte_errno;
9429         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9430         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
9431         dev_flow->dv.dest_array_res = cache_resource;
9432         return 0;
9433 }
9434
9435 /**
9436  * Convert Sample action to DV specification.
9437  *
9438  * @param[in] dev
9439  *   Pointer to rte_eth_dev structure.
9440  * @param[in] action
9441  *   Pointer to sample action structure.
9442  * @param[in, out] dev_flow
9443  *   Pointer to the mlx5_flow.
9444  * @param[in] attr
9445  *   Pointer to the flow attributes.
9446  * @param[in, out] num_of_dest
 *   Pointer to the number of destinations.
9448  * @param[in, out] sample_actions
9449  *   Pointer to sample actions list.
9450  * @param[in, out] res
9451  *   Pointer to sample resource.
9452  * @param[out] error
9453  *   Pointer to the error structure.
9454  *
9455  * @return
9456  *   0 on success, a negative errno value otherwise and rte_errno is set.
9457  */
9458 static int
9459 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
9460                                 const struct rte_flow_action_sample *action,
9461                                 struct mlx5_flow *dev_flow,
9462                                 const struct rte_flow_attr *attr,
9463                                 uint32_t *num_of_dest,
9464                                 void **sample_actions,
9465                                 struct mlx5_flow_dv_sample_resource *res,
9466                                 struct rte_flow_error *error)
9467 {
9468         struct mlx5_priv *priv = dev->data->dev_private;
9469         const struct rte_flow_action *sub_actions;
9470         struct mlx5_flow_sub_actions_list *sample_act;
9471         struct mlx5_flow_sub_actions_idx *sample_idx;
9472         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9473         struct mlx5_flow_rss_desc *rss_desc;
9474         uint64_t action_flags = 0;
9475
9476         MLX5_ASSERT(wks);
9477         rss_desc = &wks->rss_desc;
9478         sample_act = &res->sample_act;
9479         sample_idx = &res->sample_idx;
9480         res->ratio = action->ratio;
9481         sub_actions = action->actions;
9482         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
9483                 int type = sub_actions->type;
9484                 uint32_t pre_rix = 0;
9485                 void *pre_r;
9486                 switch (type) {
9487                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9488                 {
9489                         const struct rte_flow_action_queue *queue;
9490                         struct mlx5_hrxq *hrxq;
9491                         uint32_t hrxq_idx;
9492
9493                         queue = sub_actions->conf;
9494                         rss_desc->queue_num = 1;
9495                         rss_desc->queue[0] = queue->index;
9496                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9497                                                     rss_desc, &hrxq_idx);
9498                         if (!hrxq)
9499                                 return rte_flow_error_set
9500                                         (error, rte_errno,
9501                                          RTE_FLOW_ERROR_TYPE_ACTION,
9502                                          NULL,
9503                                          "cannot create fate queue");
9504                         sample_act->dr_queue_action = hrxq->action;
9505                         sample_idx->rix_hrxq = hrxq_idx;
9506                         sample_actions[sample_act->actions_num++] =
9507                                                 hrxq->action;
9508                         (*num_of_dest)++;
9509                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9510                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9511                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9512                         dev_flow->handle->fate_action =
9513                                         MLX5_FLOW_FATE_QUEUE;
9514                         break;
9515                 }
9516                 case RTE_FLOW_ACTION_TYPE_RSS:
9517                 {
9518                         struct mlx5_hrxq *hrxq;
9519                         uint32_t hrxq_idx;
9520                         const struct rte_flow_action_rss *rss;
9521                         const uint8_t *rss_key;
9522
9523                         rss = sub_actions->conf;
9524                         memcpy(rss_desc->queue, rss->queue,
9525                                rss->queue_num * sizeof(uint16_t));
9526                         rss_desc->queue_num = rss->queue_num;
9527                         /* NULL RSS key indicates default RSS key. */
9528                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9529                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9530                         /*
                         * rss->level and rss->types should be set in advance
9532                          * when expanding items for RSS.
9533                          */
9534                         flow_dv_hashfields_set(dev_flow, rss_desc);
9535                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9536                                                     rss_desc, &hrxq_idx);
9537                         if (!hrxq)
9538                                 return rte_flow_error_set
9539                                         (error, rte_errno,
9540                                          RTE_FLOW_ERROR_TYPE_ACTION,
9541                                          NULL,
9542                                          "cannot create fate queue");
9543                         sample_act->dr_queue_action = hrxq->action;
9544                         sample_idx->rix_hrxq = hrxq_idx;
9545                         sample_actions[sample_act->actions_num++] =
9546                                                 hrxq->action;
9547                         (*num_of_dest)++;
9548                         action_flags |= MLX5_FLOW_ACTION_RSS;
9549                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9550                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9551                         dev_flow->handle->fate_action =
9552                                         MLX5_FLOW_FATE_QUEUE;
9553                         break;
9554                 }
9555                 case RTE_FLOW_ACTION_TYPE_MARK:
9556                 {
9557                         uint32_t tag_be = mlx5_flow_mark_set
9558                                 (((const struct rte_flow_action_mark *)
9559                                 (sub_actions->conf))->id);
9560
9561                         dev_flow->handle->mark = 1;
9562                         pre_rix = dev_flow->handle->dvh.rix_tag;
9563                         /* Save the mark resource before sample */
9564                         pre_r = dev_flow->dv.tag_resource;
9565                         if (flow_dv_tag_resource_register(dev, tag_be,
9566                                                   dev_flow, error))
9567                                 return -rte_errno;
9568                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9569                         sample_act->dr_tag_action =
9570                                 dev_flow->dv.tag_resource->action;
9571                         sample_idx->rix_tag =
9572                                 dev_flow->handle->dvh.rix_tag;
9573                         sample_actions[sample_act->actions_num++] =
9574                                                 sample_act->dr_tag_action;
9575                         /* Recover the mark resource after sample */
9576                         dev_flow->dv.tag_resource = pre_r;
9577                         dev_flow->handle->dvh.rix_tag = pre_rix;
9578                         action_flags |= MLX5_FLOW_ACTION_MARK;
9579                         break;
9580                 }
9581                 case RTE_FLOW_ACTION_TYPE_COUNT:
9582                 {
9583                         uint32_t counter;
9584
9585                         counter = flow_dv_translate_create_counter(dev,
                                        dev_flow, sub_actions->conf, NULL);
9587                         if (!counter)
9588                                 return rte_flow_error_set
9589                                                 (error, rte_errno,
9590                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9591                                                  NULL,
9592                                                  "cannot create counter"
9593                                                  " object.");
9594                         sample_idx->cnt = counter;
9595                         sample_act->dr_cnt_action =
9596                                   (flow_dv_counter_get_by_idx(dev,
9597                                   counter, NULL))->action;
9598                         sample_actions[sample_act->actions_num++] =
9599                                                 sample_act->dr_cnt_action;
9600                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9601                         break;
9602                 }
9603                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9604                 {
9605                         struct mlx5_flow_dv_port_id_action_resource
9606                                         port_id_resource;
9607                         uint32_t port_id = 0;
9608
9609                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9610                         /* Save the port id resource before sample */
9611                         pre_rix = dev_flow->handle->rix_port_id_action;
9612                         pre_r = dev_flow->dv.port_id_action;
9613                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9614                                                              &port_id, error))
9615                                 return -rte_errno;
9616                         port_id_resource.port_id = port_id;
9617                         if (flow_dv_port_id_action_resource_register
9618                             (dev, &port_id_resource, dev_flow, error))
9619                                 return -rte_errno;
9620                         sample_act->dr_port_id_action =
9621                                 dev_flow->dv.port_id_action->action;
9622                         sample_idx->rix_port_id_action =
9623                                 dev_flow->handle->rix_port_id_action;
9624                         sample_actions[sample_act->actions_num++] =
9625                                                 sample_act->dr_port_id_action;
9626                         /* Recover the port id resource after sample */
9627                         dev_flow->dv.port_id_action = pre_r;
9628                         dev_flow->handle->rix_port_id_action = pre_rix;
9629                         (*num_of_dest)++;
9630                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9631                         break;
9632                 }
9633                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9634                         /* Save the encap resource before sample */
9635                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9636                         pre_r = dev_flow->dv.encap_decap;
9637                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9638                                                            dev_flow,
9639                                                            attr->transfer,
9640                                                            error))
9641                                 return -rte_errno;
9642                         sample_act->dr_encap_action =
9643                                 dev_flow->dv.encap_decap->action;
9644                         sample_idx->rix_encap_decap =
9645                                 dev_flow->handle->dvh.rix_encap_decap;
9646                         sample_actions[sample_act->actions_num++] =
9647                                                 sample_act->dr_encap_action;
9648                         /* Recover the encap resource after sample */
9649                         dev_flow->dv.encap_decap = pre_r;
9650                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9651                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9652                         break;
9653                 default:
9654                         return rte_flow_error_set(error, EINVAL,
9655                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9656                                 NULL,
9657                                 "Not support for sampler action");
9658                 }
9659         }
9660         sample_act->action_flags = action_flags;
9661         res->ft_id = dev_flow->dv.group;
9662         if (attr->transfer) {
9663                 union {
9664                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9665                         uint64_t set_action;
9666                 } action_ctx = { .set_action = 0 };
9667
9668                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
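                /*
                 * Build a SET action writing the source vport metadata tag
                 * into register C0 for the sampled packets.
                 */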
9669                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9670                          MLX5_MODIFICATION_TYPE_SET);
9671                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9672                          MLX5_MODI_META_REG_C_0);
9673                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9674                          priv->vport_meta_tag);
9675                 res->set_action = action_ctx.set_action;
9676         } else if (attr->ingress) {
9677                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9678         } else {
9679                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9680         }
9681         return 0;
9682 }
9683
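/*
 * Editor's sketch (not part of the driver): the action_ctx union above lets
 * the PRM "set_action_in" block, filled one 32-bit dword at a time by
 * MLX5_SET(), be handed over as a single 64-bit set_action value.  The bit
 * layout below is illustrative only -- mlx5_prm.h holds the real offsets.
 */
#if 0
#include <stdint.h>
#include <string.h>

static uint64_t
pack_set_action(uint32_t type, uint32_t field, uint32_t data)
{
	uint32_t action_in[2] = { 0 };
	uint64_t set_action;

	/* Hypothetical layout: type and field share the first dword. */
	action_in[0] = (type << 24) | (field & 0xffffff);
	action_in[1] = data;
	/* Same reinterpretation the union performs. */
	memcpy(&set_action, action_in, sizeof(set_action));
	return set_action;
}
#endif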
9684 /**
9685  * Convert Sample action to DV specification.
9686  *
9687  * @param[in] dev
9688  *   Pointer to rte_eth_dev structure.
9689  * @param[in, out] dev_flow
9690  *   Pointer to the mlx5_flow.
9691  * @param[in] num_of_dest
9692  *   The number of destinations.
9693  * @param[in, out] res
9694  *   Pointer to sample resource.
9695  * @param[in, out] mdest_res
9696  *   Pointer to destination array resource.
9697  * @param[in] sample_actions
9698  *   Pointer to sample path actions list.
9699  * @param[in] action_flags
9700  *   Holds the actions detected until now.
9701  * @param[out] error
9702  *   Pointer to the error structure.
9703  *
9704  * @return
9705  *   0 on success, a negative errno value otherwise and rte_errno is set.
9706  */
9707 static int
9708 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9709                              struct mlx5_flow *dev_flow,
9710                              uint32_t num_of_dest,
9711                              struct mlx5_flow_dv_sample_resource *res,
9712                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9713                              void **sample_actions,
9714                              uint64_t action_flags,
9715                              struct rte_flow_error *error)
9716 {
9717         /* update normal path action resource into last index of array */
9718         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9719         struct mlx5_flow_sub_actions_list *sample_act =
9720                                         &mdest_res->sample_act[dest_index];
9721         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9722         struct mlx5_flow_rss_desc *rss_desc;
9723         uint32_t normal_idx = 0;
9724         struct mlx5_hrxq *hrxq;
9725         uint32_t hrxq_idx;
9726
9727         MLX5_ASSERT(wks);
9728         rss_desc = &wks->rss_desc;
9729         if (num_of_dest > 1) {
9730                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9731                         /* Handle QP action for mirroring */
9732                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9733                                                     rss_desc, &hrxq_idx);
9734                         if (!hrxq)
9735                                 return rte_flow_error_set
9736                                      (error, rte_errno,
9737                                       RTE_FLOW_ERROR_TYPE_ACTION,
9738                                       NULL,
9739                                       "cannot create rx queue");
9740                         normal_idx++;
9741                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9742                         sample_act->dr_queue_action = hrxq->action;
9743                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9744                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9745                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9746                 }
9747                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9748                         normal_idx++;
9749                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9750                                 dev_flow->handle->dvh.rix_encap_decap;
9751                         sample_act->dr_encap_action =
9752                                 dev_flow->dv.encap_decap->action;
9753                 }
9754                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9755                         normal_idx++;
9756                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9757                                 dev_flow->handle->rix_port_id_action;
9758                         sample_act->dr_port_id_action =
9759                                 dev_flow->dv.port_id_action->action;
9760                 }
9761                 if (sample_act->action_flags & MLX5_FLOW_ACTION_JUMP) {
9762                         normal_idx++;
9763                         mdest_res->sample_idx[dest_index].rix_jump =
9764                                 dev_flow->handle->rix_jump;
9765                         sample_act->dr_jump_action =
9766                                 dev_flow->dv.jump->action;
9767                         dev_flow->handle->rix_jump = 0;
9768                 }
9769                 sample_act->actions_num = normal_idx;
9770                 /* update sample action resource into first index of array */
9771                 mdest_res->ft_type = res->ft_type;
9772                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9773                                 sizeof(struct mlx5_flow_sub_actions_idx));
9774                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9775                                 sizeof(struct mlx5_flow_sub_actions_list));
9776                 mdest_res->num_of_dest = num_of_dest;
9777                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9778                                                          dev_flow, error))
9779                         return rte_flow_error_set(error, EINVAL,
9780                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9781                                                   NULL, "can't create sample "
9782                                                   "action");
9783         } else {
9784                 res->sub_actions = sample_actions;
9785                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9786                         return rte_flow_error_set(error, EINVAL,
9787                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9788                                                   NULL,
9789                                                   "can't create sample action");
9790         }
9791         return 0;
9792 }
9793
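/*
 * Editor's sketch (not part of the driver): for mirroring (num_of_dest > 1)
 * the function above folds two sub-action lists into one destination-array
 * resource with a fixed layout -- the sampled copy at index 0 and the
 * normal path at the last index.  Schematically, with an assumed array
 * size:
 */
#if 0
#include <stdint.h>

#define MAX_DEST_NUM 2	/* assumed value; MLX5_MAX_DEST_NUM in the driver */

struct sub_actions_list { uint32_t actions_num; /* + dr_* pointers */ };

struct dest_array_res {
	/* [0] = sample path, [MAX_DEST_NUM - 1] = normal path. */
	struct sub_actions_list sample_act[MAX_DEST_NUM];
	uint32_t num_of_dest;
};
#endif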
9794 /**
9795  * Remove an ASO age action from age actions list.
9796  *
9797  * @param[in] dev
9798  *   Pointer to the Ethernet device structure.
9799  * @param[in] age
9800  *   Pointer to the aso age action handler.
9801  */
9802 static void
9803 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9804                                 struct mlx5_aso_age_action *age)
9805 {
9806         struct mlx5_age_info *age_info;
9807         struct mlx5_age_param *age_param = &age->age_params;
9808         struct mlx5_priv *priv = dev->data->dev_private;
9809         uint16_t expected = AGE_CANDIDATE;
9810
9811         age_info = GET_PORT_AGE_INFO(priv);
9812         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9813                                          AGE_FREE, false, __ATOMIC_RELAXED,
9814                                          __ATOMIC_RELAXED)) {
9815                 /*
9816                  * We need the lock even if it is age timeout,
9817                  * since the age action may still be in process.
9818                  */
9819                 rte_spinlock_lock(&age_info->aged_sl);
9820                 LIST_REMOVE(age, next);
9821                 rte_spinlock_unlock(&age_info->aged_sl);
9822                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9823         }
9824 }
9825
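/*
 * Editor's sketch (not part of the driver): the compare-exchange above is a
 * small state machine -- AGE_CANDIDATE can go straight to AGE_FREE, while
 * any other state (e.g. already aged out and linked on the aged list) must
 * be unlinked under the list lock first.  A standalone version of the same
 * pattern:
 */
#if 0
#include <stdint.h>

enum age_state { AGE_CANDIDATE, AGE_TMOUT, AGE_FREE };

static void
release_age_state(uint16_t *state)
{
	uint16_t expected = AGE_CANDIDATE;

	if (!__atomic_compare_exchange_n(state, &expected, AGE_FREE,
					 false /* strong CAS */,
					 __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		/* Lost the race: unlink from the aged list under its
		 * spinlock (elided here), then mark the slot free.
		 */
		__atomic_store_n(state, AGE_FREE, __ATOMIC_RELAXED);
	}
}
#endif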
9826 /**
9827  * Release an ASO age action.
9828  *
9829  * @param[in] dev
9830  *   Pointer to the Ethernet device structure.
9831  * @param[in] age_idx
9832  *   Index of ASO age action to release.
9836  *
9837  * @return
9838  *   0 when age action was removed, otherwise the number of references.
9839  */
9840 static int
9841 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
9842 {
9843         struct mlx5_priv *priv = dev->data->dev_private;
9844         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9845         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
9846         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
9847
9848         if (!ret) {
9849                 flow_dv_aso_age_remove_from_age(dev, age);
9850                 rte_spinlock_lock(&mng->free_sl);
9851                 LIST_INSERT_HEAD(&mng->free, age, next);
9852                 rte_spinlock_unlock(&mng->free_sl);
9853         }
9854         return ret;
9855 }
9856
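/*
 * Editor's sketch (not part of the driver): the release above follows the
 * usual refcount contract documented in its header -- the caller that drops
 * the last reference recycles the object, everyone else just sees how many
 * references remain.
 */
#if 0
#include <stdint.h>

static uint32_t
obj_release(uint32_t *refcnt)
{
	uint32_t ret = __atomic_sub_fetch(refcnt, 1, __ATOMIC_RELAXED);

	if (!ret) {
		/* Last owner: unlink and push back to the free list here. */
	}
	return ret;
}
#endif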
9857 /**
9858  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9859  *
9860  * @param[in] dev
9861  *   Pointer to the Ethernet device structure.
9862  *
9863  * @return
9864  *   0 on success, otherwise negative errno value and rte_errno is set.
9865  */
9866 static int
9867 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9868 {
9869         struct mlx5_priv *priv = dev->data->dev_private;
9870         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9871         void *old_pools = mng->pools;
9872         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9873         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9874         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9875
9876         if (!pools) {
9877                 rte_errno = ENOMEM;
9878                 return -ENOMEM;
9879         }
9880         if (old_pools) {
9881                 memcpy(pools, old_pools,
9882                        mng->n * sizeof(struct mlx5_aso_age_pool *));
9883                 mlx5_free(old_pools);
9884         } else {
9885                 /* First ASO flow hit allocation - starting ASO data-path. */
9886                 int ret = mlx5_aso_queue_start(priv->sh);
9887
9888                 if (ret) {
9889                         mlx5_free(pools);
9890                         return ret;
9891                 }
9892         }
9893         mng->n = resize;
9894         mng->pools = pools;
9895         return 0;
9896 }
9897
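/*
 * Editor's sketch (not part of the driver): the resize above is the classic
 * grow-by-copy idiom for a table of pointers; only the table moves, the
 * pools it points to keep their addresses and indexes.  Generic version,
 * assuming plain calloc()/free() in place of mlx5_malloc()/mlx5_free():
 */
#if 0
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static int
grow_ptr_table(void ***tbl, uint32_t *n, uint32_t step)
{
	uint32_t resize = *n + step;
	void **pools = calloc(resize, sizeof(void *));

	if (!pools)
		return -1;
	if (*tbl) {
		/* Old entries keep their indexes; only the table moves. */
		memcpy(pools, *tbl, *n * sizeof(void *));
		free(*tbl);
	}
	*tbl = pools;
	*n = resize;
	return 0;
}
#endif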
9898 /**
9899  * Create and initialize a new ASO aging pool.
9900  *
9901  * @param[in] dev
9902  *   Pointer to the Ethernet device structure.
9903  * @param[out] age_free
9904  *   Where to store the pointer to a new age action.
9905  *
9906  * @return
9907  *   The age actions pool pointer and @p age_free is set on success,
9908  *   NULL otherwise and rte_errno is set.
9909  */
9910 static struct mlx5_aso_age_pool *
9911 flow_dv_age_pool_create(struct rte_eth_dev *dev,
9912                         struct mlx5_aso_age_action **age_free)
9913 {
9914         struct mlx5_priv *priv = dev->data->dev_private;
9915         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9916         struct mlx5_aso_age_pool *pool = NULL;
9917         struct mlx5_devx_obj *obj = NULL;
9918         uint32_t i;
9919
9920         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
9921                                                     priv->sh->pdn);
9922         if (!obj) {
9923                 rte_errno = ENODATA;
9924                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
9925                 return NULL;
9926         }
9927         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9928         if (!pool) {
9929                 claim_zero(mlx5_devx_cmd_destroy(obj));
9930                 rte_errno = ENOMEM;
9931                 return NULL;
9932         }
9933         pool->flow_hit_aso_obj = obj;
9934         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
9935         rte_spinlock_lock(&mng->resize_sl);
9936         pool->index = mng->next;
9937         /* Resize pools array if there is no room for the new pool in it. */
9938         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
9939                 claim_zero(mlx5_devx_cmd_destroy(obj));
9940                 mlx5_free(pool);
9941                 rte_spinlock_unlock(&mng->resize_sl);
9942                 return NULL;
9943         }
9944         mng->pools[pool->index] = pool;
9945         mng->next++;
9946         rte_spinlock_unlock(&mng->resize_sl);
9947         /* Assign the first action in the new pool, the rest go to free list. */
9948         *age_free = &pool->actions[0];
9949         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
9950                 pool->actions[i].offset = i;
9951                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
9952         }
9953         return pool;
9954 }
9955
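/*
 * Editor's sketch (not part of the driver): the pool creation above hands
 * out actions[0] immediately and seeds the free list with the remaining
 * slots -- a standard <sys/queue.h> pattern:
 */
#if 0
#include <sys/queue.h>

struct action {
	int offset;
	LIST_ENTRY(action) next;
};
LIST_HEAD(action_free_list, action);

static struct action *
seed_pool(struct action *actions, int n, struct action_free_list *fl)
{
	int i;

	for (i = 1; i < n; i++) {
		actions[i].offset = i;
		LIST_INSERT_HEAD(fl, &actions[i], next);
	}
	return &actions[0];	/* first slot goes to the caller */
}
#endif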
9956 /**
9957  * Allocate an ASO aging bit.
9958  *
9959  * @param[in] dev
9960  *   Pointer to the Ethernet device structure.
9961  * @param[out] error
9962  *   Pointer to the error structure.
9963  *
9964  * @return
9965  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
9966  */
9967 static uint32_t
9968 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
9969 {
9970         struct mlx5_priv *priv = dev->data->dev_private;
9971         const struct mlx5_aso_age_pool *pool;
9972         struct mlx5_aso_age_action *age_free = NULL;
9973         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9974
9975         MLX5_ASSERT(mng);
9976         /* Try to get the next free age action bit. */
9977         rte_spinlock_lock(&mng->free_sl);
9978         age_free = LIST_FIRST(&mng->free);
9979         if (age_free) {
9980                 LIST_REMOVE(age_free, next);
9981         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
9982                 rte_spinlock_unlock(&mng->free_sl);
9983                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
9984                                    NULL, "failed to create ASO age pool");
9985                 return 0; /* 0 is an error. */
9986         }
9987         rte_spinlock_unlock(&mng->free_sl);
9988         pool = container_of
9989           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
9990                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
9991                                                                        actions);
9992         if (!age_free->dr_action) {
9993                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
9994                                                  error);
9995
9996                 if (reg_c < 0) {
9997                         rte_flow_error_set(error, rte_errno,
9998                                            RTE_FLOW_ERROR_TYPE_ACTION,
9999                                            NULL, "failed to get reg_c "
10000                                            "for ASO flow hit");
10001                         return 0; /* 0 is an error. */
10002                 }
10003 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
10004                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
10005                                 (priv->sh->rx_domain,
10006                                  pool->flow_hit_aso_obj->obj, age_free->offset,
10007                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
10008                                  (reg_c - REG_C_0));
10009 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
10010                 if (!age_free->dr_action) {
10011                         rte_errno = errno;
10012                         rte_spinlock_lock(&mng->free_sl);
10013                         LIST_INSERT_HEAD(&mng->free, age_free, next);
10014                         rte_spinlock_unlock(&mng->free_sl);
10015                         rte_flow_error_set(error, rte_errno,
10016                                            RTE_FLOW_ERROR_TYPE_ACTION,
10017                                            NULL, "failed to create ASO "
10018                                            "flow hit action");
10019                         return 0; /* 0 is an error. */
10020                 }
10021         }
10022         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
10023         return pool->index | ((age_free->offset + 1) << 16);
10024 }
10025
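/*
 * Editor's sketch (not part of the driver): the index returned above packs
 * the pool index into the low 16 bits and (offset + 1) into the high bits,
 * so 0 stays reserved as the error value.  The matching decode -- assumed
 * to be what flow_aso_age_get_by_idx() does -- would be:
 */
#if 0
#include <stdint.h>

static void
decode_age_idx(uint32_t age_idx, uint16_t *pool_idx, uint16_t *offset)
{
	*pool_idx = age_idx & 0xffff;	/* low bits: pool index */
	*offset = (age_idx >> 16) - 1;	/* high bits carry offset + 1 */
}
#endif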
10026 /**
10027  * Create an age action using the ASO mechanism.
10028  *
10029  * @param[in] dev
10030  *   Pointer to rte_eth_dev structure.
10031  * @param[in] age
10032  *   Pointer to the aging action configuration.
10033  * @param[out] error
10034  *   Pointer to the error structure.
10035  *
10036  * @return
10037  *   Index to the ASO age action on success, 0 otherwise.
10038  */
10039 static uint32_t
10040 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
10041                                  const struct rte_flow_action_age *age,
10042                                  struct rte_flow_error *error)
10043 {
10044         uint32_t age_idx = 0;
10045         struct mlx5_aso_age_action *aso_age;
10046
10047         age_idx = flow_dv_aso_age_alloc(dev, error);
10048         if (!age_idx)
10049                 return 0;
10050         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
10051         aso_age->age_params.context = age->context;
10052         aso_age->age_params.timeout = age->timeout;
10053         aso_age->age_params.port_id = dev->data->port_id;
10054         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
10055                          __ATOMIC_RELAXED);
10056         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
10057                          __ATOMIC_RELAXED);
10058         return age_idx;
10059 }
10060
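/*
 * Editor's sketch (not part of the driver): the application-side view of
 * this path -- an AGE action whose timeout and context end up in
 * aso_age->age_params above; aged-out contexts are later retrieved with
 * rte_flow_get_aged_flows().
 */
#if 0
#include <rte_flow.h>

static const struct rte_flow_action_age age_conf = {
	.timeout = 10,		/* seconds without a hit before aging out */
	.context = NULL,	/* opaque cookie reported back to the app */
};
static const struct rte_flow_action age_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age_conf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif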
10061 /**
10062  * Fill the flow with DV spec, lock free
10063  * (mutex should be acquired by caller).
10064  *
10065  * @param[in] dev
10066  *   Pointer to rte_eth_dev structure.
10067  * @param[in, out] dev_flow
10068  *   Pointer to the sub flow.
10069  * @param[in] attr
10070  *   Pointer to the flow attributes.
10071  * @param[in] items
10072  *   Pointer to the list of items.
10073  * @param[in] actions
10074  *   Pointer to the list of actions.
10075  * @param[out] error
10076  *   Pointer to the error structure.
10077  *
10078  * @return
10079  *   0 on success, a negative errno value otherwise and rte_errno is set.
10080  */
10081 static int
10082 flow_dv_translate(struct rte_eth_dev *dev,
10083                   struct mlx5_flow *dev_flow,
10084                   const struct rte_flow_attr *attr,
10085                   const struct rte_flow_item items[],
10086                   const struct rte_flow_action actions[],
10087                   struct rte_flow_error *error)
10088 {
10089         struct mlx5_priv *priv = dev->data->dev_private;
10090         struct mlx5_dev_config *dev_conf = &priv->config;
10091         struct rte_flow *flow = dev_flow->flow;
10092         struct mlx5_flow_handle *handle = dev_flow->handle;
10093         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10094         struct mlx5_flow_rss_desc *rss_desc;
10095         uint64_t item_flags = 0;
10096         uint64_t last_item = 0;
10097         uint64_t action_flags = 0;
10098         uint64_t priority = attr->priority;
10099         struct mlx5_flow_dv_matcher matcher = {
10100                 .mask = {
10101                         .size = sizeof(matcher.mask.buf) -
10102                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
10103                 },
10104         };
10105         int actions_n = 0;
10106         bool actions_end = false;
10107         union {
10108                 struct mlx5_flow_dv_modify_hdr_resource res;
10109                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
10110                             sizeof(struct mlx5_modification_cmd) *
10111                             (MLX5_MAX_MODIFY_NUM + 1)];
10112         } mhdr_dummy;
10113         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
10114         const struct rte_flow_action_count *count = NULL;
10115         const struct rte_flow_action_age *age = NULL;
10116         union flow_dv_attr flow_attr = { .attr = 0 };
10117         uint32_t tag_be;
10118         union mlx5_flow_tbl_key tbl_key;
10119         uint32_t modify_action_position = UINT32_MAX;
10120         void *match_mask = matcher.mask.buf;
10121         void *match_value = dev_flow->dv.value.buf;
10122         uint8_t next_protocol = 0xff;
10123         struct rte_vlan_hdr vlan = { 0 };
10124         struct mlx5_flow_dv_dest_array_resource mdest_res;
10125         struct mlx5_flow_dv_sample_resource sample_res;
10126         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10127         const struct rte_flow_action_sample *sample = NULL;
10128         struct mlx5_flow_sub_actions_list *sample_act;
10129         uint32_t sample_act_pos = UINT32_MAX;
10130         uint32_t num_of_dest = 0;
10131         int tmp_actions_n = 0;
10132         uint32_t table;
10133         int ret = 0;
10134         const struct mlx5_flow_tunnel *tunnel;
10135         struct flow_grp_info grp_info = {
10136                 .external = !!dev_flow->external,
10137                 .transfer = !!attr->transfer,
10138                 .fdb_def_rule = !!priv->fdb_def_rule,
10139                 .skip_scale = !!dev_flow->skip_scale,
10140         };
10141
10142         if (!wks)
10143                 return rte_flow_error_set(error, ENOMEM,
10144                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10145                                           NULL,
10146                                           "failed to push flow workspace");
10147         rss_desc = &wks->rss_desc;
10148         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
10149         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
10150         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10151                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10152         /* update normal path action resource into last index of array */
10153         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
10154         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
10155                  flow_items_to_tunnel(items) :
10156                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
10157                  flow_actions_to_tunnel(actions) :
10158                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
10161         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
10162                                 (dev, tunnel, attr, items, actions);
10163         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
10164                                        &grp_info, error);
10165         if (ret)
10166                 return ret;
10167         dev_flow->dv.group = table;
10168         if (attr->transfer)
10169                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10170         if (priority == MLX5_FLOW_PRIO_RSVD)
10171                 priority = dev_conf->flow_prio - 1;
10172         /* The number of actions must be set to 0 in case of a dirty stack. */
10173         mhdr_res->actions_num = 0;
10174         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
10175                 /*
10176                  * Do not add the decap action if the match rule drops
10177                  * packets: HW rejects rules combining decap & drop.
10178                  *
10179                  * If a tunnel match rule was inserted before the matching
10180                  * tunnel set rule, the flow table used in the match rule
10181                  * must be registered. The current implementation handles
10182                  * that in flow_dv_match_register() at the function end.
10183                  */
10184                 bool add_decap = true;
10185                 const struct rte_flow_action *ptr = actions;
10186
10187                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
10188                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
10189                                 add_decap = false;
10190                                 break;
10191                         }
10192                 }
10193                 if (add_decap) {
10194                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10195                                                            attr->transfer,
10196                                                            error))
10197                                 return -rte_errno;
10198                         dev_flow->dv.actions[actions_n++] =
10199                                         dev_flow->dv.encap_decap->action;
10200                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10201                 }
10202         }
10203         for (; !actions_end ; actions++) {
10204                 const struct rte_flow_action_queue *queue;
10205                 const struct rte_flow_action_rss *rss;
10206                 const struct rte_flow_action *action = actions;
10207                 const uint8_t *rss_key;
10208                 const struct rte_flow_action_meter *mtr;
10209                 struct mlx5_flow_tbl_resource *tbl;
10210                 struct mlx5_aso_age_action *age_act;
10211                 uint32_t port_id = 0;
10212                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
10213                 int action_type = actions->type;
10214                 const struct rte_flow_action *found_action = NULL;
10215                 struct mlx5_flow_meter *fm = NULL;
10216                 uint32_t jump_group = 0;
10217
10218                 if (!mlx5_flow_os_action_supported(action_type))
10219                         return rte_flow_error_set(error, ENOTSUP,
10220                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10221                                                   actions,
10222                                                   "action not supported");
10223                 switch (action_type) {
10224                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
10225                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
10226                         break;
10227                 case RTE_FLOW_ACTION_TYPE_VOID:
10228                         break;
10229                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10230                         if (flow_dv_translate_action_port_id(dev, action,
10231                                                              &port_id, error))
10232                                 return -rte_errno;
10233                         port_id_resource.port_id = port_id;
10234                         MLX5_ASSERT(!handle->rix_port_id_action);
10235                         if (flow_dv_port_id_action_resource_register
10236                             (dev, &port_id_resource, dev_flow, error))
10237                                 return -rte_errno;
10238                         dev_flow->dv.actions[actions_n++] =
10239                                         dev_flow->dv.port_id_action->action;
10240                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10241                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
10242                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10243                         num_of_dest++;
10244                         break;
10245                 case RTE_FLOW_ACTION_TYPE_FLAG:
10246                         action_flags |= MLX5_FLOW_ACTION_FLAG;
10247                         dev_flow->handle->mark = 1;
10248                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10249                                 struct rte_flow_action_mark mark = {
10250                                         .id = MLX5_FLOW_MARK_DEFAULT,
10251                                 };
10252
10253                                 if (flow_dv_convert_action_mark(dev, &mark,
10254                                                                 mhdr_res,
10255                                                                 error))
10256                                         return -rte_errno;
10257                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10258                                 break;
10259                         }
10260                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
10261                         /*
10262                          * Only one FLAG or MARK is supported per device flow
10263                          * right now. So the pointer to the tag resource must be
10264                          * zero before the register process.
10265                          */
10266                         MLX5_ASSERT(!handle->dvh.rix_tag);
10267                         if (flow_dv_tag_resource_register(dev, tag_be,
10268                                                           dev_flow, error))
10269                                 return -rte_errno;
10270                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10271                         dev_flow->dv.actions[actions_n++] =
10272                                         dev_flow->dv.tag_resource->action;
10273                         break;
10274                 case RTE_FLOW_ACTION_TYPE_MARK:
10275                         action_flags |= MLX5_FLOW_ACTION_MARK;
10276                         dev_flow->handle->mark = 1;
10277                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10278                                 const struct rte_flow_action_mark *mark =
10279                                         (const struct rte_flow_action_mark *)
10280                                                 actions->conf;
10281
10282                                 if (flow_dv_convert_action_mark(dev, mark,
10283                                                                 mhdr_res,
10284                                                                 error))
10285                                         return -rte_errno;
10286                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10287                                 break;
10288                         }
10289                         /* Fall-through */
10290                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
10291                         /* Legacy (non-extensive) MARK action. */
10292                         tag_be = mlx5_flow_mark_set
10293                               (((const struct rte_flow_action_mark *)
10294                                (actions->conf))->id);
10295                         MLX5_ASSERT(!handle->dvh.rix_tag);
10296                         if (flow_dv_tag_resource_register(dev, tag_be,
10297                                                           dev_flow, error))
10298                                 return -rte_errno;
10299                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10300                         dev_flow->dv.actions[actions_n++] =
10301                                         dev_flow->dv.tag_resource->action;
10302                         break;
10303                 case RTE_FLOW_ACTION_TYPE_SET_META:
10304                         if (flow_dv_convert_action_set_meta
10305                                 (dev, mhdr_res, attr,
10306                                  (const struct rte_flow_action_set_meta *)
10307                                   actions->conf, error))
10308                                 return -rte_errno;
10309                         action_flags |= MLX5_FLOW_ACTION_SET_META;
10310                         break;
10311                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
10312                         if (flow_dv_convert_action_set_tag
10313                                 (dev, mhdr_res,
10314                                  (const struct rte_flow_action_set_tag *)
10315                                   actions->conf, error))
10316                                 return -rte_errno;
10317                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10318                         break;
10319                 case RTE_FLOW_ACTION_TYPE_DROP:
10320                         action_flags |= MLX5_FLOW_ACTION_DROP;
10321                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
10322                         break;
10323                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10324                         queue = actions->conf;
10325                         rss_desc->queue_num = 1;
10326                         rss_desc->queue[0] = queue->index;
10327                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10328                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10329                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
10330                         num_of_dest++;
10331                         break;
10332                 case RTE_FLOW_ACTION_TYPE_RSS:
10333                         rss = actions->conf;
10334                         memcpy(rss_desc->queue, rss->queue,
10335                                rss->queue_num * sizeof(uint16_t));
10336                         rss_desc->queue_num = rss->queue_num;
10337                         /* NULL RSS key indicates default RSS key. */
10338                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10339                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10340                         /*
10341                          * rss->level and rss->types should be set in advance
10342                          * when expanding items for RSS.
10343                          */
10344                         action_flags |= MLX5_FLOW_ACTION_RSS;
10345                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
10346                                 MLX5_FLOW_FATE_SHARED_RSS :
10347                                 MLX5_FLOW_FATE_QUEUE;
10348                         break;
10349                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
10350                         flow->age = (uint32_t)(uintptr_t)(action->conf);
10351                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
10352                         __atomic_fetch_add(&age_act->refcnt, 1,
10353                                            __ATOMIC_RELAXED);
10354                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
10355                         action_flags |= MLX5_FLOW_ACTION_AGE;
10356                         break;
10357                 case RTE_FLOW_ACTION_TYPE_AGE:
10358                         if (priv->sh->flow_hit_aso_en && attr->group) {
10359                                 flow->age = flow_dv_translate_create_aso_age
10360                                                 (dev, action->conf, error);
10361                                 if (!flow->age)
10362                                         return rte_flow_error_set
10363                                                 (error, rte_errno,
10364                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10365                                                  NULL,
10366                                                  "can't create ASO age action");
10367                                 dev_flow->dv.actions[actions_n++] =
10368                                           (flow_aso_age_get_by_idx
10369                                                 (dev, flow->age))->dr_action;
10370                                 action_flags |= MLX5_FLOW_ACTION_AGE;
10371                                 break;
10372                         }
10373                         /* Fall-through */
10374                 case RTE_FLOW_ACTION_TYPE_COUNT:
10375                         if (!dev_conf->devx) {
10376                                 return rte_flow_error_set
10377                                               (error, ENOTSUP,
10378                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10379                                                NULL,
10380                                                "count action not supported");
10381                         }
10382                         /* Save information first, will apply later. */
10383                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
10384                                 count = action->conf;
10385                         else
10386                                 age = action->conf;
10387                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10388                         break;
10389                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
10390                         dev_flow->dv.actions[actions_n++] =
10391                                                 priv->sh->pop_vlan_action;
10392                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
10393                         break;
10394                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
10395                         if (!(action_flags &
10396                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
10397                                 flow_dev_get_vlan_info_from_items(items, &vlan);
10398                         vlan.eth_proto = rte_be_to_cpu_16
10399                              ((((const struct rte_flow_action_of_push_vlan *)
10400                                                    actions->conf)->ethertype));
10401                         found_action = mlx5_flow_find_action
10402                                         (actions + 1,
10403                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
10404                         if (found_action)
10405                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
10406                         found_action = mlx5_flow_find_action
10407                                         (actions + 1,
10408                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
10409                         if (found_action)
10410                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
10411                         if (flow_dv_create_action_push_vlan
10412                                             (dev, attr, &vlan, dev_flow, error))
10413                                 return -rte_errno;
10414                         dev_flow->dv.actions[actions_n++] =
10415                                         dev_flow->dv.push_vlan_res->action;
10416                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
10417                         break;
10418                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
10419                         /* The OF_PUSH_VLAN action already handled this one. */
10420                         MLX5_ASSERT(action_flags &
10421                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
10422                         break;
10423                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
10424                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
10425                                 break;
10426                         flow_dev_get_vlan_info_from_items(items, &vlan);
10427                         mlx5_update_vlan_vid_pcp(actions, &vlan);
10428                         /* If no VLAN push - this is a modify header action */
10429                         if (flow_dv_convert_action_modify_vlan_vid
10430                                                 (mhdr_res, actions, error))
10431                                 return -rte_errno;
10432                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
10433                         break;
10434                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10435                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10436                         if (flow_dv_create_action_l2_encap(dev, actions,
10437                                                            dev_flow,
10438                                                            attr->transfer,
10439                                                            error))
10440                                 return -rte_errno;
10441                         dev_flow->dv.actions[actions_n++] =
10442                                         dev_flow->dv.encap_decap->action;
10443                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10444                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10445                                 sample_act->action_flags |=
10446                                                         MLX5_FLOW_ACTION_ENCAP;
10447                         break;
10448                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
10449                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
10450                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10451                                                            attr->transfer,
10452                                                            error))
10453                                 return -rte_errno;
10454                         dev_flow->dv.actions[actions_n++] =
10455                                         dev_flow->dv.encap_decap->action;
10456                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10457                         break;
10458                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10459                         /* Handle encap with preceding decap. */
10460                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
10461                                 if (flow_dv_create_action_raw_encap
10462                                         (dev, actions, dev_flow, attr, error))
10463                                         return -rte_errno;
10464                                 dev_flow->dv.actions[actions_n++] =
10465                                         dev_flow->dv.encap_decap->action;
10466                         } else {
10467                                 /* Handle encap without preceding decap. */
10468                                 if (flow_dv_create_action_l2_encap
10469                                     (dev, actions, dev_flow, attr->transfer,
10470                                      error))
10471                                         return -rte_errno;
10472                                 dev_flow->dv.actions[actions_n++] =
10473                                         dev_flow->dv.encap_decap->action;
10474                         }
10475                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10476                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10477                                 sample_act->action_flags |=
10478                                                         MLX5_FLOW_ACTION_ENCAP;
10479                         break;
10480                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
10481                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
10482                                 ;
10483                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
10484                                 if (flow_dv_create_action_l2_decap
10485                                     (dev, dev_flow, attr->transfer, error))
10486                                         return -rte_errno;
10487                                 dev_flow->dv.actions[actions_n++] =
10488                                         dev_flow->dv.encap_decap->action;
10489                         }
10490                         /* If decap is followed by encap, handle it at encap. */
10491                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10492                         break;
10493                 case RTE_FLOW_ACTION_TYPE_JUMP:
10494                         jump_group = ((const struct rte_flow_action_jump *)
10495                                                         action->conf)->group;
10496                         grp_info.std_tbl_fix = 0;
10497                         grp_info.skip_scale = 0;
10498                         ret = mlx5_flow_group_to_table(dev, tunnel,
10499                                                        jump_group,
10500                                                        &table,
10501                                                        &grp_info, error);
10502                         if (ret)
10503                                 return ret;
10504                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
10505                                                        attr->transfer,
10506                                                        !!dev_flow->external,
10507                                                        tunnel, jump_group, 0,
10508                                                        error);
10509                         if (!tbl)
10510                                 return rte_flow_error_set
10511                                                 (error, errno,
10512                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10513                                                  NULL,
10514                                                  "cannot create jump action.");
10515                         if (flow_dv_jump_tbl_resource_register
10516                             (dev, tbl, dev_flow, error)) {
10517                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10518                                 return rte_flow_error_set
10519                                                 (error, errno,
10520                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10521                                                  NULL,
10522                                                  "cannot create jump action.");
10523                         }
10524                         dev_flow->dv.actions[actions_n++] =
10525                                         dev_flow->dv.jump->action;
10526                         action_flags |= MLX5_FLOW_ACTION_JUMP;
10527                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
10528                         sample_act->action_flags |= MLX5_FLOW_ACTION_JUMP;
10529                         num_of_dest++;
10530                         break;
10531                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
10532                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
10533                         if (flow_dv_convert_action_modify_mac
10534                                         (mhdr_res, actions, error))
10535                                 return -rte_errno;
10536                         action_flags |= actions->type ==
10537                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
10538                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
10539                                         MLX5_FLOW_ACTION_SET_MAC_DST;
10540                         break;
10541                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
10542                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
10543                         if (flow_dv_convert_action_modify_ipv4
10544                                         (mhdr_res, actions, error))
10545                                 return -rte_errno;
10546                         action_flags |= actions->type ==
10547                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
10548                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
10549                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
10550                         break;
10551                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
10552                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
10553                         if (flow_dv_convert_action_modify_ipv6
10554                                         (mhdr_res, actions, error))
10555                                 return -rte_errno;
10556                         action_flags |= actions->type ==
10557                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
10558                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
10559                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
10560                         break;
10561                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
10562                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
10563                         if (flow_dv_convert_action_modify_tp
10564                                         (mhdr_res, actions, items,
10565                                          &flow_attr, dev_flow, !!(action_flags &
10566                                          MLX5_FLOW_ACTION_DECAP), error))
10567                                 return -rte_errno;
10568                         action_flags |= actions->type ==
10569                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10570                                         MLX5_FLOW_ACTION_SET_TP_SRC :
10571                                         MLX5_FLOW_ACTION_SET_TP_DST;
10572                         break;
10573                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10574                         if (flow_dv_convert_action_modify_dec_ttl
10575                                         (mhdr_res, items, &flow_attr, dev_flow,
10576                                          !!(action_flags &
10577                                          MLX5_FLOW_ACTION_DECAP), error))
10578                                 return -rte_errno;
10579                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10580                         break;
10581                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
10582                         if (flow_dv_convert_action_modify_ttl
10583                                         (mhdr_res, actions, items, &flow_attr,
10584                                          dev_flow, !!(action_flags &
10585                                          MLX5_FLOW_ACTION_DECAP), error))
10586                                 return -rte_errno;
10587                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10588                         break;
10589                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10590                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10591                         if (flow_dv_convert_action_modify_tcp_seq
10592                                         (mhdr_res, actions, error))
10593                                 return -rte_errno;
10594                         action_flags |= actions->type ==
10595                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10596                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
10597                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10598                         break;
10599
10600                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10601                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10602                         if (flow_dv_convert_action_modify_tcp_ack
10603                                         (mhdr_res, actions, error))
10604                                 return -rte_errno;
10605                         action_flags |= actions->type ==
10606                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10607                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
10608                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
10609                         break;
10610                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10611                         if (flow_dv_convert_action_set_reg
10612                                         (mhdr_res, actions, error))
10613                                 return -rte_errno;
10614                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10615                         break;
10616                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10617                         if (flow_dv_convert_action_copy_mreg
10618                                         (dev, mhdr_res, actions, error))
10619                                 return -rte_errno;
10620                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10621                         break;
10622                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10623                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10624                         dev_flow->handle->fate_action =
10625                                         MLX5_FLOW_FATE_DEFAULT_MISS;
10626                         break;
10627                 case RTE_FLOW_ACTION_TYPE_METER:
10628                         mtr = actions->conf;
10629                         if (!flow->meter) {
10630                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10631                                                             attr, error);
10632                                 if (!fm)
10633                                         return rte_flow_error_set(error,
10634                                                 rte_errno,
10635                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10636                                                 NULL,
10637                                                 "meter not found "
10638                                                 "or invalid parameters");
10639                                 flow->meter = fm->idx;
10640                         }
10641                         /* Set the meter action. */
10642                         if (!fm) {
10643                                 fm = mlx5_ipool_get(priv->sh->ipool
10644                                                 [MLX5_IPOOL_MTR], flow->meter);
10645                                 if (!fm)
10646                                         return rte_flow_error_set(error,
10647                                                 rte_errno,
10648                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10649                                                 NULL,
10650                                                 "meter not found "
10651                                                 "or invalid parameters");
10652                         }
10653                         dev_flow->dv.actions[actions_n++] =
10654                                 fm->mfts->meter_action;
10655                         action_flags |= MLX5_FLOW_ACTION_METER;
10656                         break;
10657                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10658                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10659                                                               actions, error))
10660                                 return -rte_errno;
10661                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10662                         break;
10663                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10664                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10665                                                               actions, error))
10666                                 return -rte_errno;
10667                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10668                         break;
10669                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
10670                         sample_act_pos = actions_n;
10671                         sample = (const struct rte_flow_action_sample *)
10672                                  action->conf;
10673                         actions_n++;
10674                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10675                         /* Put the encap action in the sample sub-action list when used with a port id action. */
10676                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10677                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10678                                 sample_act->action_flags |=
10679                                                         MLX5_FLOW_ACTION_ENCAP;
10680                         break;
10681                 case RTE_FLOW_ACTION_TYPE_END:
10682                         actions_end = true;
10683                         if (mhdr_res->actions_num) {
10684                                 /* create modify action if needed. */
10685                                 if (flow_dv_modify_hdr_resource_register
10686                                         (dev, mhdr_res, dev_flow, error))
10687                                         return -rte_errno;
10688                                 dev_flow->dv.actions[modify_action_position] =
10689                                         handle->dvh.modify_hdr->action;
10690                         }
10691                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10692                                 flow->counter =
10693                                         flow_dv_translate_create_counter(dev,
10694                                                 dev_flow, count, age);
10695
10696                                 if (!flow->counter)
10697                                         return rte_flow_error_set
10698                                                 (error, rte_errno,
10699                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10700                                                 NULL,
10701                                                 "cannot create counter"
10702                                                 " object.");
10703                                 dev_flow->dv.actions[actions_n] =
10704                                           (flow_dv_counter_get_by_idx(dev,
10705                                           flow->counter, NULL))->action;
10706                                 actions_n++;
10707                         }
10708                 default:
10709                         break;
10710                 }
10711                 if (mhdr_res->actions_num &&
10712                     modify_action_position == UINT32_MAX)
10713                         modify_action_position = actions_n++;
10714         }
10715         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10716                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10717                 int item_type = items->type;
10718
10719                 if (!mlx5_flow_os_item_supported(item_type))
10720                         return rte_flow_error_set(error, ENOTSUP,
10721                                                   RTE_FLOW_ERROR_TYPE_ITEM,
10722                                                   NULL, "item not supported");
10723                 switch (item_type) {
10724                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
10725                         flow_dv_translate_item_port_id
10726                                 (dev, match_mask, match_value, items, attr);
10727                         last_item = MLX5_FLOW_ITEM_PORT_ID;
10728                         break;
10729                 case RTE_FLOW_ITEM_TYPE_ETH:
10730                         flow_dv_translate_item_eth(match_mask, match_value,
10731                                                    items, tunnel,
10732                                                    dev_flow->dv.group);
10733                         matcher.priority = action_flags &
10734                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
10735                                         !dev_flow->external ?
10736                                         MLX5_PRIORITY_MAP_L3 :
10737                                         MLX5_PRIORITY_MAP_L2;
10738                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10739                                              MLX5_FLOW_LAYER_OUTER_L2;
10740                         break;
10741                 case RTE_FLOW_ITEM_TYPE_VLAN:
10742                         flow_dv_translate_item_vlan(dev_flow,
10743                                                     match_mask, match_value,
10744                                                     items, tunnel,
10745                                                     dev_flow->dv.group);
10746                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10747                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10748                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10749                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10750                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10751                         break;
10752                 case RTE_FLOW_ITEM_TYPE_IPV4:
10753                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10754                                                   &item_flags, &tunnel);
10755                         flow_dv_translate_item_ipv4(match_mask, match_value,
10756                                                     items, tunnel,
10757                                                     dev_flow->dv.group);
10758                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10759                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10760                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10761                         if (items->mask != NULL &&
10762                             ((const struct rte_flow_item_ipv4 *)
10763                              items->mask)->hdr.next_proto_id) {
10764                                 next_protocol =
10765                                         ((const struct rte_flow_item_ipv4 *)
10766                                          (items->spec))->hdr.next_proto_id;
10767                                 next_protocol &=
10768                                         ((const struct rte_flow_item_ipv4 *)
10769                                          (items->mask))->hdr.next_proto_id;
10770                         } else {
10771                                 /* Reset for inner layer. */
10772                                 next_protocol = 0xff;
10773                         }
10774                         break;
10775                 case RTE_FLOW_ITEM_TYPE_IPV6:
10776                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10777                                                   &item_flags, &tunnel);
10778                         flow_dv_translate_item_ipv6(match_mask, match_value,
10779                                                     items, tunnel,
10780                                                     dev_flow->dv.group);
10781                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10782                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10783                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10784                         if (items->mask != NULL &&
10785                             ((const struct rte_flow_item_ipv6 *)
10786                              items->mask)->hdr.proto) {
10787                                 next_protocol =
10788                                         ((const struct rte_flow_item_ipv6 *)
10789                                          items->spec)->hdr.proto;
10790                                 next_protocol &=
10791                                         ((const struct rte_flow_item_ipv6 *)
10792                                          items->mask)->hdr.proto;
10793                         } else {
10794                                 /* Reset for inner layer. */
10795                                 next_protocol = 0xff;
10796                         }
10797                         break;
10798                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10799                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10800                                                              match_value,
10801                                                              items, tunnel);
10802                         last_item = tunnel ?
10803                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10804                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10805                         if (items->mask != NULL &&
10806                             ((const struct rte_flow_item_ipv6_frag_ext *)
10807                              items->mask)->hdr.next_header) {
10808                                 next_protocol =
10809                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10810                                  items->spec)->hdr.next_header;
10811                                 next_protocol &=
10812                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10813                                  items->mask)->hdr.next_header;
10814                         } else {
10815                                 /* Reset for inner layer. */
10816                                 next_protocol = 0xff;
10817                         }
10818                         break;
10819                 case RTE_FLOW_ITEM_TYPE_TCP:
10820                         flow_dv_translate_item_tcp(match_mask, match_value,
10821                                                    items, tunnel);
10822                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10823                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10824                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10825                         break;
10826                 case RTE_FLOW_ITEM_TYPE_UDP:
10827                         flow_dv_translate_item_udp(match_mask, match_value,
10828                                                    items, tunnel);
10829                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10830                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10831                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10832                         break;
10833                 case RTE_FLOW_ITEM_TYPE_GRE:
10834                         flow_dv_translate_item_gre(match_mask, match_value,
10835                                                    items, tunnel);
10836                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10837                         last_item = MLX5_FLOW_LAYER_GRE;
10838                         break;
10839                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10840                         flow_dv_translate_item_gre_key(match_mask,
10841                                                        match_value, items);
10842                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10843                         break;
10844                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10845                         flow_dv_translate_item_nvgre(match_mask, match_value,
10846                                                      items, tunnel);
10847                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10848                         last_item = MLX5_FLOW_LAYER_GRE;
10849                         break;
10850                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10851                         flow_dv_translate_item_vxlan(match_mask, match_value,
10852                                                      items, tunnel);
10853                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10854                         last_item = MLX5_FLOW_LAYER_VXLAN;
10855                         break;
10856                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10857                         flow_dv_translate_item_vxlan_gpe(match_mask,
10858                                                          match_value, items,
10859                                                          tunnel);
10860                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10861                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10862                         break;
10863                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10864                         flow_dv_translate_item_geneve(match_mask, match_value,
10865                                                       items, tunnel);
10866                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10867                         last_item = MLX5_FLOW_LAYER_GENEVE;
10868                         break;
10869                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
10870                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
10871                                                           match_value,
10872                                                           items, error);
10873                         if (ret)
10874                                 return rte_flow_error_set(error, -ret,
10875                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
10876                                         "cannot create GENEVE TLV option");
10877                         flow->geneve_tlv_option = 1;
10878                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
10879                         break;
10880                 case RTE_FLOW_ITEM_TYPE_MPLS:
10881                         flow_dv_translate_item_mpls(match_mask, match_value,
10882                                                     items, last_item, tunnel);
10883                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10884                         last_item = MLX5_FLOW_LAYER_MPLS;
10885                         break;
10886                 case RTE_FLOW_ITEM_TYPE_MARK:
10887                         flow_dv_translate_item_mark(dev, match_mask,
10888                                                     match_value, items);
10889                         last_item = MLX5_FLOW_ITEM_MARK;
10890                         break;
10891                 case RTE_FLOW_ITEM_TYPE_META:
10892                         flow_dv_translate_item_meta(dev, match_mask,
10893                                                     match_value, attr, items);
10894                         last_item = MLX5_FLOW_ITEM_METADATA;
10895                         break;
10896                 case RTE_FLOW_ITEM_TYPE_ICMP:
10897                         flow_dv_translate_item_icmp(match_mask, match_value,
10898                                                     items, tunnel);
10899                         last_item = MLX5_FLOW_LAYER_ICMP;
10900                         break;
10901                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10902                         flow_dv_translate_item_icmp6(match_mask, match_value,
10903                                                       items, tunnel);
10904                         last_item = MLX5_FLOW_LAYER_ICMP6;
10905                         break;
10906                 case RTE_FLOW_ITEM_TYPE_TAG:
10907                         flow_dv_translate_item_tag(dev, match_mask,
10908                                                    match_value, items);
10909                         last_item = MLX5_FLOW_ITEM_TAG;
10910                         break;
10911                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10912                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10913                                                         match_value, items);
10914                         last_item = MLX5_FLOW_ITEM_TAG;
10915                         break;
10916                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10917                         flow_dv_translate_item_tx_queue(dev, match_mask,
10918                                                         match_value,
10919                                                         items);
10920                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10921                         break;
10922                 case RTE_FLOW_ITEM_TYPE_GTP:
10923                         flow_dv_translate_item_gtp(match_mask, match_value,
10924                                                    items, tunnel);
10925                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10926                         last_item = MLX5_FLOW_LAYER_GTP;
10927                         break;
10928                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
10929                         ret = flow_dv_translate_item_gtp_psc(match_mask,
10930                                                           match_value,
10931                                                           items);
10932                         if (ret)
10933                                 return rte_flow_error_set(error, -ret,
10934                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
10935                                         "cannot create GTP PSC item");
10936                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
10937                         break;
10938                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10939                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10940                                 /* Create it only the first time it is used. */
10941                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10942                                 if (ret)
10943                                         return rte_flow_error_set
10944                                                 (error, -ret,
10945                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10946                                                 NULL,
10947                                                 "cannot create eCPRI parser");
10948                         }
10949                         /* Adjust the matcher mask length and device flow value size. */
10950                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10951                         dev_flow->dv.value.size =
10952                                         MLX5_ST_SZ_BYTES(fte_match_param);
10953                         flow_dv_translate_item_ecpri(dev, match_mask,
10954                                                      match_value, items);
10955                         /* No other protocol should follow eCPRI layer. */
10956                         last_item = MLX5_FLOW_LAYER_ECPRI;
10957                         break;
10958                 default:
10959                         break;
10960                 }
10961                 item_flags |= last_item;
10962         }
10963         /*
10964          * When E-Switch mode is enabled, there are two cases where the
10965          * source port must be set manually.
10966          * The first is the NIC steering rule, and the second is the
10967          * E-Switch rule where no port_id item was found. In both cases
10968          * the source port is set according to the current port in use.
10969          */
10970         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10971             (priv->representor || priv->master)) {
10972                 if (flow_dv_translate_item_port_id(dev, match_mask,
10973                                                    match_value, NULL, attr))
10974                         return -rte_errno;
10975         }
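        /*
         * Note (illustrative, not normative): passing a NULL item to
         * flow_dv_translate_item_port_id() here is assumed to make it fall
         * back to the current device's own vport metadata, which is how the
         * implicit source port match is applied.
         */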
10976 #ifdef RTE_LIBRTE_MLX5_DEBUG
10977         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10978                                               dev_flow->dv.value.buf));
10979 #endif
10980         /*
10981          * Layers may be already initialized from prefix flow if this dev_flow
10982          * is the suffix flow.
10983          */
10984         handle->layers |= item_flags;
10985         if (action_flags & MLX5_FLOW_ACTION_RSS)
10986                 flow_dv_hashfields_set(dev_flow, rss_desc);
10987         /* If the sample action contains an RSS action, the Sample/Mirror
10988          * resource should be registered after the hash fields are updated.
10989          */
10990         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
10991                 ret = flow_dv_translate_action_sample(dev,
10992                                                       sample,
10993                                                       dev_flow, attr,
10994                                                       &num_of_dest,
10995                                                       sample_actions,
10996                                                       &sample_res,
10997                                                       error);
10998                 if (ret < 0)
10999                         return ret;
11000                 ret = flow_dv_create_action_sample(dev,
11001                                                    dev_flow,
11002                                                    num_of_dest,
11003                                                    &sample_res,
11004                                                    &mdest_res,
11005                                                    sample_actions,
11006                                                    action_flags,
11007                                                    error);
11008                 if (ret < 0)
11009                         return rte_flow_error_set
11010                                                 (error, rte_errno,
11011                                                 RTE_FLOW_ERROR_TYPE_ACTION,
11012                                                 NULL,
11013                                                 "cannot create sample action");
11014                 if (num_of_dest > 1) {
11015                         dev_flow->dv.actions[sample_act_pos] =
11016                         dev_flow->dv.dest_array_res->action;
11017                 } else {
11018                         dev_flow->dv.actions[sample_act_pos] =
11019                         dev_flow->dv.sample_res->verbs_action;
11020                 }
11021         }
11022         /*
11023          * For multiple destinations (sample action with ratio=1), the encap
11024          * action and the port_id action are combined into a group action.
11025          * The original actions must therefore be removed from the flow and
11026          * the sample action is used instead.
11027          */
11028         if (num_of_dest > 1 &&
11029             (sample_act->dr_port_id_action || sample_act->dr_jump_action)) {
11030                 int i;
11031                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11032
11033                 for (i = 0; i < actions_n; i++) {
11034                         if ((sample_act->dr_encap_action &&
11035                                 sample_act->dr_encap_action ==
11036                                 dev_flow->dv.actions[i]) ||
11037                                 (sample_act->dr_port_id_action &&
11038                                 sample_act->dr_port_id_action ==
11039                                 dev_flow->dv.actions[i]) ||
11040                                 (sample_act->dr_jump_action &&
11041                                 sample_act->dr_jump_action ==
11042                                 dev_flow->dv.actions[i]))
11043                                 continue;
11044                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
11045                 }
11046                 memcpy((void *)dev_flow->dv.actions,
11047                                 (void *)temp_actions,
11048                                 tmp_actions_n * sizeof(void *));
11049                 actions_n = tmp_actions_n;
11050         }
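        /*
         * Illustrative example (hypothetical action list): with ratio=1
         * sampling to a port, an original list such as
         *     [count, encap, port_id, sample]
         * is compacted by the loop above to
         *     [count, sample]
         * because the encap and port_id handles are already embedded in the
         * sample's destination array action.
         */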
11051         dev_flow->dv.actions_n = actions_n;
11052         dev_flow->act_flags = action_flags;
11053         /* Register matcher. */
11054         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
11055                                     matcher.mask.size);
11056         matcher.priority = mlx5_os_flow_adjust_priority(dev,
11057                                                         priority,
11058                                                         matcher.priority);
11059         /* The reserved field does not need to be set to 0 here. */
11060         tbl_key.domain = attr->transfer;
11061         tbl_key.direction = attr->egress;
11062         tbl_key.table_id = dev_flow->dv.group;
11063         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
11064                                      tunnel, attr->group, error))
11065                 return -rte_errno;
11066         return 0;
11067 }
11068
11069 /**
11070  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11071  * and tunnel.
11072  *
11073  * @param[in, out] action
11074  *   Shared RSS action holding hash RX queue objects.
11075  * @param[in] hash_fields
11076  *   Defines combination of packet fields to participate in RX hash.
11077  * @param[in] tunnel
11078  *   Tunnel type
11079  * @param[in] hrxq_idx
11080  *   Hash RX queue index to set.
11081  *
11082  * @return
11083  *   0 on success, otherwise negative errno value.
11084  */
11085 static int
11086 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
11087                               const uint64_t hash_fields,
11088                               const int tunnel,
11089                               uint32_t hrxq_idx)
11090 {
11091         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
11092
11093         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11094         case MLX5_RSS_HASH_IPV4:
11095                 hrxqs[0] = hrxq_idx;
11096                 return 0;
11097         case MLX5_RSS_HASH_IPV4_TCP:
11098                 hrxqs[1] = hrxq_idx;
11099                 return 0;
11100         case MLX5_RSS_HASH_IPV4_UDP:
11101                 hrxqs[2] = hrxq_idx;
11102                 return 0;
11103         case MLX5_RSS_HASH_IPV6:
11104                 hrxqs[3] = hrxq_idx;
11105                 return 0;
11106         case MLX5_RSS_HASH_IPV6_TCP:
11107                 hrxqs[4] = hrxq_idx;
11108                 return 0;
11109         case MLX5_RSS_HASH_IPV6_UDP:
11110                 hrxqs[5] = hrxq_idx;
11111                 return 0;
11112         case MLX5_RSS_HASH_NONE:
11113                 hrxqs[6] = hrxq_idx;
11114                 return 0;
11115         default:
11116                 return -1;
11117         }
11118 }
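/*
 * Usage sketch (illustrative only): a caller registering a TCP/IPv4
 * non-tunnel hash RX queue would do roughly:
 *
 *     uint32_t hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
 *     if (hrxq_idx)
 *         __flow_dv_action_rss_hrxq_set(action, MLX5_RSS_HASH_IPV4_TCP,
 *                                       0, hrxq_idx);
 *
 * which stores hrxq_idx into slot 1 of the selected hrxq array; a -1
 * return indicates an unsupported hash_fields combination.
 */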
11119
11120 /**
11121  * Look up the hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11122  * and tunnel.
11123  *
11124  * @param[in] dev
11125  *   Pointer to the Ethernet device structure.
11126  * @param[in] idx
11127  *   Shared RSS action ID holding hash RX queue objects.
11128  * @param[in] hash_fields
11129  *   Defines combination of packet fields to participate in RX hash.
11130  * @param[in] tunnel
11131  *   Tunnel type
11132  *
11133  * @return
11134  *   Valid hash RX queue index, otherwise 0.
11135  */
11136 static uint32_t
11137 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
11138                                  const uint64_t hash_fields,
11139                                  const int tunnel)
11140 {
11141         struct mlx5_priv *priv = dev->data->dev_private;
11142         struct mlx5_shared_action_rss *shared_rss =
11143             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11144         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
11145                                                         shared_rss->hrxq_tunnel;
11146
11147         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11148         case MLX5_RSS_HASH_IPV4:
11149                 return hrxqs[0];
11150         case MLX5_RSS_HASH_IPV4_TCP:
11151                 return hrxqs[1];
11152         case MLX5_RSS_HASH_IPV4_UDP:
11153                 return hrxqs[2];
11154         case MLX5_RSS_HASH_IPV6:
11155                 return hrxqs[3];
11156         case MLX5_RSS_HASH_IPV6_TCP:
11157                 return hrxqs[4];
11158         case MLX5_RSS_HASH_IPV6_UDP:
11159                 return hrxqs[5];
11160         case MLX5_RSS_HASH_NONE:
11161                 return hrxqs[6];
11162         default:
11163                 return 0;
11164         }
11165 }
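/*
 * Usage sketch (mirrors the shared RSS path in flow_dv_apply() below):
 *
 *     uint32_t idx = __flow_dv_action_rss_hrxq_lookup(dev,
 *                             rss_desc->shared_rss,
 *                             dev_flow->hash_fields,
 *                             !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL));
 *     struct mlx5_hrxq *hrxq = idx ? mlx5_ipool_get
 *                 (priv->sh->ipool[MLX5_IPOOL_HRXQ], idx) : NULL;
 *
 * A zero return means no hash RX queue was prepared for that combination.
 */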
11166
11167 /**
11168  * Apply the flow to the NIC, lock free
11169  * (the mutex should be acquired by the caller).
11170  *
11171  * @param[in] dev
11172  *   Pointer to the Ethernet device structure.
11173  * @param[in, out] flow
11174  *   Pointer to flow structure.
11175  * @param[out] error
11176  *   Pointer to error structure.
11177  *
11178  * @return
11179  *   0 on success, a negative errno value otherwise and rte_errno is set.
11180  */
11181 static int
11182 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
11183               struct rte_flow_error *error)
11184 {
11185         struct mlx5_flow_dv_workspace *dv;
11186         struct mlx5_flow_handle *dh;
11187         struct mlx5_flow_handle_dv *dv_h;
11188         struct mlx5_flow *dev_flow;
11189         struct mlx5_priv *priv = dev->data->dev_private;
11190         uint32_t handle_idx;
11191         int n;
11192         int err;
11193         int idx;
11194         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11195         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
11196
11197         MLX5_ASSERT(wks);
11198         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
11199                 dev_flow = &wks->flows[idx];
11200                 dv = &dev_flow->dv;
11201                 dh = dev_flow->handle;
11202                 dv_h = &dh->dvh;
11203                 n = dv->actions_n;
11204                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
11205                         if (dv->transfer) {
11206                                 dv->actions[n++] = priv->sh->esw_drop_action;
11207                         } else {
11208                                 MLX5_ASSERT(priv->drop_queue.hrxq);
11209                                 dv->actions[n++] =
11210                                                 priv->drop_queue.hrxq->action;
11211                         }
11212                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
11213                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
11214                         struct mlx5_hrxq *hrxq;
11215                         uint32_t hrxq_idx;
11216
11217                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
11218                                                     &hrxq_idx);
11219                         if (!hrxq) {
11220                                 rte_flow_error_set
11221                                         (error, rte_errno,
11222                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11223                                          "cannot get hash queue");
11224                                 goto error;
11225                         }
11226                         dh->rix_hrxq = hrxq_idx;
11227                         dv->actions[n++] = hrxq->action;
11228                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11229                         struct mlx5_hrxq *hrxq = NULL;
11230                         uint32_t hrxq_idx;
11231
11232                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
11233                                                 rss_desc->shared_rss,
11234                                                 dev_flow->hash_fields,
11235                                                 !!(dh->layers &
11236                                                 MLX5_FLOW_LAYER_TUNNEL));
11237                         if (hrxq_idx)
11238                                 hrxq = mlx5_ipool_get
11239                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
11240                                          hrxq_idx);
11241                         if (!hrxq) {
11242                                 rte_flow_error_set
11243                                         (error, rte_errno,
11244                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11245                                          "cannot get hash queue");
11246                                 goto error;
11247                         }
11248                         dh->rix_srss = rss_desc->shared_rss;
11249                         dv->actions[n++] = hrxq->action;
11250                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
11251                         if (!priv->sh->default_miss_action) {
11252                                 rte_flow_error_set
11253                                         (error, rte_errno,
11254                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11255                                          "default miss action was not created.");
11256                                 goto error;
11257                         }
11258                         dv->actions[n++] = priv->sh->default_miss_action;
11259                 }
11260                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
11261                                                (void *)&dv->value, n,
11262                                                dv->actions, &dh->drv_flow);
11263                 if (err) {
11264                         rte_flow_error_set(error, errno,
11265                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11266                                            NULL,
11267                                            "hardware refuses to create flow");
11268                         goto error;
11269                 }
11270                 if (priv->vmwa_context &&
11271                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
11272                         /*
11273                          * The rule contains the VLAN pattern.
11274                          * For a VF we are going to create a VLAN
11275                          * interface to make the hypervisor set the
11276                          * correct e-Switch vport context.
11277                          */
11278                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
11279                 }
11280         }
11281         return 0;
11282 error:
11283         err = rte_errno; /* Save rte_errno before cleanup. */
11284         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
11285                        handle_idx, dh, next) {
11286                 /* hrxq is a union; don't clear it if the flag is not set. */
11287                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
11288                         mlx5_hrxq_release(dev, dh->rix_hrxq);
11289                         dh->rix_hrxq = 0;
11290                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11291                         dh->rix_srss = 0;
11292                 }
11293                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11294                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11295         }
11296         rte_errno = err; /* Restore rte_errno. */
11297         return -rte_errno;
11298 }
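/*
 * Illustrative summary of the fate resolution above (not normative):
 *     DROP         -> esw_drop_action (transfer) or the drop queue action,
 *     QUEUE        -> hash RX queue created via flow_dv_hrxq_prepare(),
 *     SHARED_RSS   -> hash RX queue looked up in the shared action table,
 *     DEFAULT_MISS -> preallocated default miss action.
 * Each resolved action is appended before mlx5_flow_os_create_flow().
 */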
11299
11300 void
11301 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
11302                           struct mlx5_cache_entry *entry)
11303 {
11304         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
11305                                                           entry);
11306
11307         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
11308         mlx5_free(cache);
11309 }
11310
11311 /**
11312  * Release the flow matcher.
11313  *
11314  * @param dev
11315  *   Pointer to Ethernet device.
11316  * @param handle
11317  *   Pointer to mlx5_flow_handle.
11318  *
11319  * @return
11320  *   1 while a reference on it exists, 0 when freed.
11321  */
11322 static int
11323 flow_dv_matcher_release(struct rte_eth_dev *dev,
11324                         struct mlx5_flow_handle *handle)
11325 {
11326         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
11327         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
11328                                                             typeof(*tbl), tbl);
11329         int ret;
11330
11331         MLX5_ASSERT(matcher->matcher_object);
11332         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
11333         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
11334         return ret;
11335 }
11336
11337 /**
11338  * Release encap_decap resource.
11339  *
11340  * @param list
11341  *   Pointer to the hash list.
11342  * @param entry
11343  *   Pointer to exist resource entry object.
11344  */
11345 void
11346 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
11347                               struct mlx5_hlist_entry *entry)
11348 {
11349         struct mlx5_dev_ctx_shared *sh = list->ctx;
11350         struct mlx5_flow_dv_encap_decap_resource *res =
11351                 container_of(entry, typeof(*res), entry);
11352
11353         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
11354         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
11355 }
11356
11357 /**
11358  * Release an encap/decap resource.
11359  *
11360  * @param dev
11361  *   Pointer to Ethernet device.
11362  * @param encap_decap_idx
11363  *   Index of encap decap resource.
11364  *
11365  * @return
11366  *   1 while a reference on it exists, 0 when freed.
11367  */
11368 static int
11369 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
11370                                      uint32_t encap_decap_idx)
11371 {
11372         struct mlx5_priv *priv = dev->data->dev_private;
11373         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
11374
11375         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
11376                                         encap_decap_idx);
11377         if (!cache_resource)
11378                 return 0;
11379         MLX5_ASSERT(cache_resource->action);
11380         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
11381                                      &cache_resource->entry);
11382 }
11383
11384 /**
11385  * Release a jump to table action resource.
11386  *
11387  * @param dev
11388  *   Pointer to Ethernet device.
11389  * @param rix_jump
11390  *   Index to the jump action resource.
11391  *
11392  * @return
11393  *   1 while a reference on it exists, 0 when freed.
11394  */
11395 static int
11396 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
11397                                   uint32_t rix_jump)
11398 {
11399         struct mlx5_priv *priv = dev->data->dev_private;
11400         struct mlx5_flow_tbl_data_entry *tbl_data;
11401
11402         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
11403                                   rix_jump);
11404         if (!tbl_data)
11405                 return 0;
11406         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
11407 }
11408
11409 void
11410 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
11411                          struct mlx5_hlist_entry *entry)
11412 {
11413         struct mlx5_flow_dv_modify_hdr_resource *res =
11414                 container_of(entry, typeof(*res), entry);
11415
11416         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
11417         mlx5_free(entry);
11418 }
11419
11420 /**
11421  * Release a modify-header resource.
11422  *
11423  * @param dev
11424  *   Pointer to Ethernet device.
11425  * @param handle
11426  *   Pointer to mlx5_flow_handle.
11427  *
11428  * @return
11429  *   1 while a reference on it exists, 0 when freed.
11430  */
11431 static int
11432 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
11433                                     struct mlx5_flow_handle *handle)
11434 {
11435         struct mlx5_priv *priv = dev->data->dev_private;
11436         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
11437
11438         MLX5_ASSERT(entry->action);
11439         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
11440 }
11441
11442 void
11443 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
11444                           struct mlx5_cache_entry *entry)
11445 {
11446         struct mlx5_dev_ctx_shared *sh = list->ctx;
11447         struct mlx5_flow_dv_port_id_action_resource *cache =
11448                         container_of(entry, typeof(*cache), entry);
11449
11450         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11451         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
11452 }
11453
11454 /**
11455  * Release port ID action resource.
11456  *
11457  * @param dev
11458  *   Pointer to Ethernet device.
11459  * @param handle
11460  *   Pointer to mlx5_flow_handle.
11461  *
11462  * @return
11463  *   1 while a reference on it exists, 0 when freed.
11464  */
11465 static int
11466 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
11467                                         uint32_t port_id)
11468 {
11469         struct mlx5_priv *priv = dev->data->dev_private;
11470         struct mlx5_flow_dv_port_id_action_resource *cache;
11471
11472         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
11473         if (!cache)
11474                 return 0;
11475         MLX5_ASSERT(cache->action);
11476         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
11477                                      &cache->entry);
11478 }
11479
11480 /**
11481  * Release shared RSS action resource.
11482  *
11483  * @param dev
11484  *   Pointer to Ethernet device.
11485  * @param srss
11486  *   Shared RSS action index.
11487  */
11488 static void
11489 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
11490 {
11491         struct mlx5_priv *priv = dev->data->dev_private;
11492         struct mlx5_shared_action_rss *shared_rss;
11493
11494         shared_rss = mlx5_ipool_get
11495                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
11496         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
11497 }
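/*
 * Note (illustrative): this only drops the reference a flow took on the
 * shared action; the hash RX queues themselves are assumed to be released
 * later by __flow_dv_action_rss_hrxqs_release() when the shared action is
 * destroyed.
 */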
11498
11499 void
11500 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
11501                             struct mlx5_cache_entry *entry)
11502 {
11503         struct mlx5_dev_ctx_shared *sh = list->ctx;
11504         struct mlx5_flow_dv_push_vlan_action_resource *cache =
11505                         container_of(entry, typeof(*cache), entry);
11506
11507         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11508         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
11509 }
11510
11511 /**
11512  * Release push vlan action resource.
11513  *
11514  * @param dev
11515  *   Pointer to Ethernet device.
11516  * @param handle
11517  *   Pointer to mlx5_flow_handle.
11518  *
11519  * @return
11520  *   1 while a reference on it exists, 0 when freed.
11521  */
11522 static int
11523 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
11524                                           struct mlx5_flow_handle *handle)
11525 {
11526         struct mlx5_priv *priv = dev->data->dev_private;
11527         struct mlx5_flow_dv_push_vlan_action_resource *cache;
11528         uint32_t idx = handle->dvh.rix_push_vlan;
11529
11530         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
11531         if (!cache)
11532                 return 0;
11533         MLX5_ASSERT(cache->action);
11534         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
11535                                      &cache->entry);
11536 }
11537
11538 /**
11539  * Release the fate resource.
11540  *
11541  * @param dev
11542  *   Pointer to Ethernet device.
11543  * @param handle
11544  *   Pointer to mlx5_flow_handle.
11545  */
11546 static void
11547 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
11548                                struct mlx5_flow_handle *handle)
11549 {
11550         if (!handle->rix_fate)
11551                 return;
11552         switch (handle->fate_action) {
11553         case MLX5_FLOW_FATE_QUEUE:
11554                 mlx5_hrxq_release(dev, handle->rix_hrxq);
11555                 break;
11556         case MLX5_FLOW_FATE_JUMP:
11557                 flow_dv_jump_tbl_resource_release(dev, handle->rix_jump);
11558                 break;
11559         case MLX5_FLOW_FATE_PORT_ID:
11560                 flow_dv_port_id_action_resource_release(dev,
11561                                 handle->rix_port_id_action);
11562                 break;
11563         default:
11564                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
11565                 break;
11566         }
11567         handle->rix_fate = 0;
11568 }
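/*
 * Illustrative note: rix_hrxq, rix_jump, rix_port_id_action and rix_fate
 * overlay the same union in the handle, so fate_action is the only
 * reliable discriminator; clearing rix_fate above clears whichever fate
 * index was in use.
 */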
11569
11570 void
11571 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
11572                          struct mlx5_cache_entry *entry)
11573 {
11574         struct mlx5_flow_dv_sample_resource *cache_resource =
11575                         container_of(entry, typeof(*cache_resource), entry);
11576         struct rte_eth_dev *dev = cache_resource->dev;
11577         struct mlx5_priv *priv = dev->data->dev_private;
11578
11579         if (cache_resource->verbs_action)
11580                 claim_zero(mlx5_flow_os_destroy_flow_action
11581                                 (cache_resource->verbs_action));
11582         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11583                 if (cache_resource->default_miss)
11584                         claim_zero(mlx5_flow_os_destroy_flow_action
11585                           (cache_resource->default_miss));
11586         }
11587         if (cache_resource->normal_path_tbl)
11588                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11589                         cache_resource->normal_path_tbl);
11590         flow_dv_sample_sub_actions_release(dev,
11591                                 &cache_resource->sample_idx);
11592         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11593                         cache_resource->idx);
11594         DRV_LOG(DEBUG, "sample resource %p: removed",
11595                 (void *)cache_resource);
11596 }
11597
11598 /**
11599  * Release a sample resource.
11600  *
11601  * @param dev
11602  *   Pointer to Ethernet device.
11603  * @param handle
11604  *   Pointer to mlx5_flow_handle.
11605  *
11606  * @return
11607  *   1 while a reference on it exists, 0 when freed.
11608  */
11609 static int
11610 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11611                                      struct mlx5_flow_handle *handle)
11612 {
11613         struct mlx5_priv *priv = dev->data->dev_private;
11614         struct mlx5_flow_dv_sample_resource *cache_resource;
11615
11616         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11617                          handle->dvh.rix_sample);
11618         if (!cache_resource)
11619                 return 0;
11620         MLX5_ASSERT(cache_resource->verbs_action);
11621         return mlx5_cache_unregister(&priv->sh->sample_action_list,
11622                                      &cache_resource->entry);
11623 }
11624
11625 void
11626 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
11627                              struct mlx5_cache_entry *entry)
11628 {
11629         struct mlx5_flow_dv_dest_array_resource *cache_resource =
11630                         container_of(entry, typeof(*cache_resource), entry);
11631         struct rte_eth_dev *dev = cache_resource->dev;
11632         struct mlx5_priv *priv = dev->data->dev_private;
11633         uint32_t i = 0;
11634
11635         MLX5_ASSERT(cache_resource->action);
11636         if (cache_resource->action)
11637                 claim_zero(mlx5_flow_os_destroy_flow_action
11638                                         (cache_resource->action));
11639         for (; i < cache_resource->num_of_dest; i++)
11640                 flow_dv_sample_sub_actions_release(dev,
11641                                 &cache_resource->sample_idx[i]);
11642         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11643                         cache_resource->idx);
11644         DRV_LOG(DEBUG, "destination array resource %p: removed",
11645                 (void *)cache_resource);
11646 }
11647
11648 /**
11649  * Release a destination array resource.
11650  *
11651  * @param dev
11652  *   Pointer to Ethernet device.
11653  * @param handle
11654  *   Pointer to mlx5_flow_handle.
11655  *
11656  * @return
11657  *   1 while a reference on it exists, 0 when freed.
11658  */
11659 static int
11660 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11661                                     struct mlx5_flow_handle *handle)
11662 {
11663         struct mlx5_priv *priv = dev->data->dev_private;
11664         struct mlx5_flow_dv_dest_array_resource *cache;
11665
11666         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11667                                handle->dvh.rix_dest_array);
11668         if (!cache)
11669                 return 0;
11670         MLX5_ASSERT(cache->action);
11671         return mlx5_cache_unregister(&priv->sh->dest_array_list,
11672                                      &cache->entry);
11673 }
11674
11675 static void
11676 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
11677 {
11678         struct mlx5_priv *priv = dev->data->dev_private;
11679         struct mlx5_dev_ctx_shared *sh = priv->sh;
11680         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
11681                                 sh->geneve_tlv_option_resource;
11682         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
11683         if (geneve_opt_resource) {
11684                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
11685                                          __ATOMIC_RELAXED))) {
11686                         claim_zero(mlx5_devx_cmd_destroy
11687                                         (geneve_opt_resource->obj));
11688                         mlx5_free(sh->geneve_tlv_option_resource);
11689                         sh->geneve_tlv_option_resource = NULL;
11690                 }
11691         }
11692         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
11693 }
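/*
 * Note (illustrative): the GENEVE TLV option object is shared per device
 * context, so the refcount decrement and the conditional destroy are both
 * serialized by geneve_tlv_opt_sl to avoid racing with concurrent flow
 * creations taking a new reference.
 */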
11694
11695 /**
11696  * Remove the flow from the NIC but keep it in memory.
11697  * Lock free (the mutex should be acquired by the caller).
11698  *
11699  * @param[in] dev
11700  *   Pointer to Ethernet device.
11701  * @param[in, out] flow
11702  *   Pointer to flow structure.
11703  */
11704 static void
11705 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11706 {
11707         struct mlx5_flow_handle *dh;
11708         uint32_t handle_idx;
11709         struct mlx5_priv *priv = dev->data->dev_private;
11710
11711         if (!flow)
11712                 return;
11713         handle_idx = flow->dev_handles;
11714         while (handle_idx) {
11715                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11716                                     handle_idx);
11717                 if (!dh)
11718                         return;
11719                 if (dh->drv_flow) {
11720                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
11721                         dh->drv_flow = NULL;
11722                 }
11723                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
11724                         flow_dv_fate_resource_release(dev, dh);
11725                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11726                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11727                 handle_idx = dh->next.next;
11728         }
11729 }
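/*
 * Note (illustrative): flow_dv_remove() only detaches the flow from the
 * hardware (drv_flow, queue fate resource, VF VLAN); flow_dv_destroy()
 * below calls it first and then releases counters, meters, matchers and
 * the other indexed resources.
 */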
11730
11731 /**
11732  * Remove the flow from the NIC and the memory.
11733  * Lock free (the mutex should be acquired by the caller).
11734  *
11735  * @param[in] dev
11736  *   Pointer to the Ethernet device structure.
11737  * @param[in, out] flow
11738  *   Pointer to flow structure.
11739  */
11740 static void
11741 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11742 {
11743         struct mlx5_flow_handle *dev_handle;
11744         struct mlx5_priv *priv = dev->data->dev_private;
11745         uint32_t srss = 0;
11746
11747         if (!flow)
11748                 return;
11749         flow_dv_remove(dev, flow);
11750         if (flow->counter) {
11751                 flow_dv_counter_free(dev, flow->counter);
11752                 flow->counter = 0;
11753         }
11754         if (flow->meter) {
11755                 struct mlx5_flow_meter *fm;
11756
11757                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
11758                                     flow->meter);
11759                 if (fm)
11760                         mlx5_flow_meter_detach(fm);
11761                 flow->meter = 0;
11762         }
11763         if (flow->age)
11764                 flow_dv_aso_age_release(dev, flow->age);
11765         if (flow->geneve_tlv_option) {
11766                 flow_dv_geneve_tlv_option_resource_release(dev);
11767                 flow->geneve_tlv_option = 0;
11768         }
11769         while (flow->dev_handles) {
11770                 uint32_t tmp_idx = flow->dev_handles;
11771
11772                 dev_handle = mlx5_ipool_get(priv->sh->ipool
11773                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
11774                 if (!dev_handle)
11775                         return;
11776                 flow->dev_handles = dev_handle->next.next;
11777                 if (dev_handle->dvh.matcher)
11778                         flow_dv_matcher_release(dev, dev_handle);
11779                 if (dev_handle->dvh.rix_sample)
11780                         flow_dv_sample_resource_release(dev, dev_handle);
11781                 if (dev_handle->dvh.rix_dest_array)
11782                         flow_dv_dest_array_resource_release(dev, dev_handle);
11783                 if (dev_handle->dvh.rix_encap_decap)
11784                         flow_dv_encap_decap_resource_release(dev,
11785                                 dev_handle->dvh.rix_encap_decap);
11786                 if (dev_handle->dvh.modify_hdr)
11787                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
11788                 if (dev_handle->dvh.rix_push_vlan)
11789                         flow_dv_push_vlan_action_resource_release(dev,
11790                                                                   dev_handle);
11791                 if (dev_handle->dvh.rix_tag)
11792                         flow_dv_tag_release(dev,
11793                                             dev_handle->dvh.rix_tag);
11794                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
11795                         flow_dv_fate_resource_release(dev, dev_handle);
11796                 else if (!srss)
11797                         srss = dev_handle->rix_srss;
11798                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11799                            tmp_idx);
11800         }
11801         if (srss)
11802                 flow_dv_shared_rss_action_release(dev, srss);
11803 }
11804
11805 /**
11806  * Release array of hash RX queue objects.
11807  * Helper function.
11808  *
11809  * @param[in] dev
11810  *   Pointer to the Ethernet device structure.
11811  * @param[in, out] hrxqs
11812  *   Array of hash RX queue objects.
11813  *
11814  * @return
11815  *   Total number of references to hash RX queue objects in *hrxqs* array
11816  *   after this operation.
11817  */
11818 static int
11819 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11820                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11821 {
11822         size_t i;
11823         int remaining = 0;
11824
11825         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11826                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11827
11828                 if (!ret)
11829                         (*hrxqs)[i] = 0;
11830                 remaining += ret;
11831         }
11832         return remaining;
11833 }
11834
11835 /**
11836  * Release all hash RX queue objects representing shared RSS action.
11837  *
11838  * @param[in] dev
11839  *   Pointer to the Ethernet device structure.
11840  * @param[in, out] action
11841  *   Shared RSS action to remove hash RX queue objects from.
11842  *
11843  * @return
11844  *   Total number of references to hash RX queue objects stored in *action*
11845  *   after this operation.
11846  *   Expected to be 0 if no external references are held.
11847  */
11848 static int
11849 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11850                                  struct mlx5_shared_action_rss *action)
11851 {
11852         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11853                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11854 }
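/*
 * Usage sketch (illustrative): the summed return value lets a caller
 * detect leaked references, e.g.
 *
 *     if (__flow_dv_action_rss_hrxqs_release(dev, action))
 *         DRV_LOG(WARNING, "shared RSS hrxqs still referenced");
 *
 * A zero result means every hash RX queue slot was freed.
 */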
11855
11856 /**
11857  * Setup shared RSS action.
11858  * Prepare a set of hash RX queue objects sufficient to handle all valid
11859  * hash_fields combinations (see enum ibv_rx_hash_fields).
11860  *
11861  * @param[in] dev
11862  *   Pointer to the Ethernet device structure.
11863  * @param[in] action_idx
11864  *   Shared RSS action ipool index.
11865  * @param[in, out] action
11866  *   Partially initialized shared RSS action.
11867  * @param[out] error
11868  *   Perform verbose error reporting if not NULL. Initialized in case of
11869  *   error only.
11870  *
11871  * @return
11872  *   0 on success, otherwise negative errno value.
11873  */
11874 static int
11875 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11876                            uint32_t action_idx,
11877                            struct mlx5_shared_action_rss *action,
11878                            struct rte_flow_error *error)
11879 {
11880         struct mlx5_flow_rss_desc rss_desc = { 0 };
11881         size_t i;
11882         int err;
11883
11884         if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
11885                 return rte_flow_error_set(error, rte_errno,
11886                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11887                                           "cannot setup indirection table");
11888         }
11889         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11890         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11891         rss_desc.const_q = action->origin.queue;
11892         rss_desc.queue_num = action->origin.queue_num;
11893         /* Set non-zero value to indicate a shared RSS. */
11894         rss_desc.shared_rss = action_idx;
11895         rss_desc.ind_tbl = action->ind_tbl;
11896         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11897                 uint32_t hrxq_idx;
11898                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11899                 int tunnel;
11900
11901                 for (tunnel = 0; tunnel < 2; tunnel++) {
11902                         rss_desc.tunnel = tunnel;
11903                         rss_desc.hash_fields = hash_fields;
11904                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11905                         if (!hrxq_idx) {
11906                                 rte_flow_error_set
11907                                         (error, rte_errno,
11908                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11909                                          "cannot get hash queue");
11910                                 goto error_hrxq_new;
11911                         }
11912                         err = __flow_dv_action_rss_hrxq_set
11913                                 (action, hash_fields, tunnel, hrxq_idx);
11914                         MLX5_ASSERT(!err);
11915                 }
11916         }
11917         return 0;
11918 error_hrxq_new:
11919         err = rte_errno;
11920         __flow_dv_action_rss_hrxqs_release(dev, action);
11921         if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
11922                 action->ind_tbl = NULL;
11923         rte_errno = err;
11924         return -rte_errno;
11925 }
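
/*
 * Editorial sketch: the setup above eagerly creates one hash RX queue
 * object per (hash_fields, tunnel) pair, i.e.:
 *
 *	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
 *		hrxq(mlx5_rss_hash_fields[i], tunnel = 0);
 *		hrxq(mlx5_rss_hash_fields[i], tunnel = 1);
 *	}
 *
 * so flows referencing the shared action later pick a matching hrxq
 * instead of allocating one at flow creation time.
 */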
11926
11927 /**
11928  * Create shared RSS action.
11929  *
11930  * @param[in] dev
11931  *   Pointer to the Ethernet device structure.
11932  * @param[in] conf
11933  *   Shared action configuration.
11934  * @param[in] rss
11935  *   RSS action specification used to create shared action.
11936  * @param[out] error
11937  *   Perform verbose error reporting if not NULL. Initialized in case of
11938  *   error only.
11939  *
11940  * @return
11941  *   A valid shared action ID in case of success, 0 otherwise and
11942  *   rte_errno is set.
11943  */
11944 static uint32_t
11945 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11946                             const struct rte_flow_shared_action_conf *conf,
11947                             const struct rte_flow_action_rss *rss,
11948                             struct rte_flow_error *error)
11949 {
11950         struct mlx5_priv *priv = dev->data->dev_private;
11951         struct mlx5_shared_action_rss *shared_action = NULL;
11952         void *queue = NULL;
11953         struct rte_flow_action_rss *origin;
11954         const uint8_t *rss_key;
11955         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11956         uint32_t idx;
11957
11958         RTE_SET_USED(conf);
11959         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11960                             0, SOCKET_ID_ANY);
11961         shared_action = mlx5_ipool_zmalloc
11962                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
11963         if (!shared_action || !queue) {
11964                 rte_flow_error_set(error, ENOMEM,
11965                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11966                                    "cannot allocate resource memory");
11967                 goto error_rss_init;
11968         }
11969         if (idx >= (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
11970                 rte_flow_error_set(error, E2BIG,
11971                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11972                                    "rss action number out of range");
11973                 goto error_rss_init;
11974         }
11975         shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
11976                                              sizeof(*shared_action->ind_tbl),
11977                                              0, SOCKET_ID_ANY);
11978         if (!shared_action->ind_tbl) {
11979                 rte_flow_error_set(error, ENOMEM,
11980                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11981                                    "cannot allocate resource memory");
11982                 goto error_rss_init;
11983         }
11984         memcpy(queue, rss->queue, queue_size);
11985         shared_action->ind_tbl->queues = queue;
11986         shared_action->ind_tbl->queues_n = rss->queue_num;
11987         origin = &shared_action->origin;
11988         origin->func = rss->func;
11989         origin->level = rss->level;
11990         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11991         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11992         /* NULL RSS key indicates default RSS key. */
11993         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11994         memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11995         origin->key = &shared_action->key[0];
11996         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11997         origin->queue = queue;
11998         origin->queue_num = rss->queue_num;
11999         if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
12000                 goto error_rss_init;
12001         rte_spinlock_init(&shared_action->action_rss_sl);
12002         __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
12003         rte_spinlock_lock(&priv->shared_act_sl);
12004         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12005                      &priv->rss_shared_actions, idx, shared_action, next);
12006         rte_spinlock_unlock(&priv->shared_act_sl);
12007         return idx;
12008 error_rss_init:
12009         if (shared_action) {
12010                 if (shared_action->ind_tbl)
12011                         mlx5_free(shared_action->ind_tbl);
12012                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12013                                 idx);
12014         }
12015         if (queue)
12016                 mlx5_free(queue);
12017         return 0;
12018 }
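
/*
 * Editorial usage sketch (not part of the driver): applications reach
 * __flow_dv_action_rss_create() through the generic rte_flow API;
 * port_id, queues and error are assumed to be set up by the caller:
 *
 *	struct rte_flow_action_rss rss_conf = {
 *		.types = ETH_RSS_IP,
 *		.queue = queues,
 *		.queue_num = 4,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	struct rte_flow_shared_action *handle =
 *		rte_flow_shared_action_create(port_id, NULL, &action,
 *					      &error);
 *
 * A NULL key and zero types in the RSS configuration fall back to the
 * defaults handled above (rss_hash_default_key and ETH_RSS_IP).
 */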
12019
12020 /**
12021  * Destroy the shared RSS action.
12022  * Release related hash RX queue objects.
12023  *
12024  * @param[in] dev
12025  *   Pointer to the Ethernet device structure.
12026  * @param[in] idx
12027  *   The shared RSS action object ID to be removed.
12028  * @param[out] error
12029  *   Perform verbose error reporting if not NULL. Initialized in case of
12030  *   error only.
12031  *
12032  * @return
12033  *   0 on success, otherwise negative errno value.
12034  */
12035 static int
12036 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
12037                              struct rte_flow_error *error)
12038 {
12039         struct mlx5_priv *priv = dev->data->dev_private;
12040         struct mlx5_shared_action_rss *shared_rss =
12041             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12042         uint32_t old_refcnt = 1;
12043         int remaining;
12044         uint16_t *queue = NULL;
12045
12046         if (!shared_rss)
12047                 return rte_flow_error_set(error, EINVAL,
12048                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12049                                           "invalid shared action");
12050         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12051         if (remaining)
12052                 return rte_flow_error_set(error, EBUSY,
12053                                           RTE_FLOW_ERROR_TYPE_ACTION,
12054                                           NULL,
12055                                           "shared rss hrxq has references");
12056         queue = shared_rss->ind_tbl->queues;
12057         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
12058         if (remaining)
12059                 return rte_flow_error_set(error, EBUSY,
12060                                           RTE_FLOW_ERROR_TYPE_ACTION,
12061                                           NULL,
12062                                           "shared rss indirection table has"
12063                                           " references");
12064         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
12065                                          0, 0, __ATOMIC_ACQUIRE,
12066                                          __ATOMIC_RELAXED))
12067                 return rte_flow_error_set(error, EBUSY,
12068                                           RTE_FLOW_ERROR_TYPE_ACTION,
12069                                           NULL,
12070                                           "shared rss has references");
12071         mlx5_free(queue);
12072         rte_spinlock_lock(&priv->shared_act_sl);
12073         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12074                      &priv->rss_shared_actions, idx, shared_rss, next);
12075         rte_spinlock_unlock(&priv->shared_act_sl);
12076         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12077                         idx);
12078         return 0;
12079 }
12080
12081 /**
12082  * Create a shared action, lock free
12083  * (the mutex should be acquired by the caller).
12084  * Dispatcher for the action type specific call.
12085  *
12086  * @param[in] dev
12087  *   Pointer to the Ethernet device structure.
12088  * @param[in] conf
12089  *   Shared action configuration.
12090  * @param[in] action
12091  *   Action specification used to create shared action.
12092  * @param[out] error
12093  *   Perform verbose error reporting if not NULL. Initialized in case of
12094  *   error only.
12095  *
12096  * @return
12097  *   A valid shared action handle in case of success, NULL otherwise and
12098  *   rte_errno is set.
12099  */
12100 static struct rte_flow_shared_action *
12101 flow_dv_action_create(struct rte_eth_dev *dev,
12102                       const struct rte_flow_shared_action_conf *conf,
12103                       const struct rte_flow_action *action,
12104                       struct rte_flow_error *err)
12105 {
12106         uint32_t idx = 0;
12107         uint32_t ret = 0;
12108
12109         switch (action->type) {
12110         case RTE_FLOW_ACTION_TYPE_RSS:
12111                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
12112                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
12113                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12114                 break;
12115         case RTE_FLOW_ACTION_TYPE_AGE:
12116                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
12117                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
12118                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12119                 if (ret) {
12120                         struct mlx5_aso_age_action *aso_age =
12121                                               flow_aso_age_get_by_idx(dev, ret);
12122
12123                         if (!aso_age->age_params.context)
12124                                 aso_age->age_params.context =
12125                                                          (void *)(uintptr_t)idx;
12126                 }
12127                 break;
12128         default:
12129                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12130                                    NULL, "action type not supported");
12131                 break;
12132         }
12133         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
12134 }
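
/*
 * Editorial note: the opaque handle returned above packs the action type
 * and the per-type ipool index into one 32-bit value:
 *
 *	idx  = (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
 *	type = idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
 *	ret  = idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 *
 * flow_dv_action_destroy(), flow_dv_action_update() and
 * flow_dv_action_query() below decode the handle the same way, which is
 * why a zero index can double as the error value.
 */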
12135
12136 /**
12137  * Destroy the shared action.
12138  * Release the action related resources on the NIC and in memory.
12139  * Lock free (the mutex should be acquired by the caller).
12140  * Dispatcher for the action type specific call.
12141  *
12142  * @param[in] dev
12143  *   Pointer to the Ethernet device structure.
12144  * @param[in] action
12145  *   The shared action object to be removed.
12146  * @param[out] error
12147  *   Perform verbose error reporting if not NULL. Initialized in case of
12148  *   error only.
12149  *
12150  * @return
12151  *   0 on success, otherwise negative errno value.
12152  */
12153 static int
12154 flow_dv_action_destroy(struct rte_eth_dev *dev,
12155                        struct rte_flow_shared_action *action,
12156                        struct rte_flow_error *error)
12157 {
12158         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12159         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12160         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12161         int ret;
12162
12163         switch (type) {
12164         case MLX5_SHARED_ACTION_TYPE_RSS:
12165                 return __flow_dv_action_rss_release(dev, idx, error);
12166         case MLX5_SHARED_ACTION_TYPE_AGE:
12167                 ret = flow_dv_aso_age_release(dev, idx);
12168                 if (ret)
12169                         /*
12170                          * In this case, the last flow has a reference will
12171                          * In this case, the last flow holding a reference
12172                          * will actually release the age action.
12173                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
12174                                 " released with references %d.", idx, ret);
12175                 return 0;
12176         default:
12177                 return rte_flow_error_set(error, ENOTSUP,
12178                                           RTE_FLOW_ERROR_TYPE_ACTION,
12179                                           NULL,
12180                                           "action type not supported");
12181         }
12182 }
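
/*
 * Editorial usage sketch (not part of the driver): releasing a shared
 * action through the generic API:
 *
 *	ret = rte_flow_shared_action_destroy(port_id, handle, &error);
 *
 * For RSS this fails with EBUSY while any flow still references the
 * hrxqs, the indirection table or the action itself, as enforced by
 * __flow_dv_action_rss_release() above.
 */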
12183
12184 /**
12185  * Update a shared RSS action configuration in place.
12186  *
12187  * @param[in] dev
12188  *   Pointer to the Ethernet device structure.
12189  * @param[in] idx
12190  *   The shared RSS action object ID to be updated.
12191  * @param[in] action_conf
12192  *   RSS action specification used to modify *shared_rss*.
12193  * @param[out] error
12194  *   Perform verbose error reporting if not NULL. Initialized in case of
12195  *   error only.
12196  *
12197  * @return
12198  *   0 on success, otherwise negative errno value.
12199  * @note Currently only updating the RSS queues is supported.
12200  */
12201 static int
12202 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
12203                             const struct rte_flow_action_rss *action_conf,
12204                             struct rte_flow_error *error)
12205 {
12206         struct mlx5_priv *priv = dev->data->dev_private;
12207         struct mlx5_shared_action_rss *shared_rss =
12208             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12209         int ret = 0;
12210         void *queue = NULL;
12211         uint16_t *queue_old = NULL;
12212         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
12213
12214         if (!shared_rss)
12215                 return rte_flow_error_set(error, EINVAL,
12216                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12217                                           "invalid shared action to update");
12218         queue = mlx5_malloc(MLX5_MEM_ZERO,
12219                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
12220                             0, SOCKET_ID_ANY);
12221         if (!queue)
12222                 return rte_flow_error_set(error, ENOMEM,
12223                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12224                                           NULL,
12225                                           "cannot allocate resource memory");
12226         memcpy(queue, action_conf->queue, queue_size);
12227         MLX5_ASSERT(shared_rss->ind_tbl);
12228         rte_spinlock_lock(&shared_rss->action_rss_sl);
12229         queue_old = shared_rss->ind_tbl->queues;
12230         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
12231                                         queue, action_conf->queue_num, true);
12232         if (ret) {
12233                 mlx5_free(queue);
12234                 ret = rte_flow_error_set(error, rte_errno,
12235                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12236                                           "cannot update indirection table");
12237         } else {
12238                 mlx5_free(queue_old);
12239                 shared_rss->origin.queue = queue;
12240                 shared_rss->origin.queue_num = action_conf->queue_num;
12241         }
12242         rte_spinlock_unlock(&shared_rss->action_rss_sl);
12243         return ret;
12244 }
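
/*
 * Editorial usage sketch (not part of the driver): replacing the queue
 * set of a shared RSS action, assuming "handle" was returned by
 * rte_flow_shared_action_create():
 *
 *	uint16_t new_queues[] = { 4, 5, 6, 7 };
 *	struct rte_flow_action_rss rss_conf = {
 *		.queue = new_queues,
 *		.queue_num = 4,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RSS,
 *		.conf = &rss_conf,
 *	};
 *	ret = rte_flow_shared_action_update(port_id, handle, &action,
 *					    &error);
 *
 * Per the note above, only the queues are updated; the other RSS fields
 * keep their creation-time values.
 */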
12245
12246 /**
12247  * Update a shared action configuration in place, lock free
12248  * (the mutex should be acquired by the caller).
12249  *
12250  * @param[in] dev
12251  *   Pointer to the Ethernet device structure.
12252  * @param[in] action
12253  *   The shared action object to be updated.
12254  * @param[in] action_conf
12255  *   Action specification used to modify *action*.
12256  *   *action_conf* should be of type correlating with type of the *action*,
12257  *   otherwise considered as invalid.
12258  * @param[out] error
12259  *   Perform verbose error reporting if not NULL. Initialized in case of
12260  *   error only.
12261  *
12262  * @return
12263  *   0 on success, otherwise negative errno value.
12264  */
12265 static int
12266 flow_dv_action_update(struct rte_eth_dev *dev,
12267                         struct rte_flow_shared_action *action,
12268                         const void *action_conf,
12269                         struct rte_flow_error *err)
12270 {
12271         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12272         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12273         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12274
12275         switch (type) {
12276         case MLX5_SHARED_ACTION_TYPE_RSS:
12277                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
12278         default:
12279                 return rte_flow_error_set(err, ENOTSUP,
12280                                           RTE_FLOW_ERROR_TYPE_ACTION,
12281                                           NULL,
12282                                           "action type update not supported");
12283         }
12284 }
12285
12286 static int
12287 flow_dv_action_query(struct rte_eth_dev *dev,
12288                      const struct rte_flow_shared_action *action, void *data,
12289                      struct rte_flow_error *error)
12290 {
12291         struct mlx5_age_param *age_param;
12292         struct rte_flow_query_age *resp;
12293         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12294         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12295         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12296
12297         switch (type) {
12298         case MLX5_SHARED_ACTION_TYPE_AGE:
12299                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
12300                 resp = data;
12301                 resp->aged = __atomic_load_n(&age_param->state,
12302                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
12303                                                                           1 : 0;
12304                 resp->sec_since_last_hit_valid = !resp->aged;
12305                 if (resp->sec_since_last_hit_valid)
12306                         resp->sec_since_last_hit = __atomic_load_n
12307                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
12308                 return 0;
12309         default:
12310                 return rte_flow_error_set(error, ENOTSUP,
12311                                           RTE_FLOW_ERROR_TYPE_ACTION,
12312                                           NULL,
12313                                           "action type query not supported");
12314         }
12315 }
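
/*
 * Editorial usage sketch (not part of the driver): querying a shared AGE
 * action through the generic API, assuming "handle" was created with an
 * RTE_FLOW_ACTION_TYPE_AGE action:
 *
 *	struct rte_flow_query_age age_resp;
 *
 *	ret = rte_flow_shared_action_query(port_id, handle, &age_resp,
 *					   &error);
 *	if (!ret && age_resp.aged)
 *		handle_timed_out_flows();
 *
 * handle_timed_out_flows() is a hypothetical application callback.
 */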
12316
12317 /**
12318  * Query a DV flow rule for its statistics via DevX.
12319  *
12320  * @param[in] dev
12321  *   Pointer to Ethernet device.
12322  * @param[in] flow
12323  *   Pointer to the flow rule.
12324  * @param[out] data
12325  *   Data retrieved by the query.
12326  * @param[out] error
12327  *   Perform verbose error reporting if not NULL.
12328  *
12329  * @return
12330  *   0 on success, a negative errno value otherwise and rte_errno is set.
12331  */
12332 static int
12333 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
12334                     void *data, struct rte_flow_error *error)
12335 {
12336         struct mlx5_priv *priv = dev->data->dev_private;
12337         struct rte_flow_query_count *qc = data;
12338
12339         if (!priv->config.devx)
12340                 return rte_flow_error_set(error, ENOTSUP,
12341                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12342                                           NULL,
12343                                           "counters are not supported");
12344         if (flow->counter) {
12345                 uint64_t pkts, bytes;
12346                 struct mlx5_flow_counter *cnt;
12347
12348                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
12349                                                  NULL);
12350                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
12351                                                &bytes);
12352
12353                 if (err)
12354                         return rte_flow_error_set(error, -err,
12355                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12356                                         NULL, "cannot read counters");
12357                 qc->hits_set = 1;
12358                 qc->bytes_set = 1;
12359                 qc->hits = pkts - cnt->hits;
12360                 qc->bytes = bytes - cnt->bytes;
12361                 if (qc->reset) {
12362                         cnt->hits = pkts;
12363                         cnt->bytes = bytes;
12364                 }
12365                 return 0;
12366         }
12367         return rte_flow_error_set(error, EINVAL,
12368                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12369                                   NULL,
12370                                   "counters are not available");
12371 }
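
/*
 * Editorial usage sketch (not part of the driver): reading and resetting
 * a flow counter through the generic API:
 *
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *
 *	ret = rte_flow_query(port_id, flow, &count_action, &qc, &error);
 *
 * The returned qc.hits/qc.bytes are deltas since the previous reset:
 * flow_dv_query_count() subtracts the values cached in cnt->hits and
 * cnt->bytes, and a reset rebases that cache to the current hardware
 * readings.
 */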
12372
12373 /**
12374  * Query a flow rule AGE action for aging information.
12375  *
12376  * @param[in] dev
12377  *   Pointer to Ethernet device.
12378  * @param[in] flow
12379  *   Pointer to the flow rule.
12380  * @param[out] data
12381  *   Data retrieved by the query.
12382  * @param[out] error
12383  *   Perform verbose error reporting if not NULL.
12384  *
12385  * @return
12386  *   0 on success, a negative errno value otherwise and rte_errno is set.
12387  */
12388 static int
12389 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
12390                   void *data, struct rte_flow_error *error)
12391 {
12392         struct rte_flow_query_age *resp = data;
12393         struct mlx5_age_param *age_param;
12394
12395         if (flow->age) {
12396                 struct mlx5_aso_age_action *act =
12397                                      flow_aso_age_get_by_idx(dev, flow->age);
12398
12399                 age_param = &act->age_params;
12400         } else if (flow->counter) {
12401                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
12402
12403                 if (!age_param || !age_param->timeout)
12404                         return rte_flow_error_set
12405                                         (error, EINVAL,
12406                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12407                                          NULL, "cannot read age data");
12408         } else {
12409                 return rte_flow_error_set(error, EINVAL,
12410                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12411                                           NULL, "age data not available");
12412         }
12413         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
12414                                      AGE_TMOUT ? 1 : 0;
12415         resp->sec_since_last_hit_valid = !resp->aged;
12416         if (resp->sec_since_last_hit_valid)
12417                 resp->sec_since_last_hit = __atomic_load_n
12418                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
12419         return 0;
12420 }
12421
12422 /**
12423  * Query a flow.
12424  *
12425  * @see rte_flow_query()
12426  * @see rte_flow_ops
12427  */
12428 static int
12429 flow_dv_query(struct rte_eth_dev *dev,
12430               struct rte_flow *flow,
12431               const struct rte_flow_action *actions,
12432               void *data,
12433               struct rte_flow_error *error)
12434 {
12435         int ret = -EINVAL;
12436
12437         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
12438                 switch (actions->type) {
12439                 case RTE_FLOW_ACTION_TYPE_VOID:
12440                         break;
12441                 case RTE_FLOW_ACTION_TYPE_COUNT:
12442                         ret = flow_dv_query_count(dev, flow, data, error);
12443                         break;
12444                 case RTE_FLOW_ACTION_TYPE_AGE:
12445                         ret = flow_dv_query_age(dev, flow, data, error);
12446                         break;
12447                 default:
12448                         return rte_flow_error_set(error, ENOTSUP,
12449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12450                                                   actions,
12451                                                   "action not supported");
12452                 }
12453         }
12454         return ret;
12455 }
12456
12457 /**
12458  * Destroy the meter table set.
12459  * Lock free (the mutex should be acquired by the caller).
12460  *
12461  * @param[in] dev
12462  *   Pointer to Ethernet device.
12463  * @param[in] tbl
12464  *   Pointer to the meter table set.
12465  *
12466  * @return
12467  *   Always 0.
12468  */
12469 static int
12470 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
12471                         struct mlx5_meter_domains_infos *tbl)
12472 {
12473         struct mlx5_priv *priv = dev->data->dev_private;
12474         struct mlx5_meter_domains_infos *mtd = tbl;
12476
12477         if (!mtd || !priv->config.dv_flow_en)
12478                 return 0;
12479         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
12480                 claim_zero(mlx5_flow_os_destroy_flow
12481                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
12482         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
12483                 claim_zero(mlx5_flow_os_destroy_flow
12484                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
12485         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
12486                 claim_zero(mlx5_flow_os_destroy_flow
12487                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
12488         if (mtd->egress.color_matcher)
12489                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12490                            (mtd->egress.color_matcher));
12491         if (mtd->egress.any_matcher)
12492                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12493                            (mtd->egress.any_matcher));
12494         if (mtd->egress.tbl)
12495                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
12496         if (mtd->egress.sfx_tbl)
12497                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
12498         if (mtd->ingress.color_matcher)
12499                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12500                            (mtd->ingress.color_matcher));
12501         if (mtd->ingress.any_matcher)
12502                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12503                            (mtd->ingress.any_matcher));
12504         if (mtd->ingress.tbl)
12505                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
12506         if (mtd->ingress.sfx_tbl)
12507                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12508                                              mtd->ingress.sfx_tbl);
12509         if (mtd->transfer.color_matcher)
12510                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12511                            (mtd->transfer.color_matcher));
12512         if (mtd->transfer.any_matcher)
12513                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12514                            (mtd->transfer.any_matcher));
12515         if (mtd->transfer.tbl)
12516                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
12517         if (mtd->transfer.sfx_tbl)
12518                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12519                                              mtd->transfer.sfx_tbl);
12520         if (mtd->drop_actn)
12521                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
12522         mlx5_free(mtd);
12523         return 0;
12524 }
12525
12526 /* Number of meter flow actions: count and jump, or count and drop. */
12527 #define METER_ACTIONS 2
12528
12529 /**
12530  * Create the meter and suffix tables for a specific domain.
12531  *
12532  * @param[in] dev
12533  *   Pointer to Ethernet device.
12534  * @param[in,out] mtb
12535  *   Pointer to DV meter table set.
12536  * @param[in] egress
12537  *   Egress table attribute.
12538  * @param[in] transfer
12539  *   Transfer table attribute.
12540  * @param[in] color_reg_c_idx
12541  *   Reg C index for color match.
12542  *
12543  * @return
12544  *   0 on success, -1 otherwise and rte_errno is set.
12545  */
12546 static int
12547 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
12548                            struct mlx5_meter_domains_infos *mtb,
12549                            uint8_t egress, uint8_t transfer,
12550                            uint32_t color_reg_c_idx)
12551 {
12552         struct mlx5_priv *priv = dev->data->dev_private;
12553         struct mlx5_dev_ctx_shared *sh = priv->sh;
12554         struct mlx5_flow_dv_match_params mask = {
12555                 .size = sizeof(mask.buf),
12556         };
12557         struct mlx5_flow_dv_match_params value = {
12558                 .size = sizeof(value.buf),
12559         };
12560         struct mlx5dv_flow_matcher_attr dv_attr = {
12561                 .type = IBV_FLOW_ATTR_NORMAL,
12562                 .priority = 0,
12563                 .match_criteria_enable = 0,
12564                 .match_mask = (void *)&mask,
12565         };
12566         void *actions[METER_ACTIONS];
12567         struct mlx5_meter_domain_info *dtb;
12568         struct rte_flow_error error;
12569         int i = 0;
12570         int ret;
12571
12572         if (transfer)
12573                 dtb = &mtb->transfer;
12574         else if (egress)
12575                 dtb = &mtb->egress;
12576         else
12577                 dtb = &mtb->ingress;
12578         /* Create the meter table with METER level. */
12579         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
12580                                             egress, transfer, false, NULL, 0,
12581                                             0, &error);
12582         if (!dtb->tbl) {
12583                 DRV_LOG(ERR, "Failed to create meter policer table.");
12584                 return -1;
12585         }
12586         /* Create the meter suffix table with SUFFIX level. */
12587         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
12588                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
12589                                             egress, transfer, false, NULL, 0,
12590                                             0, &error);
12591         if (!dtb->sfx_tbl) {
12592                 DRV_LOG(ERR, "Failed to create meter suffix table.");
12593                 return -1;
12594         }
12595         /* Create matchers, Any and Color. */
12596         dv_attr.priority = 3;
12597         dv_attr.match_criteria_enable = 0;
12598         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12599                                                &dtb->any_matcher);
12600         if (ret) {
12601                 DRV_LOG(ERR, "Failed to create meter"
12602                              " policer default matcher.");
12603                 goto error_exit;
12604         }
12605         dv_attr.priority = 0;
12606         dv_attr.match_criteria_enable =
12607                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
12608         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
12609                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
12610         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12611                                                &dtb->color_matcher);
12612         if (ret) {
12613                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
12614                 goto error_exit;
12615         }
12616         if (mtb->count_actns[RTE_MTR_DROPPED])
12617                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
12618         actions[i++] = mtb->drop_actn;
12619         /* Default rule: lowest priority, match any, actions: drop. */
12620         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
12621                                        actions,
12622                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
12623         if (ret) {
12624                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
12625                 goto error_exit;
12626         }
12627         return 0;
12628 error_exit:
12629         return -1;
12630 }
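
/*
 * Editorial sketch of the per-domain layout built above:
 *
 *	METER level table (dtb->tbl)
 *	  priority 0: color matcher on REG_C[color_reg_c_idx]
 *	              (policer rules added later by
 *	               flow_dv_create_policer_forward_rule())
 *	  priority 3: any matcher -> [count] + drop (default rule)
 *	SUFFIX level table (dtb->sfx_tbl)
 *	  jump target for packets the policer lets through
 */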
12631
12632 /**
12633  * Create the needed meter and suffix tables.
12634  * Lock free (the mutex should be acquired by the caller).
12635  *
12636  * @param[in] dev
12637  *   Pointer to Ethernet device.
12638  * @param[in] fm
12639  *   Pointer to the flow meter.
12640  *
12641  * @return
12642  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12643  */
12644 static struct mlx5_meter_domains_infos *
12645 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
12646                        const struct mlx5_flow_meter *fm)
12647 {
12648         struct mlx5_priv *priv = dev->data->dev_private;
12649         struct mlx5_meter_domains_infos *mtb;
12650         int ret;
12651         int i;
12652
12653         if (!priv->mtr_en) {
12654                 rte_errno = ENOTSUP;
12655                 return NULL;
12656         }
12657         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
12658         if (!mtb) {
12659                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
12660                 return NULL;
12661         }
12662         /* Create the meter count actions. */
12663         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
12664                 struct mlx5_flow_counter *cnt;
12665                 if (!fm->policer_stats.cnt[i])
12666                         continue;
12667                 cnt = flow_dv_counter_get_by_idx(dev,
12668                       fm->policer_stats.cnt[i], NULL);
12669                 mtb->count_actns[i] = cnt->action;
12670         }
12671         /* Create drop action. */
12672         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
12673         if (ret) {
12674                 DRV_LOG(ERR, "Failed to create drop action.");
12675                 goto error_exit;
12676         }
12677         /* Egress meter table. */
12678         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
12679         if (ret) {
12680                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
12681                 goto error_exit;
12682         }
12683         /* Ingress meter table. */
12684         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
12685         if (ret) {
12686                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
12687                 goto error_exit;
12688         }
12689         /* FDB meter table. */
12690         if (priv->config.dv_esw_en) {
12691                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
12692                                                  priv->mtr_color_reg);
12693                 if (ret) {
12694                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
12695                         goto error_exit;
12696                 }
12697         }
12698         return mtb;
12699 error_exit:
12700         flow_dv_destroy_mtr_tbl(dev, mtb);
12701         return NULL;
12702 }
12703
12704 /**
12705  * Destroy the policer rules of a meter domain.
12706  *
12707  * @param[in] dt
12708  *   Pointer to the domain table.
12709  */
12710 static void
12711 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12712 {
12713         int i;
12714
12715         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12716                 if (dt->policer_rules[i]) {
12717                         claim_zero(mlx5_flow_os_destroy_flow
12718                                    (dt->policer_rules[i]));
12719                         dt->policer_rules[i] = NULL;
12720                 }
12721         }
12722         if (dt->jump_actn) {
12723                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12724                 dt->jump_actn = NULL;
12725         }
12726 }
12727
12728 /**
12729  * Destroy policer rules.
12730  *
12731  * @param[in] dev
12732  *   Pointer to Ethernet device.
12733  * @param[in] fm
12734  *   Pointer to flow meter structure.
12735  * @param[in] attr
12736  *   Pointer to flow attributes.
12737  *
12738  * @return
12739  *   Always 0.
12740  */
12741 static int
12742 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12743                               const struct mlx5_flow_meter *fm,
12744                               const struct rte_flow_attr *attr)
12745 {
12746         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12747
12748         if (!mtb)
12749                 return 0;
12750         if (attr->egress)
12751                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
12752         if (attr->ingress)
12753                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12754         if (attr->transfer)
12755                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12756         return 0;
12757 }
12758
12759 /**
12760  * Create the policer rules for a specific meter domain.
12761  *
12762  * @param[in] fm
12763  *   Pointer to flow meter structure.
12764  * @param[in] dtb
12765  *   Pointer to the meter domain table.
12766  * @param[in] mtr_reg_c
12767  *   Color match REG_C.
12768  *
12769  * @return
12770  *   0 on success, -1 otherwise.
12771  */
12772 static int
12773 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
12774                                     struct mlx5_meter_domain_info *dtb,
12775                                     uint8_t mtr_reg_c)
12776 {
12777         struct mlx5_flow_dv_match_params matcher = {
12778                 .size = sizeof(matcher.buf),
12779         };
12780         struct mlx5_flow_dv_match_params value = {
12781                 .size = sizeof(value.buf),
12782         };
12783         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12784         void *actions[METER_ACTIONS];
12785         int i;
12786         int ret = 0;
12787
12788         /* Create jump action. */
12789         if (!dtb->jump_actn)
12790                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12791                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
12792         if (ret) {
12793                 DRV_LOG(ERR, "Failed to create policer jump action.");
12794                 goto error;
12795         }
12796         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12797                 int j = 0;
12798
12799                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
12800                                        rte_col_2_mlx5_col(i), UINT8_MAX);
12801                 if (mtb->count_actns[i])
12802                         actions[j++] = mtb->count_actns[i];
12803                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
12804                         actions[j++] = mtb->drop_actn;
12805                 else
12806                         actions[j++] = dtb->jump_actn;
12807                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
12808                                                (void *)&value, j, actions,
12809                                                &dtb->policer_rules[i]);
12810                 if (ret) {
12811                         DRV_LOG(ERR, "Failed to create policer rule.");
12812                         goto error;
12813                 }
12814         }
12815         return 0;
12816 error:
12817         rte_errno = errno;
12818         return -1;
12819 }
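
/*
 * Editorial sketch of the rules installed above, one per color below
 * RTE_MTR_DROPPED:
 *
 *	match REG_C == color -> [count(color)] +
 *		drop                  (MTR_POLICER_ACTION_DROP)
 *		or jump to suffix tbl (otherwise)
 *
 * The lowest-priority "any" rule created in flow_dv_prepare_mtr_tables()
 * drops whatever does not match a color.
 */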
12820
12821 /**
12822  * Create policer rules.
12823  *
12824  * @param[in] dev
12825  *   Pointer to Ethernet device.
12826  * @param[in] fm
12827  *   Pointer to flow meter structure.
12828  * @param[in] attr
12829  *   Pointer to flow attributes.
12830  *
12831  * @return
12832  *   0 on success, -1 otherwise.
12833  */
12834 static int
12835 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12836                              struct mlx5_flow_meter *fm,
12837                              const struct rte_flow_attr *attr)
12838 {
12839         struct mlx5_priv *priv = dev->data->dev_private;
12840         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12841         int ret;
12842
12843         if (attr->egress) {
12844                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12845                                                 priv->mtr_color_reg);
12846                 if (ret) {
12847                         DRV_LOG(ERR, "Failed to create egress policer.");
12848                         goto error;
12849                 }
12850         }
12851         if (attr->ingress) {
12852                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12853                                                 priv->mtr_color_reg);
12854                 if (ret) {
12855                         DRV_LOG(ERR, "Failed to create ingress policer.");
12856                         goto error;
12857                 }
12858         }
12859         if (attr->transfer) {
12860                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12861                                                 priv->mtr_color_reg);
12862                 if (ret) {
12863                         DRV_LOG(ERR, "Failed to create transfer policer.");
12864                         goto error;
12865                 }
12866         }
12867         return 0;
12868 error:
12869         flow_dv_destroy_policer_rules(dev, fm, attr);
12870         return -1;
12871 }
12872
12873 /**
12874  * Discover whether batch counters are supported in the root table.
12875  *
12876  * Create a simple flow with an invalid counter and a drop action on the
12877  * root table to check whether batch counters with offset are supported.
12878  *
12879  * @param[in] dev
12880  *   Pointer to rte_eth_dev structure.
12881  *
12882  * @return
12883  *   0 on success, a negative errno value otherwise and rte_errno is set.
12884  */
12885 int
12886 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12887 {
12888         struct mlx5_priv *priv = dev->data->dev_private;
12889         struct mlx5_dev_ctx_shared *sh = priv->sh;
12890         struct mlx5_flow_dv_match_params mask = {
12891                 .size = sizeof(mask.buf),
12892         };
12893         struct mlx5_flow_dv_match_params value = {
12894                 .size = sizeof(value.buf),
12895         };
12896         struct mlx5dv_flow_matcher_attr dv_attr = {
12897                 .type = IBV_FLOW_ATTR_NORMAL,
12898                 .priority = 0,
12899                 .match_criteria_enable = 0,
12900                 .match_mask = (void *)&mask,
12901         };
12902         void *actions[2] = { 0 };
12903         struct mlx5_flow_tbl_resource *tbl = NULL;
12904         struct mlx5_devx_obj *dcs = NULL;
12905         void *matcher = NULL;
12906         void *flow = NULL;
12907         int ret = -1;
12908
12909         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12910         if (!tbl)
12911                 goto err;
12912         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12913         if (!dcs)
12914                 goto err;
12915         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12916                                                     &actions[0]);
12917         if (ret)
12918                 goto err;
12919         actions[1] = priv->drop_queue.hrxq->action;
12920         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12921         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12922                                                &matcher);
12923         if (ret)
12924                 goto err;
12925         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12926                                        actions, &flow);
12927 err:
12928         /*
12929          * If batch counters with offset are not supported, the driver
12930          * will not validate the invalid offset value, so flow creation
12931          * should succeed and batch counters are then not supported in
12932          * the root table.
12933          * Otherwise, if flow creation fails, counter offset is supported.
12934          */
12935         if (flow) {
12936                 DRV_LOG(INFO, "Batch counter is not supported in root "
12937                               "table. Switch to fallback mode.");
12938                 rte_errno = ENOTSUP;
12939                 ret = -rte_errno;
12940                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12941         } else {
12942                 /* Check the matcher to make sure validation failed at flow create. */
12943                 if (!matcher || errno != EINVAL)
12944                         DRV_LOG(ERR, "Unexpected error in counter offset "
12945                                      "support detection");
12946                 ret = 0;
12947         }
12948         if (actions[0])
12949                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
12950         if (matcher)
12951                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12952         if (tbl)
12953                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12954         if (dcs)
12955                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12956         return ret;
12957 }
12958
12959 /**
12960  * Query a devx counter.
12961  *
12962  * @param[in] dev
12963  *   Pointer to the Ethernet device structure.
12964  * @param[in] counter
12965  *   Index of the flow counter.
12966  * @param[in] clear
12967  *   Set to clear the counter statistics.
12968  * @param[out] pkts
12969  *   The statistics value of packets.
12970  * @param[out] bytes
12971  *   The statistics value of bytes.
12972  *
12973  * @return
12974  *   0 on success, -1 otherwise.
12975  */
12976 static int
12977 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12978                       uint64_t *pkts, uint64_t *bytes)
12979 {
12980         struct mlx5_priv *priv = dev->data->dev_private;
12981         struct mlx5_flow_counter *cnt;
12982         uint64_t inn_pkts, inn_bytes;
12983         int ret;
12984
12985         if (!priv->config.devx)
12986                 return -1;
12987
12988         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12989         if (ret)
12990                 return -1;
12991         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12992         *pkts = inn_pkts - cnt->hits;
12993         *bytes = inn_bytes - cnt->bytes;
12994         if (clear) {
12995                 cnt->hits = inn_pkts;
12996                 cnt->bytes = inn_bytes;
12997         }
12998         return 0;
12999 }
13000
13001 /**
13002  * Get aged-out flows.
13003  *
13004  * @param[in] dev
13005  *   Pointer to the Ethernet device structure.
13006  * @param[in] context
13007  *   The address of an array of pointers to the aged-out flow contexts.
13008  * @param[in] nb_contexts
13009  *   The length of the context array.
13010  * @param[out] error
13011  *   Perform verbose error reporting if not NULL. Initialized in case of
13012  *   error only.
13013  *
13014  * @return
13015  *   The number of contexts reported on success, otherwise a negative
13016  *   errno value.
13017  *   If nb_contexts is 0, return the total number of aged-out contexts;
13018  *   if nb_contexts is not 0, return the number of aged-out flows
13019  *   reported in the context array.
13020  */
13021 static int
13022 flow_get_aged_flows(struct rte_eth_dev *dev,
13023                     void **context,
13024                     uint32_t nb_contexts,
13025                     struct rte_flow_error *error)
13026 {
13027         struct mlx5_priv *priv = dev->data->dev_private;
13028         struct mlx5_age_info *age_info;
13029         struct mlx5_age_param *age_param;
13030         struct mlx5_flow_counter *counter;
13031         struct mlx5_aso_age_action *act;
13032         int nb_flows = 0;
13033
13034         if (nb_contexts && !context)
13035                 return rte_flow_error_set(error, EINVAL,
13036                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13037                                           NULL, "empty context");
13038         age_info = GET_PORT_AGE_INFO(priv);
13039         rte_spinlock_lock(&age_info->aged_sl);
13040         LIST_FOREACH(act, &age_info->aged_aso, next) {
13041                 nb_flows++;
13042                 if (nb_contexts) {
13043                         context[nb_flows - 1] =
13044                                                 act->age_params.context;
13045                         if (!(--nb_contexts))
13046                                 break;
13047                 }
13048         }
13049         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
13050                 nb_flows++;
13051                 if (nb_contexts) {
13052                         age_param = MLX5_CNT_TO_AGE(counter);
13053                         context[nb_flows - 1] = age_param->context;
13054                         if (!(--nb_contexts))
13055                                 break;
13056                 }
13057         }
13058         rte_spinlock_unlock(&age_info->aged_sl);
13059         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13060         return nb_flows;
13061 }
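
/*
 * Editorial usage sketch (not part of the driver): the usual two-call
 * pattern over this callback via the generic API:
 *
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 *	if (n > 0) {
 *		void **ctx = malloc(sizeof(void *) * n);
 *
 *		n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *		destroy_aged_flows(ctx, n);
 *		free(ctx);
 *	}
 *
 * destroy_aged_flows() is a hypothetical application helper; the contexts
 * come from the AGE action configuration (shared AGE actions default to
 * the action handle, as set in flow_dv_action_create()).
 */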
13062
13063 /*
13064  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
13065  */
13066 static uint32_t
13067 flow_dv_counter_allocate(struct rte_eth_dev *dev)
13068 {
13069         return flow_dv_counter_alloc(dev, 0);
13070 }
13071
13072 /**
13073  * Validate shared action.
13074  * Dispatcher for action type specific validation.
13075  *
13076  * @param[in] dev
13077  *   Pointer to the Ethernet device structure.
13078  * @param[in] conf
13079  *   Shared action configuration.
13080  * @param[in] action
13081  *   The shared action object to validate.
13082  * @param[out] error
13083  *   Perform verbose error reporting if not NULL. Initialized in case of
13084  *   error only.
13085  *
13086  * @return
13087  *   0 on success, otherwise negative errno value.
13088  */
13089 static int
13090 flow_dv_action_validate(struct rte_eth_dev *dev,
13091                         const struct rte_flow_shared_action_conf *conf,
13092                         const struct rte_flow_action *action,
13093                         struct rte_flow_error *err)
13094 {
13095         struct mlx5_priv *priv = dev->data->dev_private;
13096
13097         RTE_SET_USED(conf);
13098         switch (action->type) {
13099         case RTE_FLOW_ACTION_TYPE_RSS:
13100                 return mlx5_validate_action_rss(dev, action, err);
13101         case RTE_FLOW_ACTION_TYPE_AGE:
13102                 if (!priv->sh->aso_age_mng)
13103                         return rte_flow_error_set(err, ENOTSUP,
13104                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13105                                                 NULL,
13106                                              "shared age action not supported");
13107                 return flow_dv_validate_action_age(0, action, dev, err);
13108         default:
13109                 return rte_flow_error_set(err, ENOTSUP,
13110                                           RTE_FLOW_ERROR_TYPE_ACTION,
13111                                           NULL,
13112                                           "action type not supported");
13113         }
13114 }
13115
13116 static int
13117 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
13118 {
13119         struct mlx5_priv *priv = dev->data->dev_private;
13120         int ret = 0;
13121
13122         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
13123                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
13124                                                 flags);
13125                 if (ret != 0)
13126                         return ret;
13127         }
13128         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
13129                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
13130                 if (ret != 0)
13131                         return ret;
13132         }
13133         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
13134                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
13135                 if (ret != 0)
13136                         return ret;
13137         }
13138         return 0;
13139 }
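
/*
 * Editorial usage sketch (not part of the driver): this callback backs
 * the PMD-specific flow sync API declared in rte_pmd_mlx5.h:
 *
 *	ret = rte_pmd_mlx5_sync_flow(port_id,
 *				     MLX5_DOMAIN_BIT_NIC_RX |
 *				     MLX5_DOMAIN_BIT_FDB);
 *
 * Domains that do not exist on the device (e.g. no FDB without E-Switch)
 * are silently skipped above.
 */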
13140
13141 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
13142         .validate = flow_dv_validate,
13143         .prepare = flow_dv_prepare,
13144         .translate = flow_dv_translate,
13145         .apply = flow_dv_apply,
13146         .remove = flow_dv_remove,
13147         .destroy = flow_dv_destroy,
13148         .query = flow_dv_query,
13149         .create_mtr_tbls = flow_dv_create_mtr_tbl,
13150         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
13151         .create_policer_rules = flow_dv_create_policer_rules,
13152         .destroy_policer_rules = flow_dv_destroy_policer_rules,
13153         .counter_alloc = flow_dv_counter_allocate,
13154         .counter_free = flow_dv_counter_free,
13155         .counter_query = flow_dv_counter_query,
13156         .get_aged_flows = flow_get_aged_flows,
13157         .action_validate = flow_dv_action_validate,
13158         .action_create = flow_dv_action_create,
13159         .action_destroy = flow_dv_action_destroy,
13160         .action_update = flow_dv_action_update,
13161         .action_query = flow_dv_action_query,
13162         .sync_domain = flow_dv_sync_domain,
13163 };
13164
13165 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
13166