3d69957da7d960d23482cd0f6a65edf5df5e9f08
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 #include <rte_common.h>
12 #include <rte_ether.h>
13 #include <rte_ethdev_driver.h>
14 #include <rte_flow.h>
15 #include <rte_flow_driver.h>
16 #include <rte_malloc.h>
17 #include <rte_cycles.h>
18 #include <rte_ip.h>
19 #include <rte_gre.h>
20 #include <rte_vxlan.h>
21 #include <rte_gtp.h>
22 #include <rte_eal_paging.h>
23 #include <rte_mpls.h>
24
25 #include <mlx5_glue.h>
26 #include <mlx5_devx_cmds.h>
27 #include <mlx5_prm.h>
28 #include <mlx5_malloc.h>
29
30 #include "mlx5_defs.h"
31 #include "mlx5.h"
32 #include "mlx5_common_os.h"
33 #include "mlx5_flow.h"
34 #include "mlx5_flow_os.h"
35 #include "mlx5_rxtx.h"
36 #include "rte_pmd_mlx5.h"
37
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39
/*
 * Compatibility fallbacks: when the rdma-core headers in use do not
 * provide these definitions, supply neutral values so the code below
 * still compiles (the corresponding features are simply unavailable).
 */
#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions: TCI is PCP(3b) | DEI(1b) | VID(12b). */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
60
/*
 * Collected L3/L4 layer attributes of a flow, used to select the proper
 * modify-header field table (IPv4 vs IPv6, TCP vs UDP).
 * The anonymous bit-field struct and 'attr' overlay the same 32 bits so
 * all flags can be cleared at once via 'attr = 0'.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the attributes are computed. */
		uint32_t ipv4:1;  /* Outermost L3 is IPv4. */
		uint32_t ipv6:1;  /* Outermost L3 is IPv6. */
		uint32_t tcp:1;   /* Outermost L4 is TCP. */
		uint32_t udp:1;   /* Outermost L4 is UDP. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* Raw access to all flags at once. */
};
72
/* Forward declarations of resource release helpers defined later in
 * this file; needed because creation paths above them take references.
 */
static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				      uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);
86
87 /**
88  * Initialize flow attributes structure according to flow items' types.
89  *
90  * flow_dv_validate() avoids multiple L3/L4 layers cases other than tunnel
91  * mode. For tunnel mode, the items to be modified are the outermost ones.
92  *
93  * @param[in] item
94  *   Pointer to item specification.
95  * @param[out] attr
96  *   Pointer to flow attributes structure.
97  * @param[in] dev_flow
98  *   Pointer to the sub flow.
99  * @param[in] tunnel_decap
100  *   Whether action is after tunnel decapsulation.
101  */
102 static void
103 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
104                   struct mlx5_flow *dev_flow, bool tunnel_decap)
105 {
106         uint64_t layers = dev_flow->handle->layers;
107
108         /*
109          * If layers is already initialized, it means this dev_flow is the
110          * suffix flow, the layers flags is set by the prefix flow. Need to
111          * use the layer flags from prefix flow as the suffix flow may not
112          * have the user defined items as the flow is split.
113          */
114         if (layers) {
115                 if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
116                         attr->ipv4 = 1;
117                 else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
118                         attr->ipv6 = 1;
119                 if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
120                         attr->tcp = 1;
121                 else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
122                         attr->udp = 1;
123                 attr->valid = 1;
124                 return;
125         }
126         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
127                 uint8_t next_protocol = 0xff;
128                 switch (item->type) {
129                 case RTE_FLOW_ITEM_TYPE_GRE:
130                 case RTE_FLOW_ITEM_TYPE_NVGRE:
131                 case RTE_FLOW_ITEM_TYPE_VXLAN:
132                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
133                 case RTE_FLOW_ITEM_TYPE_GENEVE:
134                 case RTE_FLOW_ITEM_TYPE_MPLS:
135                         if (tunnel_decap)
136                                 attr->attr = 0;
137                         break;
138                 case RTE_FLOW_ITEM_TYPE_IPV4:
139                         if (!attr->ipv6)
140                                 attr->ipv4 = 1;
141                         if (item->mask != NULL &&
142                             ((const struct rte_flow_item_ipv4 *)
143                             item->mask)->hdr.next_proto_id)
144                                 next_protocol =
145                                     ((const struct rte_flow_item_ipv4 *)
146                                       (item->spec))->hdr.next_proto_id &
147                                     ((const struct rte_flow_item_ipv4 *)
148                                       (item->mask))->hdr.next_proto_id;
149                         if ((next_protocol == IPPROTO_IPIP ||
150                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
151                                 attr->attr = 0;
152                         break;
153                 case RTE_FLOW_ITEM_TYPE_IPV6:
154                         if (!attr->ipv4)
155                                 attr->ipv6 = 1;
156                         if (item->mask != NULL &&
157                             ((const struct rte_flow_item_ipv6 *)
158                             item->mask)->hdr.proto)
159                                 next_protocol =
160                                     ((const struct rte_flow_item_ipv6 *)
161                                       (item->spec))->hdr.proto &
162                                     ((const struct rte_flow_item_ipv6 *)
163                                       (item->mask))->hdr.proto;
164                         if ((next_protocol == IPPROTO_IPIP ||
165                             next_protocol == IPPROTO_IPV6) && tunnel_decap)
166                                 attr->attr = 0;
167                         break;
168                 case RTE_FLOW_ITEM_TYPE_UDP:
169                         if (!attr->tcp)
170                                 attr->udp = 1;
171                         break;
172                 case RTE_FLOW_ITEM_TYPE_TCP:
173                         if (!attr->udp)
174                                 attr->tcp = 1;
175                         break;
176                 default:
177                         break;
178                 }
179         }
180         attr->valid = 1;
181 }
182
183 /**
184  * Convert rte_mtr_color to mlx5 color.
185  *
186  * @param[in] rcol
187  *   rte_mtr_color.
188  *
189  * @return
190  *   mlx5 color.
191  */
192 static int
193 rte_col_2_mlx5_col(enum rte_color rcol)
194 {
195         switch (rcol) {
196         case RTE_COLOR_GREEN:
197                 return MLX5_FLOW_COLOR_GREEN;
198         case RTE_COLOR_YELLOW:
199                 return MLX5_FLOW_COLOR_YELLOW;
200         case RTE_COLOR_RED:
201                 return MLX5_FLOW_COLOR_RED;
202         default:
203                 break;
204         }
205         return MLX5_FLOW_COLOR_UNDEFINED;
206 }
207
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Hardware field identifier. */
};

/* Rewritable Ethernet header fields; each table below ends with a
 * {0, 0, 0} sentinel (size == 0 terminates iteration).
 */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

/* Rewritable IPv4 header fields. */
struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

/* Rewritable IPv6 header fields; 128-bit addresses are split into
 * four 32-bit hardware fields each.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

/* Rewritable UDP header fields. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

/* Rewritable TCP header fields. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
263
264 static void
265 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
266                           uint8_t next_protocol, uint64_t *item_flags,
267                           int *tunnel)
268 {
269         MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
270                     item->type == RTE_FLOW_ITEM_TYPE_IPV6);
271         if (next_protocol == IPPROTO_IPIP) {
272                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
273                 *tunnel = 1;
274         }
275         if (next_protocol == IPPROTO_IPV6) {
276                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
277                 *tunnel = 1;
278         }
279 }
280
281 /* Update VLAN's VID/PCP based on input rte_flow_action.
282  *
283  * @param[in] action
284  *   Pointer to struct rte_flow_action.
285  * @param[out] vlan
286  *   Pointer to struct rte_vlan_hdr.
287  */
288 static void
289 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
290                          struct rte_vlan_hdr *vlan)
291 {
292         uint16_t vlan_tci;
293         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
294                 vlan_tci =
295                     ((const struct rte_flow_action_of_set_vlan_pcp *)
296                                                action->conf)->vlan_pcp;
297                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
298                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
299                 vlan->vlan_tci |= vlan_tci;
300         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
301                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
302                 vlan->vlan_tci |= rte_be_to_cpu_16
303                     (((const struct rte_flow_action_of_set_vlan_vid *)
304                                              action->conf)->vlan_vid);
305         }
306 }
307
308 /**
309  * Fetch 1, 2, 3 or 4 byte field from the byte array
310  * and return as unsigned integer in host-endian format.
311  *
312  * @param[in] data
313  *   Pointer to data array.
314  * @param[in] size
315  *   Size of field to extract.
316  *
317  * @return
318  *   converted field in host endian format.
319  */
320 static inline uint32_t
321 flow_dv_fetch_field(const uint8_t *data, uint32_t size)
322 {
323         uint32_t ret;
324
325         switch (size) {
326         case 1:
327                 ret = *data;
328                 break;
329         case 2:
330                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
331                 break;
332         case 3:
333                 ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
334                 ret = (ret << 8) | *(data + sizeof(uint16_t));
335                 break;
336         case 4:
337                 ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
338                 break;
339         default:
340                 MLX5_ASSERT(false);
341                 ret = 0;
342                 break;
343         }
344         return ret;
345 }
346
/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented as in big-endian format either.
	 * Mask must be always present, it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		/* A zero mask means this field was not requested - skip. */
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		/* A full 32-bit width is encoded as length 0. */
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			/* Negative dcopy offset mirrors the source offset. */
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	/* No command was emitted: the mask selected no field at all. */
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
448
449 /**
450  * Convert modify-header set IPv4 address action to DV specification.
451  *
452  * @param[in,out] resource
453  *   Pointer to the modify-header resource.
454  * @param[in] action
455  *   Pointer to action specification.
456  * @param[out] error
457  *   Pointer to the error structure.
458  *
459  * @return
460  *   0 on success, a negative errno value otherwise and rte_errno is set.
461  */
462 static int
463 flow_dv_convert_action_modify_ipv4
464                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
465                          const struct rte_flow_action *action,
466                          struct rte_flow_error *error)
467 {
468         const struct rte_flow_action_set_ipv4 *conf =
469                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
470         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
471         struct rte_flow_item_ipv4 ipv4;
472         struct rte_flow_item_ipv4 ipv4_mask;
473
474         memset(&ipv4, 0, sizeof(ipv4));
475         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
476         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
477                 ipv4.hdr.src_addr = conf->ipv4_addr;
478                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
479         } else {
480                 ipv4.hdr.dst_addr = conf->ipv4_addr;
481                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
482         }
483         item.spec = &ipv4;
484         item.mask = &ipv4_mask;
485         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
486                                              MLX5_MODIFICATION_TYPE_SET, error);
487 }
488
489 /**
490  * Convert modify-header set IPv6 address action to DV specification.
491  *
492  * @param[in,out] resource
493  *   Pointer to the modify-header resource.
494  * @param[in] action
495  *   Pointer to action specification.
496  * @param[out] error
497  *   Pointer to the error structure.
498  *
499  * @return
500  *   0 on success, a negative errno value otherwise and rte_errno is set.
501  */
502 static int
503 flow_dv_convert_action_modify_ipv6
504                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
505                          const struct rte_flow_action *action,
506                          struct rte_flow_error *error)
507 {
508         const struct rte_flow_action_set_ipv6 *conf =
509                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
510         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
511         struct rte_flow_item_ipv6 ipv6;
512         struct rte_flow_item_ipv6 ipv6_mask;
513
514         memset(&ipv6, 0, sizeof(ipv6));
515         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
516         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
517                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
518                        sizeof(ipv6.hdr.src_addr));
519                 memcpy(&ipv6_mask.hdr.src_addr,
520                        &rte_flow_item_ipv6_mask.hdr.src_addr,
521                        sizeof(ipv6.hdr.src_addr));
522         } else {
523                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
524                        sizeof(ipv6.hdr.dst_addr));
525                 memcpy(&ipv6_mask.hdr.dst_addr,
526                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
527                        sizeof(ipv6.hdr.dst_addr));
528         }
529         item.spec = &ipv6;
530         item.mask = &ipv6_mask;
531         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
532                                              MLX5_MODIFICATION_TYPE_SET, error);
533 }
534
535 /**
536  * Convert modify-header set MAC address action to DV specification.
537  *
538  * @param[in,out] resource
539  *   Pointer to the modify-header resource.
540  * @param[in] action
541  *   Pointer to action specification.
542  * @param[out] error
543  *   Pointer to the error structure.
544  *
545  * @return
546  *   0 on success, a negative errno value otherwise and rte_errno is set.
547  */
548 static int
549 flow_dv_convert_action_modify_mac
550                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
551                          const struct rte_flow_action *action,
552                          struct rte_flow_error *error)
553 {
554         const struct rte_flow_action_set_mac *conf =
555                 (const struct rte_flow_action_set_mac *)(action->conf);
556         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
557         struct rte_flow_item_eth eth;
558         struct rte_flow_item_eth eth_mask;
559
560         memset(&eth, 0, sizeof(eth));
561         memset(&eth_mask, 0, sizeof(eth_mask));
562         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
563                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
564                        sizeof(eth.src.addr_bytes));
565                 memcpy(&eth_mask.src.addr_bytes,
566                        &rte_flow_item_eth_mask.src.addr_bytes,
567                        sizeof(eth_mask.src.addr_bytes));
568         } else {
569                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
570                        sizeof(eth.dst.addr_bytes));
571                 memcpy(&eth_mask.dst.addr_bytes,
572                        &rte_flow_item_eth_mask.dst.addr_bytes,
573                        sizeof(eth_mask.dst.addr_bytes));
574         }
575         item.spec = &eth;
576         item.mask = &eth_mask;
577         return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
578                                              MLX5_MODIFICATION_TYPE_SET, error);
579 }
580
581 /**
582  * Convert modify-header set VLAN VID action to DV specification.
583  *
584  * @param[in,out] resource
585  *   Pointer to the modify-header resource.
586  * @param[in] action
587  *   Pointer to action specification.
588  * @param[out] error
589  *   Pointer to the error structure.
590  *
591  * @return
592  *   0 on success, a negative errno value otherwise and rte_errno is set.
593  */
594 static int
595 flow_dv_convert_action_modify_vlan_vid
596                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
597                          const struct rte_flow_action *action,
598                          struct rte_flow_error *error)
599 {
600         const struct rte_flow_action_of_set_vlan_vid *conf =
601                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
602         int i = resource->actions_num;
603         struct mlx5_modification_cmd *actions = resource->actions;
604         struct field_modify_info *field = modify_vlan_out_first_vid;
605
606         if (i >= MLX5_MAX_MODIFY_NUM)
607                 return rte_flow_error_set(error, EINVAL,
608                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
609                          "too many items to modify");
610         actions[i] = (struct mlx5_modification_cmd) {
611                 .action_type = MLX5_MODIFICATION_TYPE_SET,
612                 .field = field->id,
613                 .length = field->size,
614                 .offset = field->offset,
615         };
616         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
617         actions[i].data1 = conf->vlan_vid;
618         actions[i].data1 = actions[i].data1 << 16;
619         resource->actions_num = ++i;
620         return 0;
621 }
622
623 /**
624  * Convert modify-header set TP action to DV specification.
625  *
626  * @param[in,out] resource
627  *   Pointer to the modify-header resource.
628  * @param[in] action
629  *   Pointer to action specification.
630  * @param[in] items
631  *   Pointer to rte_flow_item objects list.
632  * @param[in] attr
633  *   Pointer to flow attributes structure.
634  * @param[in] dev_flow
635  *   Pointer to the sub flow.
636  * @param[in] tunnel_decap
637  *   Whether action is after tunnel decapsulation.
638  * @param[out] error
639  *   Pointer to the error structure.
640  *
641  * @return
642  *   0 on success, a negative errno value otherwise and rte_errno is set.
643  */
644 static int
645 flow_dv_convert_action_modify_tp
646                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
647                          const struct rte_flow_action *action,
648                          const struct rte_flow_item *items,
649                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
650                          bool tunnel_decap, struct rte_flow_error *error)
651 {
652         const struct rte_flow_action_set_tp *conf =
653                 (const struct rte_flow_action_set_tp *)(action->conf);
654         struct rte_flow_item item;
655         struct rte_flow_item_udp udp;
656         struct rte_flow_item_udp udp_mask;
657         struct rte_flow_item_tcp tcp;
658         struct rte_flow_item_tcp tcp_mask;
659         struct field_modify_info *field;
660
661         if (!attr->valid)
662                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
663         if (attr->udp) {
664                 memset(&udp, 0, sizeof(udp));
665                 memset(&udp_mask, 0, sizeof(udp_mask));
666                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
667                         udp.hdr.src_port = conf->port;
668                         udp_mask.hdr.src_port =
669                                         rte_flow_item_udp_mask.hdr.src_port;
670                 } else {
671                         udp.hdr.dst_port = conf->port;
672                         udp_mask.hdr.dst_port =
673                                         rte_flow_item_udp_mask.hdr.dst_port;
674                 }
675                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
676                 item.spec = &udp;
677                 item.mask = &udp_mask;
678                 field = modify_udp;
679         } else {
680                 MLX5_ASSERT(attr->tcp);
681                 memset(&tcp, 0, sizeof(tcp));
682                 memset(&tcp_mask, 0, sizeof(tcp_mask));
683                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
684                         tcp.hdr.src_port = conf->port;
685                         tcp_mask.hdr.src_port =
686                                         rte_flow_item_tcp_mask.hdr.src_port;
687                 } else {
688                         tcp.hdr.dst_port = conf->port;
689                         tcp_mask.hdr.dst_port =
690                                         rte_flow_item_tcp_mask.hdr.dst_port;
691                 }
692                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
693                 item.spec = &tcp;
694                 item.mask = &tcp_mask;
695                 field = modify_tcp;
696         }
697         return flow_dv_convert_modify_action(&item, field, NULL, resource,
698                                              MLX5_MODIFICATION_TYPE_SET, error);
699 }
700
701 /**
702  * Convert modify-header set TTL action to DV specification.
703  *
704  * @param[in,out] resource
705  *   Pointer to the modify-header resource.
706  * @param[in] action
707  *   Pointer to action specification.
708  * @param[in] items
709  *   Pointer to rte_flow_item objects list.
710  * @param[in] attr
711  *   Pointer to flow attributes structure.
712  * @param[in] dev_flow
713  *   Pointer to the sub flow.
714  * @param[in] tunnel_decap
715  *   Whether action is after tunnel decapsulation.
716  * @param[out] error
717  *   Pointer to the error structure.
718  *
719  * @return
720  *   0 on success, a negative errno value otherwise and rte_errno is set.
721  */
722 static int
723 flow_dv_convert_action_modify_ttl
724                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
725                          const struct rte_flow_action *action,
726                          const struct rte_flow_item *items,
727                          union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
728                          bool tunnel_decap, struct rte_flow_error *error)
729 {
730         const struct rte_flow_action_set_ttl *conf =
731                 (const struct rte_flow_action_set_ttl *)(action->conf);
732         struct rte_flow_item item;
733         struct rte_flow_item_ipv4 ipv4;
734         struct rte_flow_item_ipv4 ipv4_mask;
735         struct rte_flow_item_ipv6 ipv6;
736         struct rte_flow_item_ipv6 ipv6_mask;
737         struct field_modify_info *field;
738
739         if (!attr->valid)
740                 flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
741         if (attr->ipv4) {
742                 memset(&ipv4, 0, sizeof(ipv4));
743                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
744                 ipv4.hdr.time_to_live = conf->ttl_value;
745                 ipv4_mask.hdr.time_to_live = 0xFF;
746                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
747                 item.spec = &ipv4;
748                 item.mask = &ipv4_mask;
749                 field = modify_ipv4;
750         } else {
751                 MLX5_ASSERT(attr->ipv6);
752                 memset(&ipv6, 0, sizeof(ipv6));
753                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
754                 ipv6.hdr.hop_limits = conf->ttl_value;
755                 ipv6_mask.hdr.hop_limits = 0xFF;
756                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
757                 item.spec = &ipv6;
758                 item.mask = &ipv6_mask;
759                 field = modify_ipv6;
760         }
761         return flow_dv_convert_modify_action(&item, field, NULL, resource,
762                                              MLX5_MODIFICATION_TYPE_SET, error);
763 }
764
/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * The decrement is implemented as a MLX5_MODIFICATION_TYPE_ADD of 0xFF
 * to the 8-bit TTL/hop-limit field: the addition wraps around modulo
 * 256, which is equivalent to subtracting one.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	/* Deduce L3 protocol from the items if not known yet. */
	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		/* Spec 0xFF is the ADD operand; mask selects the field. */
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
825
826 /**
827  * Convert modify-header increment/decrement TCP Sequence number
828  * to DV specification.
829  *
830  * @param[in,out] resource
831  *   Pointer to the modify-header resource.
832  * @param[in] action
833  *   Pointer to action specification.
834  * @param[out] error
835  *   Pointer to the error structure.
836  *
837  * @return
838  *   0 on success, a negative errno value otherwise and rte_errno is set.
839  */
840 static int
841 flow_dv_convert_action_modify_tcp_seq
842                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
843                          const struct rte_flow_action *action,
844                          struct rte_flow_error *error)
845 {
846         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
847         uint64_t value = rte_be_to_cpu_32(*conf);
848         struct rte_flow_item item;
849         struct rte_flow_item_tcp tcp;
850         struct rte_flow_item_tcp tcp_mask;
851
852         memset(&tcp, 0, sizeof(tcp));
853         memset(&tcp_mask, 0, sizeof(tcp_mask));
854         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
855                 /*
856                  * The HW has no decrement operation, only increment operation.
857                  * To simulate decrement X from Y using increment operation
858                  * we need to add UINT32_MAX X times to Y.
859                  * Each adding of UINT32_MAX decrements Y by 1.
860                  */
861                 value *= UINT32_MAX;
862         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
863         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
864         item.type = RTE_FLOW_ITEM_TYPE_TCP;
865         item.spec = &tcp;
866         item.mask = &tcp_mask;
867         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
868                                              MLX5_MODIFICATION_TYPE_ADD, error);
869 }
870
871 /**
872  * Convert modify-header increment/decrement TCP Acknowledgment number
873  * to DV specification.
874  *
875  * @param[in,out] resource
876  *   Pointer to the modify-header resource.
877  * @param[in] action
878  *   Pointer to action specification.
879  * @param[out] error
880  *   Pointer to the error structure.
881  *
882  * @return
883  *   0 on success, a negative errno value otherwise and rte_errno is set.
884  */
885 static int
886 flow_dv_convert_action_modify_tcp_ack
887                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
888                          const struct rte_flow_action *action,
889                          struct rte_flow_error *error)
890 {
891         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
892         uint64_t value = rte_be_to_cpu_32(*conf);
893         struct rte_flow_item item;
894         struct rte_flow_item_tcp tcp;
895         struct rte_flow_item_tcp tcp_mask;
896
897         memset(&tcp, 0, sizeof(tcp));
898         memset(&tcp_mask, 0, sizeof(tcp_mask));
899         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
900                 /*
901                  * The HW has no decrement operation, only increment operation.
902                  * To simulate decrement X from Y using increment operation
903                  * we need to add UINT32_MAX X times to Y.
904                  * Each adding of UINT32_MAX decrements Y by 1.
905                  */
906                 value *= UINT32_MAX;
907         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
908         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
909         item.type = RTE_FLOW_ITEM_TYPE_TCP;
910         item.spec = &tcp;
911         item.mask = &tcp_mask;
912         return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
913                                              MLX5_MODIFICATION_TYPE_ADD, error);
914 }
915
/*
 * Map metadata register ids (enum modify_reg) to the matching
 * modify-header field codes used in MLX5_MODIFICATION_TYPE_* commands.
 */
static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};
929
930 /**
931  * Convert register set to DV specification.
932  *
933  * @param[in,out] resource
934  *   Pointer to the modify-header resource.
935  * @param[in] action
936  *   Pointer to action specification.
937  * @param[out] error
938  *   Pointer to the error structure.
939  *
940  * @return
941  *   0 on success, a negative errno value otherwise and rte_errno is set.
942  */
943 static int
944 flow_dv_convert_action_set_reg
945                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
946                          const struct rte_flow_action *action,
947                          struct rte_flow_error *error)
948 {
949         const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
950         struct mlx5_modification_cmd *actions = resource->actions;
951         uint32_t i = resource->actions_num;
952
953         if (i >= MLX5_MAX_MODIFY_NUM)
954                 return rte_flow_error_set(error, EINVAL,
955                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
956                                           "too many items to modify");
957         MLX5_ASSERT(conf->id != REG_NON);
958         MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
959         actions[i] = (struct mlx5_modification_cmd) {
960                 .action_type = MLX5_MODIFICATION_TYPE_SET,
961                 .field = reg_to_field[conf->id],
962         };
963         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
964         actions[i].data1 = rte_cpu_to_be_32(conf->data);
965         ++i;
966         resource->actions_num = i;
967         return 0;
968 }
969
970 /**
971  * Convert SET_TAG action to DV specification.
972  *
973  * @param[in] dev
974  *   Pointer to the rte_eth_dev structure.
975  * @param[in,out] resource
976  *   Pointer to the modify-header resource.
977  * @param[in] conf
978  *   Pointer to action specification.
979  * @param[out] error
980  *   Pointer to the error structure.
981  *
982  * @return
983  *   0 on success, a negative errno value otherwise and rte_errno is set.
984  */
985 static int
986 flow_dv_convert_action_set_tag
987                         (struct rte_eth_dev *dev,
988                          struct mlx5_flow_dv_modify_hdr_resource *resource,
989                          const struct rte_flow_action_set_tag *conf,
990                          struct rte_flow_error *error)
991 {
992         rte_be32_t data = rte_cpu_to_be_32(conf->data);
993         rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
994         struct rte_flow_item item = {
995                 .spec = &data,
996                 .mask = &mask,
997         };
998         struct field_modify_info reg_c_x[] = {
999                 [1] = {0, 0, 0},
1000         };
1001         enum mlx5_modification_field reg_type;
1002         int ret;
1003
1004         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
1005         if (ret < 0)
1006                 return ret;
1007         MLX5_ASSERT(ret != REG_NON);
1008         MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
1009         reg_type = reg_to_field[ret];
1010         MLX5_ASSERT(reg_type > 0);
1011         reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
1012         return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
1013                                              MLX5_MODIFICATION_TYPE_SET, error);
1014 }
1015
/**
 * Convert internal COPY_REG action to DV specification.
 *
 * When reg_c[0] is the source or destination only the bits reported in
 * the firmware mask (dv_regc0_mask) may be used, so the copy offset and
 * mask are adjusted accordingly.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	/* By default copy the full 32-bit register. */
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * Mask is ignoring the endianness, because
			 * there is no conversion in datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			/* Copying from reg_c[0]: mask out unusable bits. */
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
1091
/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	/* Limit the mark to what HW reports as supported mark bits. */
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		/*
		 * Only part of reg_c[0] is usable (dv_regc0_mask); shift
		 * data and mask into the available bit range.
		 */
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1148
1149 /**
1150  * Get metadata register index for specified steering domain.
1151  *
1152  * @param[in] dev
1153  *   Pointer to the rte_eth_dev structure.
1154  * @param[in] attr
1155  *   Attributes of flow to determine steering domain.
1156  * @param[out] error
1157  *   Pointer to the error structure.
1158  *
1159  * @return
1160  *   positive index on success, a negative errno value otherwise
1161  *   and rte_errno is set.
1162  */
1163 static enum modify_reg
1164 flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
1165                          const struct rte_flow_attr *attr,
1166                          struct rte_flow_error *error)
1167 {
1168         int reg =
1169                 mlx5_flow_get_reg_id(dev, attr->transfer ?
1170                                           MLX5_METADATA_FDB :
1171                                             attr->egress ?
1172                                             MLX5_METADATA_TX :
1173                                             MLX5_METADATA_RX, 0, error);
1174         if (reg < 0)
1175                 return rte_flow_error_set(error,
1176                                           ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1177                                           NULL, "unavailable "
1178                                           "metadata register");
1179         return reg;
1180 }
1181
/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	/* Register depends on the steering domain (FDB/TX/RX). */
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	/*
	 * In datapath code there is no endianness
	 * conversions for performance reasons, all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		/*
		 * Only the bits in dv_regc0_mask are usable; shift data
		 * and mask into that range (direction depends on host
		 * byte order, see the #if below).
		 */
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
1245
1246 /**
1247  * Convert modify-header set IPv4 DSCP action to DV specification.
1248  *
1249  * @param[in,out] resource
1250  *   Pointer to the modify-header resource.
1251  * @param[in] action
1252  *   Pointer to action specification.
1253  * @param[out] error
1254  *   Pointer to the error structure.
1255  *
1256  * @return
1257  *   0 on success, a negative errno value otherwise and rte_errno is set.
1258  */
1259 static int
1260 flow_dv_convert_action_modify_ipv4_dscp
1261                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1262                          const struct rte_flow_action *action,
1263                          struct rte_flow_error *error)
1264 {
1265         const struct rte_flow_action_set_dscp *conf =
1266                 (const struct rte_flow_action_set_dscp *)(action->conf);
1267         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
1268         struct rte_flow_item_ipv4 ipv4;
1269         struct rte_flow_item_ipv4 ipv4_mask;
1270
1271         memset(&ipv4, 0, sizeof(ipv4));
1272         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
1273         ipv4.hdr.type_of_service = conf->dscp;
1274         ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
1275         item.spec = &ipv4;
1276         item.mask = &ipv4_mask;
1277         return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
1278                                              MLX5_MODIFICATION_TYPE_SET, error);
1279 }
1280
1281 /**
1282  * Convert modify-header set IPv6 DSCP action to DV specification.
1283  *
1284  * @param[in,out] resource
1285  *   Pointer to the modify-header resource.
1286  * @param[in] action
1287  *   Pointer to action specification.
1288  * @param[out] error
1289  *   Pointer to the error structure.
1290  *
1291  * @return
1292  *   0 on success, a negative errno value otherwise and rte_errno is set.
1293  */
1294 static int
1295 flow_dv_convert_action_modify_ipv6_dscp
1296                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
1297                          const struct rte_flow_action *action,
1298                          struct rte_flow_error *error)
1299 {
1300         const struct rte_flow_action_set_dscp *conf =
1301                 (const struct rte_flow_action_set_dscp *)(action->conf);
1302         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
1303         struct rte_flow_item_ipv6 ipv6;
1304         struct rte_flow_item_ipv6 ipv6_mask;
1305
1306         memset(&ipv6, 0, sizeof(ipv6));
1307         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
1308         /*
1309          * Even though the DSCP bits offset of IPv6 is not byte aligned,
1310          * rdma-core only accept the DSCP bits byte aligned start from
1311          * bit 0 to 5 as to be compatible with IPv4. No need to shift the
1312          * bits in IPv6 case as rdma-core requires byte aligned value.
1313          */
1314         ipv6.hdr.vtc_flow = conf->dscp;
1315         ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
1316         item.spec = &ipv6;
1317         item.mask = &ipv6_mask;
1318         return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
1319                                              MLX5_MODIFICATION_TYPE_SET, error);
1320 }
1321
/**
 * Validate MARK item.
 *
 * Only valid in extended metadata mode with a supported and non-empty
 * mark register; checks the mark id and mask against the NIC capability.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr __rte_unused,
			   struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	const struct rte_flow_item_mark *spec = item->spec;
	const struct rte_flow_item_mark *mask = item->mask;
	/* NIC capability limits which mark bits can be matched. */
	const struct rte_flow_item_mark nic_mask = {
		.id = priv->sh->dv_mark_mask,
	};
	int ret;

	if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata feature"
					  " isn't enabled");
	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't supported");
	if (!nic_mask.id)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extended metadata register"
					  " isn't available");
	/* Make sure a register is actually allocated for MARK. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (ret < 0)
		return ret;
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &spec->id,
					  "mark id exceeds the limit");
	if (!mask)
		mask = &nic_mask;
	if (!mask->id)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_mark),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}
1395
1396 /**
1397  * Validate META item.
1398  *
1399  * @param[in] dev
1400  *   Pointer to the rte_eth_dev structure.
1401  * @param[in] item
1402  *   Item specification.
1403  * @param[in] attr
1404  *   Attributes of flow that includes this item.
1405  * @param[out] error
1406  *   Pointer to error structure.
1407  *
1408  * @return
1409  *   0 on success, a negative errno value otherwise and rte_errno is set.
1410  */
1411 static int
1412 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1413                            const struct rte_flow_item *item,
1414                            const struct rte_flow_attr *attr,
1415                            struct rte_flow_error *error)
1416 {
1417         struct mlx5_priv *priv = dev->data->dev_private;
1418         struct mlx5_dev_config *config = &priv->config;
1419         const struct rte_flow_item_meta *spec = item->spec;
1420         const struct rte_flow_item_meta *mask = item->mask;
1421         struct rte_flow_item_meta nic_mask = {
1422                 .data = UINT32_MAX
1423         };
1424         int reg;
1425         int ret;
1426
1427         if (!spec)
1428                 return rte_flow_error_set(error, EINVAL,
1429                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1430                                           item->spec,
1431                                           "data cannot be empty");
1432         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1433                 if (!mlx5_flow_ext_mreg_supported(dev))
1434                         return rte_flow_error_set(error, ENOTSUP,
1435                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1436                                           "extended metadata register"
1437                                           " isn't supported");
1438                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1439                 if (reg < 0)
1440                         return reg;
1441                 if (reg == REG_B)
1442                         return rte_flow_error_set(error, ENOTSUP,
1443                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1444                                           "match on reg_b "
1445                                           "isn't supported");
1446                 if (reg != REG_A)
1447                         nic_mask.data = priv->sh->dv_meta_mask;
1448         } else if (attr->transfer) {
1449                 return rte_flow_error_set(error, ENOTSUP,
1450                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1451                                         "extended metadata feature "
1452                                         "should be enabled when "
1453                                         "meta item is requested "
1454                                         "with e-switch mode ");
1455         }
1456         if (!mask)
1457                 mask = &rte_flow_item_meta_mask;
1458         if (!mask->data)
1459                 return rte_flow_error_set(error, EINVAL,
1460                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1461                                         "mask cannot be zero");
1462
1463         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1464                                         (const uint8_t *)&nic_mask,
1465                                         sizeof(struct rte_flow_item_meta),
1466                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1467         return ret;
1468 }
1469
/**
 * Validate TAG item.
 *
 * Requires extensive metadata register support; the tag index mask must
 * be full (partial index match is not supported) and the index must map
 * to an available register.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_tag(struct rte_eth_dev *dev,
			  const struct rte_flow_item *item,
			  const struct rte_flow_attr *attr __rte_unused,
			  struct rte_flow_error *error)
{
	const struct rte_flow_item_tag *spec = item->spec;
	const struct rte_flow_item_tag *mask = item->mask;
	const struct rte_flow_item_tag nic_mask = {
		.data = RTE_BE32(UINT32_MAX),
		.index = 0xff,
	};
	int ret;

	if (!mlx5_flow_ext_mreg_supported(dev))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "extensive metadata register"
					  " isn't supported");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!mask)
		mask = &rte_flow_item_tag_mask;
	if (!mask->data)
		return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					"mask cannot be zero");

	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_tag),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	if (mask->index != 0xff)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
					  "partial mask for tag index"
					  " is not supported");
	/* Verify the tag index maps to an allocated register. */
	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	return 0;
}
1533
1534 /**
1535  * Validate vport item.
1536  *
1537  * @param[in] dev
1538  *   Pointer to the rte_eth_dev structure.
1539  * @param[in] item
1540  *   Item specification.
1541  * @param[in] attr
1542  *   Attributes of flow that includes this item.
1543  * @param[in] item_flags
1544  *   Bit-fields that holds the items detected until now.
1545  * @param[out] error
1546  *   Pointer to error structure.
1547  *
1548  * @return
1549  *   0 on success, a negative errno value otherwise and rte_errno is set.
1550  */
1551 static int
1552 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1553                               const struct rte_flow_item *item,
1554                               const struct rte_flow_attr *attr,
1555                               uint64_t item_flags,
1556                               struct rte_flow_error *error)
1557 {
1558         const struct rte_flow_item_port_id *spec = item->spec;
1559         const struct rte_flow_item_port_id *mask = item->mask;
1560         const struct rte_flow_item_port_id switch_mask = {
1561                         .id = 0xffffffff,
1562         };
1563         struct mlx5_priv *esw_priv;
1564         struct mlx5_priv *dev_priv;
1565         int ret;
1566
1567         if (!attr->transfer)
1568                 return rte_flow_error_set(error, EINVAL,
1569                                           RTE_FLOW_ERROR_TYPE_ITEM,
1570                                           NULL,
1571                                           "match on port id is valid only"
1572                                           " when transfer flag is enabled");
1573         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1574                 return rte_flow_error_set(error, ENOTSUP,
1575                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1576                                           "multiple source ports are not"
1577                                           " supported");
1578         if (!mask)
1579                 mask = &switch_mask;
1580         if (mask->id != 0xffffffff)
1581                 return rte_flow_error_set(error, ENOTSUP,
1582                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1583                                            mask,
1584                                            "no support for partial mask on"
1585                                            " \"id\" field");
1586         ret = mlx5_flow_item_acceptable
1587                                 (item, (const uint8_t *)mask,
1588                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1589                                  sizeof(struct rte_flow_item_port_id),
1590                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1591         if (ret)
1592                 return ret;
1593         if (!spec)
1594                 return 0;
1595         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1596         if (!esw_priv)
1597                 return rte_flow_error_set(error, rte_errno,
1598                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1599                                           "failed to obtain E-Switch info for"
1600                                           " port");
1601         dev_priv = mlx5_dev_to_eswitch_info(dev);
1602         if (!dev_priv)
1603                 return rte_flow_error_set(error, rte_errno,
1604                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1605                                           NULL,
1606                                           "failed to obtain E-Switch info");
1607         if (esw_priv->domain_id != dev_priv->domain_id)
1608                 return rte_flow_error_set(error, EINVAL,
1609                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1610                                           "cannot match on a port from a"
1611                                           " different E-Switch");
1612         return 0;
1613 }
1614
1615 /**
1616  * Validate VLAN item.
1617  *
1618  * @param[in] item
1619  *   Item specification.
1620  * @param[in] item_flags
1621  *   Bit-fields that holds the items detected until now.
1622  * @param[in] dev
1623  *   Ethernet device flow is being created on.
1624  * @param[out] error
1625  *   Pointer to error structure.
1626  *
1627  * @return
1628  *   0 on success, a negative errno value otherwise and rte_errno is set.
1629  */
1630 static int
1631 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1632                            uint64_t item_flags,
1633                            struct rte_eth_dev *dev,
1634                            struct rte_flow_error *error)
1635 {
1636         const struct rte_flow_item_vlan *mask = item->mask;
1637         const struct rte_flow_item_vlan nic_mask = {
1638                 .tci = RTE_BE16(UINT16_MAX),
1639                 .inner_type = RTE_BE16(UINT16_MAX),
1640                 .has_more_vlan = 1,
1641         };
1642         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1643         int ret;
1644         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1645                                         MLX5_FLOW_LAYER_INNER_L4) :
1646                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1647                                         MLX5_FLOW_LAYER_OUTER_L4);
1648         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1649                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1650
1651         if (item_flags & vlanm)
1652                 return rte_flow_error_set(error, EINVAL,
1653                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1654                                           "multiple VLAN layers not supported");
1655         else if ((item_flags & l34m) != 0)
1656                 return rte_flow_error_set(error, EINVAL,
1657                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1658                                           "VLAN cannot follow L3/L4 layer");
1659         if (!mask)
1660                 mask = &rte_flow_item_vlan_mask;
1661         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1662                                         (const uint8_t *)&nic_mask,
1663                                         sizeof(struct rte_flow_item_vlan),
1664                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1665         if (ret)
1666                 return ret;
1667         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1668                 struct mlx5_priv *priv = dev->data->dev_private;
1669
1670                 if (priv->vmwa_context) {
1671                         /*
1672                          * Non-NULL context means we have a virtual machine
1673                          * and SR-IOV enabled, we have to create VLAN interface
1674                          * to make hypervisor to setup E-Switch vport
1675                          * context correctly. We avoid creating the multiple
1676                          * VLAN interfaces, so we cannot support VLAN tag mask.
1677                          */
1678                         return rte_flow_error_set(error, EINVAL,
1679                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1680                                                   item,
1681                                                   "VLAN tag mask is not"
1682                                                   " supported in virtual"
1683                                                   " environment");
1684                 }
1685         }
1686         return 0;
1687 }
1688
/*
 * GTP flags are contained in 1 byte of the format:
 * -------------------------------------------
 * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
 * |-----------------------------------------|
 * | value | Version | PT | Res | E | S | PN |
 * -------------------------------------------
 *
 * Matching is supported only for GTP flags E, S, PN.
 * The mask below therefore covers the three least significant bits
 * (E, S, PN) of the flags byte.
 */
#define MLX5_GTP_FLAGS_MASK     0x07
1700
1701 /**
1702  * Validate GTP item.
1703  *
1704  * @param[in] dev
1705  *   Pointer to the rte_eth_dev structure.
1706  * @param[in] item
1707  *   Item specification.
1708  * @param[in] item_flags
1709  *   Bit-fields that holds the items detected until now.
1710  * @param[out] error
1711  *   Pointer to error structure.
1712  *
1713  * @return
1714  *   0 on success, a negative errno value otherwise and rte_errno is set.
1715  */
1716 static int
1717 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1718                           const struct rte_flow_item *item,
1719                           uint64_t item_flags,
1720                           struct rte_flow_error *error)
1721 {
1722         struct mlx5_priv *priv = dev->data->dev_private;
1723         const struct rte_flow_item_gtp *spec = item->spec;
1724         const struct rte_flow_item_gtp *mask = item->mask;
1725         const struct rte_flow_item_gtp nic_mask = {
1726                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1727                 .msg_type = 0xff,
1728                 .teid = RTE_BE32(0xffffffff),
1729         };
1730
1731         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1732                 return rte_flow_error_set(error, ENOTSUP,
1733                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1734                                           "GTP support is not enabled");
1735         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1736                 return rte_flow_error_set(error, ENOTSUP,
1737                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1738                                           "multiple tunnel layers not"
1739                                           " supported");
1740         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1741                 return rte_flow_error_set(error, EINVAL,
1742                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1743                                           "no outer UDP layer found");
1744         if (!mask)
1745                 mask = &rte_flow_item_gtp_mask;
1746         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1747                 return rte_flow_error_set(error, ENOTSUP,
1748                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1749                                           "Match is supported for GTP"
1750                                           " flags only");
1751         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1752                                          (const uint8_t *)&nic_mask,
1753                                          sizeof(struct rte_flow_item_gtp),
1754                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1755 }
1756
1757 /**
1758  * Validate IPV4 item.
1759  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1760  * add specific validation of fragment_offset field,
1761  *
1762  * @param[in] item
1763  *   Item specification.
1764  * @param[in] item_flags
1765  *   Bit-fields that holds the items detected until now.
1766  * @param[out] error
1767  *   Pointer to error structure.
1768  *
1769  * @return
1770  *   0 on success, a negative errno value otherwise and rte_errno is set.
1771  */
1772 static int
1773 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1774                            uint64_t item_flags,
1775                            uint64_t last_item,
1776                            uint16_t ether_type,
1777                            struct rte_flow_error *error)
1778 {
1779         int ret;
1780         const struct rte_flow_item_ipv4 *spec = item->spec;
1781         const struct rte_flow_item_ipv4 *last = item->last;
1782         const struct rte_flow_item_ipv4 *mask = item->mask;
1783         rte_be16_t fragment_offset_spec = 0;
1784         rte_be16_t fragment_offset_last = 0;
1785         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1786                 .hdr = {
1787                         .src_addr = RTE_BE32(0xffffffff),
1788                         .dst_addr = RTE_BE32(0xffffffff),
1789                         .type_of_service = 0xff,
1790                         .fragment_offset = RTE_BE16(0xffff),
1791                         .next_proto_id = 0xff,
1792                         .time_to_live = 0xff,
1793                 },
1794         };
1795
1796         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1797                                            ether_type, &nic_ipv4_mask,
1798                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1799         if (ret < 0)
1800                 return ret;
1801         if (spec && mask)
1802                 fragment_offset_spec = spec->hdr.fragment_offset &
1803                                        mask->hdr.fragment_offset;
1804         if (!fragment_offset_spec)
1805                 return 0;
1806         /*
1807          * spec and mask are valid, enforce using full mask to make sure the
1808          * complete value is used correctly.
1809          */
1810         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1811                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1812                 return rte_flow_error_set(error, EINVAL,
1813                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1814                                           item, "must use full mask for"
1815                                           " fragment_offset");
1816         /*
1817          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1818          * indicating this is 1st fragment of fragmented packet.
1819          * This is not yet supported in MLX5, return appropriate error message.
1820          */
1821         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1822                 return rte_flow_error_set(error, ENOTSUP,
1823                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1824                                           "match on first fragment not "
1825                                           "supported");
1826         if (fragment_offset_spec && !last)
1827                 return rte_flow_error_set(error, ENOTSUP,
1828                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1829                                           "specified value not supported");
1830         /* spec and last are valid, validate the specified range. */
1831         fragment_offset_last = last->hdr.fragment_offset &
1832                                mask->hdr.fragment_offset;
1833         /*
1834          * Match on fragment_offset spec 0x2001 and last 0x3fff
1835          * means MF is 1 and frag-offset is > 0.
1836          * This packet is fragment 2nd and onward, excluding last.
1837          * This is not yet supported in MLX5, return appropriate
1838          * error message.
1839          */
1840         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1841             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1842                 return rte_flow_error_set(error, ENOTSUP,
1843                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1844                                           last, "match on following "
1845                                           "fragments not supported");
1846         /*
1847          * Match on fragment_offset spec 0x0001 and last 0x1fff
1848          * means MF is 0 and frag-offset is > 0.
1849          * This packet is last fragment of fragmented packet.
1850          * This is not yet supported in MLX5, return appropriate
1851          * error message.
1852          */
1853         if (fragment_offset_spec == RTE_BE16(1) &&
1854             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1855                 return rte_flow_error_set(error, ENOTSUP,
1856                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1857                                           last, "match on last "
1858                                           "fragment not supported");
1859         /*
1860          * Match on fragment_offset spec 0x0001 and last 0x3fff
1861          * means MF and/or frag-offset is not 0.
1862          * This is a fragmented packet.
1863          * Other range values are invalid and rejected.
1864          */
1865         if (!(fragment_offset_spec == RTE_BE16(1) &&
1866               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1867                 return rte_flow_error_set(error, ENOTSUP,
1868                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1869                                           "specified range not supported");
1870         return 0;
1871 }
1872
1873 /**
1874  * Validate IPV6 fragment extension item.
1875  *
1876  * @param[in] item
1877  *   Item specification.
1878  * @param[in] item_flags
1879  *   Bit-fields that holds the items detected until now.
1880  * @param[out] error
1881  *   Pointer to error structure.
1882  *
1883  * @return
1884  *   0 on success, a negative errno value otherwise and rte_errno is set.
1885  */
1886 static int
1887 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1888                                     uint64_t item_flags,
1889                                     struct rte_flow_error *error)
1890 {
1891         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1892         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1893         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1894         rte_be16_t frag_data_spec = 0;
1895         rte_be16_t frag_data_last = 0;
1896         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1897         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1898                                       MLX5_FLOW_LAYER_OUTER_L4;
1899         int ret = 0;
1900         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1901                 .hdr = {
1902                         .next_header = 0xff,
1903                         .frag_data = RTE_BE16(0xffff),
1904                 },
1905         };
1906
1907         if (item_flags & l4m)
1908                 return rte_flow_error_set(error, EINVAL,
1909                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1910                                           "ipv6 fragment extension item cannot "
1911                                           "follow L4 item.");
1912         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1913             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1914                 return rte_flow_error_set(error, EINVAL,
1915                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1916                                           "ipv6 fragment extension item must "
1917                                           "follow ipv6 item");
1918         if (spec && mask)
1919                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1920         if (!frag_data_spec)
1921                 return 0;
1922         /*
1923          * spec and mask are valid, enforce using full mask to make sure the
1924          * complete value is used correctly.
1925          */
1926         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1927                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1928                 return rte_flow_error_set(error, EINVAL,
1929                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1930                                           item, "must use full mask for"
1931                                           " frag_data");
1932         /*
1933          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
1934          * This is 1st fragment of fragmented packet.
1935          */
1936         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
1937                 return rte_flow_error_set(error, ENOTSUP,
1938                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1939                                           "match on first fragment not "
1940                                           "supported");
1941         if (frag_data_spec && !last)
1942                 return rte_flow_error_set(error, EINVAL,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "specified value not supported");
1945         ret = mlx5_flow_item_acceptable
1946                                 (item, (const uint8_t *)mask,
1947                                  (const uint8_t *)&nic_mask,
1948                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
1949                                  MLX5_ITEM_RANGE_ACCEPTED, error);
1950         if (ret)
1951                 return ret;
1952         /* spec and last are valid, validate the specified range. */
1953         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1954         /*
1955          * Match on frag_data spec 0x0009 and last 0xfff9
1956          * means M is 1 and frag-offset is > 0.
1957          * This packet is fragment 2nd and onward, excluding last.
1958          * This is not yet supported in MLX5, return appropriate
1959          * error message.
1960          */
1961         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1962                                        RTE_IPV6_EHDR_MF_MASK) &&
1963             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1964                 return rte_flow_error_set(error, ENOTSUP,
1965                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1966                                           last, "match on following "
1967                                           "fragments not supported");
1968         /*
1969          * Match on frag_data spec 0x0008 and last 0xfff8
1970          * means M is 0 and frag-offset is > 0.
1971          * This packet is last fragment of fragmented packet.
1972          * This is not yet supported in MLX5, return appropriate
1973          * error message.
1974          */
1975         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
1976             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
1977                 return rte_flow_error_set(error, ENOTSUP,
1978                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1979                                           last, "match on last "
1980                                           "fragment not supported");
1981         /* Other range values are invalid and rejected. */
1982         return rte_flow_error_set(error, EINVAL,
1983                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1984                                   "specified range not supported");
1985 }
1986
1987 /**
1988  * Validate the pop VLAN action.
1989  *
1990  * @param[in] dev
1991  *   Pointer to the rte_eth_dev structure.
1992  * @param[in] action_flags
1993  *   Holds the actions detected until now.
1994  * @param[in] action
1995  *   Pointer to the pop vlan action.
1996  * @param[in] item_flags
1997  *   The items found in this flow rule.
1998  * @param[in] attr
1999  *   Pointer to flow attributes.
2000  * @param[out] error
2001  *   Pointer to error structure.
2002  *
2003  * @return
2004  *   0 on success, a negative errno value otherwise and rte_errno is set.
2005  */
2006 static int
2007 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2008                                  uint64_t action_flags,
2009                                  const struct rte_flow_action *action,
2010                                  uint64_t item_flags,
2011                                  const struct rte_flow_attr *attr,
2012                                  struct rte_flow_error *error)
2013 {
2014         const struct mlx5_priv *priv = dev->data->dev_private;
2015
2016         (void)action;
2017         (void)attr;
2018         if (!priv->sh->pop_vlan_action)
2019                 return rte_flow_error_set(error, ENOTSUP,
2020                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2021                                           NULL,
2022                                           "pop vlan action is not supported");
2023         if (attr->egress)
2024                 return rte_flow_error_set(error, ENOTSUP,
2025                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2026                                           NULL,
2027                                           "pop vlan action not supported for "
2028                                           "egress");
2029         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2030                 return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2032                                           "no support for multiple VLAN "
2033                                           "actions");
2034         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2035         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2036             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2037                 return rte_flow_error_set(error, ENOTSUP,
2038                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2039                                           NULL,
2040                                           "cannot pop vlan after decap without "
2041                                           "match on inner vlan in the flow");
2042         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2043         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2044             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2045                 return rte_flow_error_set(error, ENOTSUP,
2046                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2047                                           NULL,
2048                                           "cannot pop vlan without a "
2049                                           "match on (outer) vlan in the flow");
2050         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2051                 return rte_flow_error_set(error, EINVAL,
2052                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2053                                           "wrong action order, port_id should "
2054                                           "be after pop VLAN action");
2055         if (!attr->transfer && priv->representor)
2056                 return rte_flow_error_set(error, ENOTSUP,
2057                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2058                                           "pop vlan action for VF representor "
2059                                           "not supported on NIC table");
2060         return 0;
2061 }
2062
/**
 * Get VLAN default info from vlan match info.
 *
 * Scans the pattern for the first VLAN item and, for each fully-masked
 * field (PCP, VID, inner ethertype), copies the matched value into @p vlan.
 * Fields that are absent or only partially masked are left untouched.
 *
 * @param[in] items
 *   the list of item specifications.
 * @param[out] vlan
 *   pointer VLAN info to fill to.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
                                  struct rte_vlan_hdr *vlan)
{
        /* Default mask: treat an unmasked VLAN item as a full PCP|VID match. */
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
                                MLX5DV_FLOW_VLAN_VID_MASK),
                .inner_type = RTE_BE16(0xffff),
        };

        if (items == NULL)
                return;
        /* Stop at the first VLAN item (external or PMD-internal type). */
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                int type = items->type;

                if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
                    type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
                        break;
        }
        if (items->type != RTE_FLOW_ITEM_TYPE_END) {
                const struct rte_flow_item_vlan *vlan_m = items->mask;
                const struct rte_flow_item_vlan *vlan_v = items->spec;

                /* If VLAN item in pattern doesn't contain data, return here. */
                if (!vlan_v)
                        return;
                if (!vlan_m)
                        vlan_m = &nic_mask;
                /* Only full match values are accepted */
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
                        /* Replace the PCP bits with the matched value. */
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
                }
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
                        /* Replace the VID bits with the matched value. */
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_VID_MASK_BE);
                }
                if (vlan_m->inner_type == nic_mask.inner_type)
                        vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
                                                           vlan_m->inner_type);
        }
}
2122
2123 /**
2124  * Validate the push VLAN action.
2125  *
2126  * @param[in] dev
2127  *   Pointer to the rte_eth_dev structure.
2128  * @param[in] action_flags
2129  *   Holds the actions detected until now.
2130  * @param[in] item_flags
2131  *   The items found in this flow rule.
2132  * @param[in] action
2133  *   Pointer to the action structure.
2134  * @param[in] attr
2135  *   Pointer to flow attributes
2136  * @param[out] error
2137  *   Pointer to error structure.
2138  *
2139  * @return
2140  *   0 on success, a negative errno value otherwise and rte_errno is set.
2141  */
2142 static int
2143 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2144                                   uint64_t action_flags,
2145                                   const struct rte_flow_item_vlan *vlan_m,
2146                                   const struct rte_flow_action *action,
2147                                   const struct rte_flow_attr *attr,
2148                                   struct rte_flow_error *error)
2149 {
2150         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2151         const struct mlx5_priv *priv = dev->data->dev_private;
2152
2153         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2154             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2155                 return rte_flow_error_set(error, EINVAL,
2156                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2157                                           "invalid vlan ethertype");
2158         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2159                 return rte_flow_error_set(error, EINVAL,
2160                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2161                                           "wrong action order, port_id should "
2162                                           "be after push VLAN");
2163         if (!attr->transfer && priv->representor)
2164                 return rte_flow_error_set(error, ENOTSUP,
2165                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2166                                           "push vlan action for VF representor "
2167                                           "not supported on NIC table");
2168         if (vlan_m &&
2169             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2170             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2171                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2172             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2173             !(mlx5_flow_find_action
2174                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2175                 return rte_flow_error_set(error, EINVAL,
2176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2177                                           "not full match mask on VLAN PCP and "
2178                                           "there is no of_set_vlan_pcp action, "
2179                                           "push VLAN action cannot figure out "
2180                                           "PCP value");
2181         if (vlan_m &&
2182             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2183             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2184                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2185             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2186             !(mlx5_flow_find_action
2187                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2188                 return rte_flow_error_set(error, EINVAL,
2189                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2190                                           "not full match mask on VLAN VID and "
2191                                           "there is no of_set_vlan_vid action, "
2192                                           "push VLAN action cannot figure out "
2193                                           "VID value");
2194         (void)attr;
2195         return 0;
2196 }
2197
2198 /**
2199  * Validate the set VLAN PCP.
2200  *
2201  * @param[in] action_flags
2202  *   Holds the actions detected until now.
2203  * @param[in] actions
2204  *   Pointer to the list of actions remaining in the flow rule.
2205  * @param[out] error
2206  *   Pointer to error structure.
2207  *
2208  * @return
2209  *   0 on success, a negative errno value otherwise and rte_errno is set.
2210  */
2211 static int
2212 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2213                                      const struct rte_flow_action actions[],
2214                                      struct rte_flow_error *error)
2215 {
2216         const struct rte_flow_action *action = actions;
2217         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2218
2219         if (conf->vlan_pcp > 7)
2220                 return rte_flow_error_set(error, EINVAL,
2221                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2222                                           "VLAN PCP value is too big");
2223         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2224                 return rte_flow_error_set(error, ENOTSUP,
2225                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2226                                           "set VLAN PCP action must follow "
2227                                           "the push VLAN action");
2228         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2229                 return rte_flow_error_set(error, ENOTSUP,
2230                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2231                                           "Multiple VLAN PCP modification are "
2232                                           "not supported");
2233         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2234                 return rte_flow_error_set(error, EINVAL,
2235                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2236                                           "wrong action order, port_id should "
2237                                           "be after set VLAN PCP");
2238         return 0;
2239 }
2240
2241 /**
2242  * Validate the set VLAN VID.
2243  *
2244  * @param[in] item_flags
2245  *   Holds the items detected in this rule.
2246  * @param[in] action_flags
2247  *   Holds the actions detected until now.
2248  * @param[in] actions
2249  *   Pointer to the list of actions remaining in the flow rule.
2250  * @param[out] error
2251  *   Pointer to error structure.
2252  *
2253  * @return
2254  *   0 on success, a negative errno value otherwise and rte_errno is set.
2255  */
2256 static int
2257 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2258                                      uint64_t action_flags,
2259                                      const struct rte_flow_action actions[],
2260                                      struct rte_flow_error *error)
2261 {
2262         const struct rte_flow_action *action = actions;
2263         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2264
2265         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2266                 return rte_flow_error_set(error, EINVAL,
2267                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2268                                           "VLAN VID value is too big");
2269         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2270             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2271                 return rte_flow_error_set(error, ENOTSUP,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "set VLAN VID action must follow push"
2274                                           " VLAN action or match on VLAN item");
2275         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2276                 return rte_flow_error_set(error, ENOTSUP,
2277                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2278                                           "Multiple VLAN VID modifications are "
2279                                           "not supported");
2280         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2281                 return rte_flow_error_set(error, EINVAL,
2282                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2283                                           "wrong action order, port_id should "
2284                                           "be after set VLAN VID");
2285         return 0;
2286 }
2287
2288 /*
2289  * Validate the FLAG action.
2290  *
2291  * @param[in] dev
2292  *   Pointer to the rte_eth_dev structure.
2293  * @param[in] action_flags
2294  *   Holds the actions detected until now.
2295  * @param[in] attr
2296  *   Pointer to flow attributes
2297  * @param[out] error
2298  *   Pointer to error structure.
2299  *
2300  * @return
2301  *   0 on success, a negative errno value otherwise and rte_errno is set.
2302  */
2303 static int
2304 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2305                              uint64_t action_flags,
2306                              const struct rte_flow_attr *attr,
2307                              struct rte_flow_error *error)
2308 {
2309         struct mlx5_priv *priv = dev->data->dev_private;
2310         struct mlx5_dev_config *config = &priv->config;
2311         int ret;
2312
2313         /* Fall back if no extended metadata register support. */
2314         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2315                 return mlx5_flow_validate_action_flag(action_flags, attr,
2316                                                       error);
2317         /* Extensive metadata mode requires registers. */
2318         if (!mlx5_flow_ext_mreg_supported(dev))
2319                 return rte_flow_error_set(error, ENOTSUP,
2320                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2321                                           "no metadata registers "
2322                                           "to support flag action");
2323         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2324                 return rte_flow_error_set(error, ENOTSUP,
2325                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2326                                           "extended metadata register"
2327                                           " isn't available");
2328         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2329         if (ret < 0)
2330                 return ret;
2331         MLX5_ASSERT(ret > 0);
2332         if (action_flags & MLX5_FLOW_ACTION_MARK)
2333                 return rte_flow_error_set(error, EINVAL,
2334                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2335                                           "can't mark and flag in same flow");
2336         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2337                 return rte_flow_error_set(error, EINVAL,
2338                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2339                                           "can't have 2 flag"
2340                                           " actions in same flow");
2341         return 0;
2342 }
2343
2344 /**
2345  * Validate MARK action.
2346  *
2347  * @param[in] dev
2348  *   Pointer to the rte_eth_dev structure.
2349  * @param[in] action
2350  *   Pointer to action.
2351  * @param[in] action_flags
2352  *   Holds the actions detected until now.
2353  * @param[in] attr
2354  *   Pointer to flow attributes
2355  * @param[out] error
2356  *   Pointer to error structure.
2357  *
2358  * @return
2359  *   0 on success, a negative errno value otherwise and rte_errno is set.
2360  */
2361 static int
2362 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2363                              const struct rte_flow_action *action,
2364                              uint64_t action_flags,
2365                              const struct rte_flow_attr *attr,
2366                              struct rte_flow_error *error)
2367 {
2368         struct mlx5_priv *priv = dev->data->dev_private;
2369         struct mlx5_dev_config *config = &priv->config;
2370         const struct rte_flow_action_mark *mark = action->conf;
2371         int ret;
2372
2373         /* Fall back if no extended metadata register support. */
2374         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2375                 return mlx5_flow_validate_action_mark(action, action_flags,
2376                                                       attr, error);
2377         /* Extensive metadata mode requires registers. */
2378         if (!mlx5_flow_ext_mreg_supported(dev))
2379                 return rte_flow_error_set(error, ENOTSUP,
2380                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2381                                           "no metadata registers "
2382                                           "to support mark action");
2383         if (!priv->sh->dv_mark_mask)
2384                 return rte_flow_error_set(error, ENOTSUP,
2385                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2386                                           "extended metadata register"
2387                                           " isn't available");
2388         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2389         if (ret < 0)
2390                 return ret;
2391         MLX5_ASSERT(ret > 0);
2392         if (!mark)
2393                 return rte_flow_error_set(error, EINVAL,
2394                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2395                                           "configuration cannot be null");
2396         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2397                 return rte_flow_error_set(error, EINVAL,
2398                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2399                                           &mark->id,
2400                                           "mark id exceeds the limit");
2401         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2402                 return rte_flow_error_set(error, EINVAL,
2403                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2404                                           "can't flag and mark in same flow");
2405         if (action_flags & MLX5_FLOW_ACTION_MARK)
2406                 return rte_flow_error_set(error, EINVAL,
2407                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2408                                           "can't have 2 mark actions in same"
2409                                           " flow");
2410         return 0;
2411 }
2412
2413 /**
2414  * Validate SET_META action.
2415  *
2416  * @param[in] dev
2417  *   Pointer to the rte_eth_dev structure.
2418  * @param[in] action
2419  *   Pointer to the action structure.
2420  * @param[in] action_flags
2421  *   Holds the actions detected until now.
2422  * @param[in] attr
2423  *   Pointer to flow attributes
2424  * @param[out] error
2425  *   Pointer to error structure.
2426  *
2427  * @return
2428  *   0 on success, a negative errno value otherwise and rte_errno is set.
2429  */
2430 static int
2431 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2432                                  const struct rte_flow_action *action,
2433                                  uint64_t action_flags __rte_unused,
2434                                  const struct rte_flow_attr *attr,
2435                                  struct rte_flow_error *error)
2436 {
2437         const struct rte_flow_action_set_meta *conf;
2438         uint32_t nic_mask = UINT32_MAX;
2439         int reg;
2440
2441         if (!mlx5_flow_ext_mreg_supported(dev))
2442                 return rte_flow_error_set(error, ENOTSUP,
2443                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2444                                           "extended metadata register"
2445                                           " isn't supported");
2446         reg = flow_dv_get_metadata_reg(dev, attr, error);
2447         if (reg < 0)
2448                 return reg;
2449         if (reg != REG_A && reg != REG_B) {
2450                 struct mlx5_priv *priv = dev->data->dev_private;
2451
2452                 nic_mask = priv->sh->dv_meta_mask;
2453         }
2454         if (!(action->conf))
2455                 return rte_flow_error_set(error, EINVAL,
2456                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2457                                           "configuration cannot be null");
2458         conf = (const struct rte_flow_action_set_meta *)action->conf;
2459         if (!conf->mask)
2460                 return rte_flow_error_set(error, EINVAL,
2461                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2462                                           "zero mask doesn't have any effect");
2463         if (conf->mask & ~nic_mask)
2464                 return rte_flow_error_set(error, EINVAL,
2465                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2466                                           "meta data must be within reg C0");
2467         return 0;
2468 }
2469
2470 /**
2471  * Validate SET_TAG action.
2472  *
2473  * @param[in] dev
2474  *   Pointer to the rte_eth_dev structure.
2475  * @param[in] action
2476  *   Pointer to the action structure.
2477  * @param[in] action_flags
2478  *   Holds the actions detected until now.
2479  * @param[in] attr
2480  *   Pointer to flow attributes
2481  * @param[out] error
2482  *   Pointer to error structure.
2483  *
2484  * @return
2485  *   0 on success, a negative errno value otherwise and rte_errno is set.
2486  */
2487 static int
2488 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2489                                 const struct rte_flow_action *action,
2490                                 uint64_t action_flags,
2491                                 const struct rte_flow_attr *attr,
2492                                 struct rte_flow_error *error)
2493 {
2494         const struct rte_flow_action_set_tag *conf;
2495         const uint64_t terminal_action_flags =
2496                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2497                 MLX5_FLOW_ACTION_RSS;
2498         int ret;
2499
2500         if (!mlx5_flow_ext_mreg_supported(dev))
2501                 return rte_flow_error_set(error, ENOTSUP,
2502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2503                                           "extensive metadata register"
2504                                           " isn't supported");
2505         if (!(action->conf))
2506                 return rte_flow_error_set(error, EINVAL,
2507                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2508                                           "configuration cannot be null");
2509         conf = (const struct rte_flow_action_set_tag *)action->conf;
2510         if (!conf->mask)
2511                 return rte_flow_error_set(error, EINVAL,
2512                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2513                                           "zero mask doesn't have any effect");
2514         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2515         if (ret < 0)
2516                 return ret;
2517         if (!attr->transfer && attr->ingress &&
2518             (action_flags & terminal_action_flags))
2519                 return rte_flow_error_set(error, EINVAL,
2520                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2521                                           "set_tag has no effect"
2522                                           " with terminal actions");
2523         return 0;
2524 }
2525
2526 /**
2527  * Validate count action.
2528  *
2529  * @param[in] dev
2530  *   Pointer to rte_eth_dev structure.
2531  * @param[out] error
2532  *   Pointer to error structure.
2533  *
2534  * @return
2535  *   0 on success, a negative errno value otherwise and rte_errno is set.
2536  */
2537 static int
2538 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2539                               struct rte_flow_error *error)
2540 {
2541         struct mlx5_priv *priv = dev->data->dev_private;
2542
2543         if (!priv->config.devx)
2544                 goto notsup_err;
2545 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2546         return 0;
2547 #endif
2548 notsup_err:
2549         return rte_flow_error_set
2550                       (error, ENOTSUP,
2551                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2552                        NULL,
2553                        "count action not supported");
2554 }
2555
2556 /**
2557  * Validate the L2 encap action.
2558  *
2559  * @param[in] dev
2560  *   Pointer to the rte_eth_dev structure.
2561  * @param[in] action_flags
2562  *   Holds the actions detected until now.
2563  * @param[in] action
2564  *   Pointer to the action structure.
2565  * @param[in] attr
2566  *   Pointer to flow attributes.
2567  * @param[out] error
2568  *   Pointer to error structure.
2569  *
2570  * @return
2571  *   0 on success, a negative errno value otherwise and rte_errno is set.
2572  */
2573 static int
2574 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2575                                  uint64_t action_flags,
2576                                  const struct rte_flow_action *action,
2577                                  const struct rte_flow_attr *attr,
2578                                  struct rte_flow_error *error)
2579 {
2580         const struct mlx5_priv *priv = dev->data->dev_private;
2581
2582         if (!(action->conf))
2583                 return rte_flow_error_set(error, EINVAL,
2584                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2585                                           "configuration cannot be null");
2586         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2587                 return rte_flow_error_set(error, EINVAL,
2588                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2589                                           "can only have a single encap action "
2590                                           "in a flow");
2591         if (!attr->transfer && priv->representor)
2592                 return rte_flow_error_set(error, ENOTSUP,
2593                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2594                                           "encap action for VF representor "
2595                                           "not supported on NIC table");
2596         return 0;
2597 }
2598
2599 /**
2600  * Validate a decap action.
2601  *
2602  * @param[in] dev
2603  *   Pointer to the rte_eth_dev structure.
2604  * @param[in] action_flags
2605  *   Holds the actions detected until now.
2606  * @param[in] attr
2607  *   Pointer to flow attributes
2608  * @param[out] error
2609  *   Pointer to error structure.
2610  *
2611  * @return
2612  *   0 on success, a negative errno value otherwise and rte_errno is set.
2613  */
2614 static int
2615 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2616                               uint64_t action_flags,
2617                               const struct rte_flow_attr *attr,
2618                               struct rte_flow_error *error)
2619 {
2620         const struct mlx5_priv *priv = dev->data->dev_private;
2621
2622         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2623             !priv->config.decap_en)
2624                 return rte_flow_error_set(error, ENOTSUP,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2626                                           "decap is not enabled");
2627         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2628                 return rte_flow_error_set(error, ENOTSUP,
2629                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2630                                           action_flags &
2631                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2632                                           "have a single decap action" : "decap "
2633                                           "after encap is not supported");
2634         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2635                 return rte_flow_error_set(error, EINVAL,
2636                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2637                                           "can't have decap action after"
2638                                           " modify action");
2639         if (attr->egress)
2640                 return rte_flow_error_set(error, ENOTSUP,
2641                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2642                                           NULL,
2643                                           "decap action not supported for "
2644                                           "egress");
2645         if (!attr->transfer && priv->representor)
2646                 return rte_flow_error_set(error, ENOTSUP,
2647                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2648                                           "decap action for VF representor "
2649                                           "not supported on NIC table");
2650         return 0;
2651 }
2652
/* Shared zero-sized raw decap configuration. */
const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2654
2655 /**
2656  * Validate the raw encap and decap actions.
2657  *
2658  * @param[in] dev
2659  *   Pointer to the rte_eth_dev structure.
2660  * @param[in] decap
2661  *   Pointer to the decap action.
2662  * @param[in] encap
2663  *   Pointer to the encap action.
2664  * @param[in] attr
2665  *   Pointer to flow attributes
2666  * @param[in/out] action_flags
2667  *   Holds the actions detected until now.
2668  * @param[out] actions_n
2669  *   pointer to the number of actions counter.
2670  * @param[out] error
2671  *   Pointer to error structure.
2672  *
2673  * @return
2674  *   0 on success, a negative errno value otherwise and rte_errno is set.
2675  */
2676 static int
2677 flow_dv_validate_action_raw_encap_decap
2678         (struct rte_eth_dev *dev,
2679          const struct rte_flow_action_raw_decap *decap,
2680          const struct rte_flow_action_raw_encap *encap,
2681          const struct rte_flow_attr *attr, uint64_t *action_flags,
2682          int *actions_n, struct rte_flow_error *error)
2683 {
2684         const struct mlx5_priv *priv = dev->data->dev_private;
2685         int ret;
2686
2687         if (encap && (!encap->size || !encap->data))
2688                 return rte_flow_error_set(error, EINVAL,
2689                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2690                                           "raw encap data cannot be empty");
2691         if (decap && encap) {
2692                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2693                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2694                         /* L3 encap. */
2695                         decap = NULL;
2696                 else if (encap->size <=
2697                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2698                            decap->size >
2699                            MLX5_ENCAPSULATION_DECISION_SIZE)
2700                         /* L3 decap. */
2701                         encap = NULL;
2702                 else if (encap->size >
2703                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2704                            decap->size >
2705                            MLX5_ENCAPSULATION_DECISION_SIZE)
2706                         /* 2 L2 actions: encap and decap. */
2707                         ;
2708                 else
2709                         return rte_flow_error_set(error,
2710                                 ENOTSUP,
2711                                 RTE_FLOW_ERROR_TYPE_ACTION,
2712                                 NULL, "unsupported too small "
2713                                 "raw decap and too small raw "
2714                                 "encap combination");
2715         }
2716         if (decap) {
2717                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2718                                                     error);
2719                 if (ret < 0)
2720                         return ret;
2721                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2722                 ++(*actions_n);
2723         }
2724         if (encap) {
2725                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2726                         return rte_flow_error_set(error, ENOTSUP,
2727                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2728                                                   NULL,
2729                                                   "small raw encap size");
2730                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2731                         return rte_flow_error_set(error, EINVAL,
2732                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2733                                                   NULL,
2734                                                   "more than one encap action");
2735                 if (!attr->transfer && priv->representor)
2736                         return rte_flow_error_set
2737                                         (error, ENOTSUP,
2738                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2739                                          "encap action for VF representor "
2740                                          "not supported on NIC table");
2741                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2742                 ++(*actions_n);
2743         }
2744         return 0;
2745 }
2746
2747 /**
2748  * Match encap_decap resource.
2749  *
2750  * @param list
2751  *   Pointer to the hash list.
2752  * @param entry
2753  *   Pointer to exist resource entry object.
2754  * @param key
2755  *   Key of the new entry.
2756  * @param ctx_cb
2757  *   Pointer to new encap_decap resource.
2758  *
2759  * @return
2760  *   0 on matching, none-zero otherwise.
2761  */
2762 int
2763 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2764                              struct mlx5_hlist_entry *entry,
2765                              uint64_t key __rte_unused, void *cb_ctx)
2766 {
2767         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2768         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2769         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2770
2771         cache_resource = container_of(entry,
2772                                       struct mlx5_flow_dv_encap_decap_resource,
2773                                       entry);
2774         if (resource->entry.key == cache_resource->entry.key &&
2775             resource->reformat_type == cache_resource->reformat_type &&
2776             resource->ft_type == cache_resource->ft_type &&
2777             resource->flags == cache_resource->flags &&
2778             resource->size == cache_resource->size &&
2779             !memcmp((const void *)resource->buf,
2780                     (const void *)cache_resource->buf,
2781                     resource->size))
2782                 return 0;
2783         return -1;
2784 }
2785
/**
 * Hash-list create callback: allocate an encap/decap resource and create
 * its packet-reformat DR action.
 *
 * @param list
 *   Pointer to the hash list; its ctx is the shared device context.
 * @param key
 *   Hash key, unused here (lookup matching is done via the context data).
 * @param cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx whose data field points to the
 *   encap/decap resource template to copy.
 *
 * @return
 *   Pointer to the new hash-list entry on success, NULL otherwise with the
 *   error detail set in ctx->error.
 */
struct mlx5_hlist_entry *
flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
                              uint64_t key __rte_unused,
                              void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5dv_dr_domain *domain;
        struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        uint32_t idx;
        int ret;

        /* Select the DR domain matching the resource's flow table type. */
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        /* Register new encap/decap resource. */
        cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                       &idx);
        if (!cache_resource) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate resource memory");
                return NULL;
        }
        /* Copy the template; the ipool index is kept for later release. */
        *cache_resource = *resource;
        cache_resource->idx = idx;
        ret = mlx5_flow_os_create_flow_action_packet_reformat
                                        (sh->ctx, domain, cache_resource,
                                         &cache_resource->action);
        if (ret) {
                /* Roll back the ipool allocation on action-creation failure. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create action");
                return NULL;
        }

        return &cache_resource->entry;
}
2842
2843 /**
2844  * Find existing encap/decap resource or create and register a new one.
2845  *
2846  * @param[in, out] dev
2847  *   Pointer to rte_eth_dev structure.
2848  * @param[in, out] resource
2849  *   Pointer to encap/decap resource.
2850  * @parm[in, out] dev_flow
2851  *   Pointer to the dev_flow.
2852  * @param[out] error
2853  *   pointer to error structure.
2854  *
2855  * @return
2856  *   0 on success otherwise -errno and errno is set.
2857  */
2858 static int
2859 flow_dv_encap_decap_resource_register
2860                         (struct rte_eth_dev *dev,
2861                          struct mlx5_flow_dv_encap_decap_resource *resource,
2862                          struct mlx5_flow *dev_flow,
2863                          struct rte_flow_error *error)
2864 {
2865         struct mlx5_priv *priv = dev->data->dev_private;
2866         struct mlx5_dev_ctx_shared *sh = priv->sh;
2867         struct mlx5_hlist_entry *entry;
2868         union mlx5_flow_encap_decap_key encap_decap_key = {
2869                 {
2870                         .ft_type = resource->ft_type,
2871                         .refmt_type = resource->reformat_type,
2872                         .buf_size = resource->size,
2873                         .table_level = !!dev_flow->dv.group,
2874                         .cksum = 0,
2875                 }
2876         };
2877         struct mlx5_flow_cb_ctx ctx = {
2878                 .error = error,
2879                 .data = resource,
2880         };
2881
2882         resource->flags = dev_flow->dv.group ? 0 : 1;
2883         encap_decap_key.cksum = __rte_raw_cksum(resource->buf,
2884                                                 resource->size, 0);
2885         resource->entry.key = encap_decap_key.v64;
2886         entry = mlx5_hlist_register(sh->encaps_decaps, resource->entry.key,
2887                                     &ctx);
2888         if (!entry)
2889                 return -rte_errno;
2890         resource = container_of(entry, typeof(*resource), entry);
2891         dev_flow->dv.encap_decap = resource;
2892         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2893         return 0;
2894 }
2895
2896 /**
2897  * Find existing table jump resource or create and register a new one.
2898  *
2899  * @param[in, out] dev
2900  *   Pointer to rte_eth_dev structure.
2901  * @param[in, out] tbl
2902  *   Pointer to flow table resource.
2903  * @parm[in, out] dev_flow
2904  *   Pointer to the dev_flow.
2905  * @param[out] error
2906  *   pointer to error structure.
2907  *
2908  * @return
2909  *   0 on success otherwise -errno and errno is set.
2910  */
2911 static int
2912 flow_dv_jump_tbl_resource_register
2913                         (struct rte_eth_dev *dev __rte_unused,
2914                          struct mlx5_flow_tbl_resource *tbl,
2915                          struct mlx5_flow *dev_flow,
2916                          struct rte_flow_error *error __rte_unused)
2917 {
2918         struct mlx5_flow_tbl_data_entry *tbl_data =
2919                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2920
2921         MLX5_ASSERT(tbl);
2922         MLX5_ASSERT(tbl_data->jump.action);
2923         dev_flow->handle->rix_jump = tbl_data->idx;
2924         dev_flow->dv.jump = &tbl_data->jump;
2925         return 0;
2926 }
2927
2928 int
2929 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2930                          struct mlx5_cache_entry *entry, void *cb_ctx)
2931 {
2932         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2933         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2934         struct mlx5_flow_dv_port_id_action_resource *res =
2935                         container_of(entry, typeof(*res), entry);
2936
2937         return ref->port_id != res->port_id;
2938 }
2939
/**
 * Cache-list create callback: allocate a port ID action resource and
 * create the destination-port DR action on the FDB domain.
 *
 * @param list
 *   Pointer to the cache list; its ctx is the shared device context.
 * @param entry
 *   Unused existing entry pointer.
 * @param cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx whose data field points to the
 *   reference port ID action resource.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise with the
 *   error detail set in ctx->error.
 */
struct mlx5_cache_entry *
flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_port_id_action_resource *cache;
        uint32_t idx;
        int ret;

        /* Register new port id action resource. */
        cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate port_id action cache memory");
                return NULL;
        }
        *cache = *ref;
        /* Dest-port actions only exist on the FDB (E-Switch) domain. */
        ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
                                                        ref->port_id,
                                                        &cache->action);
        if (ret) {
                /* Roll back the ipool allocation on action-creation failure. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create action");
                return NULL;
        }
        return &cache->entry;
}
2973
2974 /**
2975  * Find existing table port ID resource or create and register a new one.
2976  *
2977  * @param[in, out] dev
2978  *   Pointer to rte_eth_dev structure.
2979  * @param[in, out] resource
2980  *   Pointer to port ID action resource.
2981  * @parm[in, out] dev_flow
2982  *   Pointer to the dev_flow.
2983  * @param[out] error
2984  *   pointer to error structure.
2985  *
2986  * @return
2987  *   0 on success otherwise -errno and errno is set.
2988  */
2989 static int
2990 flow_dv_port_id_action_resource_register
2991                         (struct rte_eth_dev *dev,
2992                          struct mlx5_flow_dv_port_id_action_resource *resource,
2993                          struct mlx5_flow *dev_flow,
2994                          struct rte_flow_error *error)
2995 {
2996         struct mlx5_priv *priv = dev->data->dev_private;
2997         struct mlx5_cache_entry *entry;
2998         struct mlx5_flow_dv_port_id_action_resource *cache;
2999         struct mlx5_flow_cb_ctx ctx = {
3000                 .error = error,
3001                 .data = resource,
3002         };
3003
3004         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3005         if (!entry)
3006                 return -rte_errno;
3007         cache = container_of(entry, typeof(*cache), entry);
3008         dev_flow->dv.port_id_action = cache;
3009         dev_flow->handle->rix_port_id_action = cache->idx;
3010         return 0;
3011 }
3012
3013 int
3014 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3015                          struct mlx5_cache_entry *entry, void *cb_ctx)
3016 {
3017         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3018         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3019         struct mlx5_flow_dv_push_vlan_action_resource *res =
3020                         container_of(entry, typeof(*res), entry);
3021
3022         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3023 }
3024
/**
 * Cache-list create callback: allocate a push VLAN action resource and
 * create the DR push-VLAN action on the matching domain.
 *
 * @param list
 *   Pointer to the cache list; its ctx is the shared device context.
 * @param entry
 *   Unused existing entry pointer.
 * @param cb_ctx
 *   Pointer to a struct mlx5_flow_cb_ctx whose data field points to the
 *   reference push VLAN action resource.
 *
 * @return
 *   Pointer to the new cache entry on success, NULL otherwise with the
 *   error detail set in ctx->error.
 */
struct mlx5_cache_entry *
flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
                          struct mlx5_cache_entry *entry __rte_unused,
                          void *cb_ctx)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct mlx5_flow_cb_ctx *ctx = cb_ctx;
        struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
        struct mlx5_flow_dv_push_vlan_action_resource *cache;
        struct mlx5dv_dr_domain *domain;
        uint32_t idx;
        int ret;

        /* Register new push_vlan action resource. */
        cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
        if (!cache) {
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot allocate push_vlan action cache memory");
                return NULL;
        }
        *cache = *ref;
        /* Select the DR domain matching the resource's flow table type. */
        if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;
        ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
                                                        &cache->action);
        if (ret) {
                /* Roll back the ipool allocation on action-creation failure. */
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
                rte_flow_error_set(ctx->error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "cannot create push vlan action");
                return NULL;
        }
        return &cache->entry;
}
3064
3065 /**
3066  * Find existing push vlan resource or create and register a new one.
3067  *
3068  * @param [in, out] dev
3069  *   Pointer to rte_eth_dev structure.
3070  * @param[in, out] resource
3071  *   Pointer to port ID action resource.
3072  * @parm[in, out] dev_flow
3073  *   Pointer to the dev_flow.
3074  * @param[out] error
3075  *   pointer to error structure.
3076  *
3077  * @return
3078  *   0 on success otherwise -errno and errno is set.
3079  */
3080 static int
3081 flow_dv_push_vlan_action_resource_register
3082                        (struct rte_eth_dev *dev,
3083                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3084                         struct mlx5_flow *dev_flow,
3085                         struct rte_flow_error *error)
3086 {
3087         struct mlx5_priv *priv = dev->data->dev_private;
3088         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3089         struct mlx5_cache_entry *entry;
3090         struct mlx5_flow_cb_ctx ctx = {
3091                 .error = error,
3092                 .data = resource,
3093         };
3094
3095         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3096         if (!entry)
3097                 return -rte_errno;
3098         cache = container_of(entry, typeof(*cache), entry);
3099
3100         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3101         dev_flow->dv.push_vlan_res = cache;
3102         return 0;
3103 }
3104
3105 /**
3106  * Get the size of specific rte_flow_item_type hdr size
3107  *
3108  * @param[in] item_type
3109  *   Tested rte_flow_item_type.
3110  *
3111  * @return
3112  *   sizeof struct item_type, 0 if void or irrelevant.
3113  */
3114 static size_t
3115 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3116 {
3117         size_t retval;
3118
3119         switch (item_type) {
3120         case RTE_FLOW_ITEM_TYPE_ETH:
3121                 retval = sizeof(struct rte_ether_hdr);
3122                 break;
3123         case RTE_FLOW_ITEM_TYPE_VLAN:
3124                 retval = sizeof(struct rte_vlan_hdr);
3125                 break;
3126         case RTE_FLOW_ITEM_TYPE_IPV4:
3127                 retval = sizeof(struct rte_ipv4_hdr);
3128                 break;
3129         case RTE_FLOW_ITEM_TYPE_IPV6:
3130                 retval = sizeof(struct rte_ipv6_hdr);
3131                 break;
3132         case RTE_FLOW_ITEM_TYPE_UDP:
3133                 retval = sizeof(struct rte_udp_hdr);
3134                 break;
3135         case RTE_FLOW_ITEM_TYPE_TCP:
3136                 retval = sizeof(struct rte_tcp_hdr);
3137                 break;
3138         case RTE_FLOW_ITEM_TYPE_VXLAN:
3139         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3140                 retval = sizeof(struct rte_vxlan_hdr);
3141                 break;
3142         case RTE_FLOW_ITEM_TYPE_GRE:
3143         case RTE_FLOW_ITEM_TYPE_NVGRE:
3144                 retval = sizeof(struct rte_gre_hdr);
3145                 break;
3146         case RTE_FLOW_ITEM_TYPE_MPLS:
3147                 retval = sizeof(struct rte_mpls_hdr);
3148                 break;
3149         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3150         default:
3151                 retval = 0;
3152                 break;
3153         }
3154         return retval;
3155 }
3156
/* Default header field values used to fill encap headers whose fields the
 * user left at zero in the pattern.
 */
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04

/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Concatenates the item specs into @p buf, validating that the protocol
 * stacking is sane (e.g. VLAN follows ETH, UDP follows an IP header) and
 * filling protocol/type and default fields the user left at zero.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        /* Pointers into buf, remembered so later items can patch the
         * type/protocol fields of the headers preceding them.
         */
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv4_hdr *ipv4 = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        struct rte_vxlan_hdr *vxlan = NULL;
        struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct rte_gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_hdr_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                /* NOTE(review): items->spec is copied without a NULL check;
                 * presumably guaranteed non-NULL by earlier validation -
                 * confirm against the validate path.
                 */
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct rte_ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct rte_vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        /* Patch the EtherType only if the user left it 0. */
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        /* Apply default version/IHL and TTL when unset. */
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
                        /* Apply default version/TC/flow-label and hop limit. */
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
                        if (!ipv6->hop_limits)
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp = (struct rte_udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_UDP;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!udp->dst_port)
                                udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
                        if (!vxlan->vx_flags)
                                vxlan->vx_flags =
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        /* Unlike VXLAN, GPE must carry an explicit next
                         * protocol - there is no sensible default.
                         */
                        if (!vxlan_gpe->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!udp->dst_port)
                                udp->dst_port =
                                        RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
                        if (!vxlan_gpe->vx_flags)
                                vxlan_gpe->vx_flags =
                                                MLX5_ENCAP_VXLAN_GPE_FLAGS;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        gre = (struct rte_gre_hdr *)&buf[temp_size];
                        /* GRE must carry an explicit next protocol too. */
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_GRE;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "unsupported item type");
                        break;
                }
                temp_size += len;
        }
        *size = temp_size;
        return 0;
}
3334
/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header buffer.
 *
 * Walks the Ethernet header and any stacked VLAN/QinQ tags. IPv4 payloads
 * are left untouched (the comment below states HW computes the IPv4
 * checksum); for IPv6/UDP the datagram checksum is cleared; any other L3
 * protocol is rejected.
 *
 * @param data
 *   Pointer to the raw encap buffer, starting at the Ethernet header.
 * @param error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        char *next_hdr;
        uint16_t proto;

        eth = (struct rte_ether_hdr *)data;
        next_hdr = (char *)(eth + 1);
        /* Byte-swap the network-order EtherType so it can be compared with
         * the CPU-order RTE_ETHER_TYPE_* constants below.
         */
        proto = RTE_BE16(eth->ether_type);

        /* VLAN skipping */
        while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
                vlan = (struct rte_vlan_hdr *)next_hdr;
                proto = RTE_BE16(vlan->eth_proto);
                next_hdr += sizeof(struct rte_vlan_hdr);
        }

        /* HW calculates IPv4 csum. no need to proceed */
        if (proto == RTE_ETHER_TYPE_IPV4)
                return 0;

        /* non IPv4/IPv6 header. not supported */
        if (proto != RTE_ETHER_TYPE_IPV6) {
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "Cannot offload non IPv4/IPv6");
        }

        ipv6 = (struct rte_ipv6_hdr *)next_hdr;

        /* ignore non UDP */
        /* NOTE(review): extension headers are not walked; assumes UDP, when
         * present, directly follows the fixed IPv6 header - confirm callers
         * never build encap data with extension headers.
         */
        if (ipv6->proto != IPPROTO_UDP)
                return 0;

        udp = (struct rte_udp_hdr *)(ipv6 + 1);
        udp->dgram_cksum = 0;

        return 0;
}
3378
/**
 * Convert L2 encap action to DV specification.
 *
 * Builds the raw reformat buffer either directly from a RAW_ENCAP action
 * or by flattening the VXLAN/NVGRE encap item list, clears any IPv6/UDP
 * checksum, then registers the encap/decap resource on the dev_flow.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure (RAW_ENCAP, VXLAN_ENCAP or NVGRE_ENCAP).
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] transfer
 *   Mark if the flow is E-Switch flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
                               const struct rte_flow_action *action,
                               struct mlx5_flow *dev_flow,
                               uint8_t transfer,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *encap_data;
        const struct rte_flow_action_raw_encap *raw_encap_data;
        struct mlx5_flow_dv_encap_decap_resource res = {
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
                .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
                                      MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
        };

        if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                raw_encap_data =
                        (const struct rte_flow_action_raw_encap *)action->conf;
                res.size = raw_encap_data->size;
                /* NOTE(review): size is copied without checking against the
                 * res.buf capacity - presumably bounded by earlier action
                 * validation; confirm.
                 */
                memcpy(res.buf, raw_encap_data->data, res.size);
        } else {
                if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
                        encap_data =
                                ((const struct rte_flow_action_vxlan_encap *)
                                                action->conf)->definition;
                else
                        encap_data =
                                ((const struct rte_flow_action_nvgre_encap *)
                                                action->conf)->definition;
                if (flow_dv_convert_encap_data(encap_data, res.buf,
                                               &res.size, error))
                        return -rte_errno;
        }
        if (flow_dv_zero_encap_udp_csum(res.buf, error))
                return -rte_errno;
        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 encap action");
        return 0;
}
3438
3439 /**
3440  * Convert L2 decap action to DV specification.
3441  *
3442  * @param[in] dev
3443  *   Pointer to rte_eth_dev structure.
3444  * @param[in, out] dev_flow
3445  *   Pointer to the mlx5_flow.
3446  * @param[in] transfer
3447  *   Mark if the flow is E-Switch flow.
3448  * @param[out] error
3449  *   Pointer to the error structure.
3450  *
3451  * @return
3452  *   0 on success, a negative errno value otherwise and rte_errno is set.
3453  */
3454 static int
3455 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3456                                struct mlx5_flow *dev_flow,
3457                                uint8_t transfer,
3458                                struct rte_flow_error *error)
3459 {
3460         struct mlx5_flow_dv_encap_decap_resource res = {
3461                 .size = 0,
3462                 .reformat_type =
3463                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3464                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3465                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3466         };
3467
3468         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3469                 return rte_flow_error_set(error, EINVAL,
3470                                           RTE_FLOW_ERROR_TYPE_ACTION,
3471                                           NULL, "can't create L2 decap action");
3472         return 0;
3473 }
3474
3475 /**
3476  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3477  *
3478  * @param[in] dev
3479  *   Pointer to rte_eth_dev structure.
3480  * @param[in] action
3481  *   Pointer to action structure.
3482  * @param[in, out] dev_flow
3483  *   Pointer to the mlx5_flow.
3484  * @param[in] attr
3485  *   Pointer to the flow attributes.
3486  * @param[out] error
3487  *   Pointer to the error structure.
3488  *
3489  * @return
3490  *   0 on success, a negative errno value otherwise and rte_errno is set.
3491  */
3492 static int
3493 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3494                                 const struct rte_flow_action *action,
3495                                 struct mlx5_flow *dev_flow,
3496                                 const struct rte_flow_attr *attr,
3497                                 struct rte_flow_error *error)
3498 {
3499         const struct rte_flow_action_raw_encap *encap_data;
3500         struct mlx5_flow_dv_encap_decap_resource res;
3501
3502         memset(&res, 0, sizeof(res));
3503         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3504         res.size = encap_data->size;
3505         memcpy(res.buf, encap_data->data, res.size);
3506         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3507                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3508                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3509         if (attr->transfer)
3510                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3511         else
3512                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3513                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3514         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3515                 return rte_flow_error_set(error, EINVAL,
3516                                           RTE_FLOW_ERROR_TYPE_ACTION,
3517                                           NULL, "can't create encap action");
3518         return 0;
3519 }
3520
3521 /**
3522  * Create action push VLAN.
3523  *
3524  * @param[in] dev
3525  *   Pointer to rte_eth_dev structure.
3526  * @param[in] attr
3527  *   Pointer to the flow attributes.
3528  * @param[in] vlan
3529  *   Pointer to the vlan to push to the Ethernet header.
3530  * @param[in, out] dev_flow
3531  *   Pointer to the mlx5_flow.
3532  * @param[out] error
3533  *   Pointer to the error structure.
3534  *
3535  * @return
3536  *   0 on success, a negative errno value otherwise and rte_errno is set.
3537  */
3538 static int
3539 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3540                                 const struct rte_flow_attr *attr,
3541                                 const struct rte_vlan_hdr *vlan,
3542                                 struct mlx5_flow *dev_flow,
3543                                 struct rte_flow_error *error)
3544 {
3545         struct mlx5_flow_dv_push_vlan_action_resource res;
3546
3547         memset(&res, 0, sizeof(res));
3548         res.vlan_tag =
3549                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3550                                  vlan->vlan_tci);
3551         if (attr->transfer)
3552                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3553         else
3554                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3555                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3556         return flow_dv_push_vlan_action_resource_register
3557                                             (dev, &res, dev_flow, error);
3558 }
3559
/*
 * Non-zero while an E-Switch (FDB) mirroring/sample flow is in effect; used
 * below to reject action combinations (modify-header, jump) that cannot be
 * combined with sample-based mirroring.
 * NOTE(review): plain file-scope int with no synchronization — presumably
 * only touched from the single-threaded flow configuration path; confirm.
 */
static int fdb_mirror;
3561
3562 /**
3563  * Validate the modify-header actions.
3564  *
3565  * @param[in] action_flags
3566  *   Holds the actions detected until now.
3567  * @param[in] action
3568  *   Pointer to the modify action.
3569  * @param[out] error
3570  *   Pointer to error structure.
3571  *
3572  * @return
3573  *   0 on success, a negative errno value otherwise and rte_errno is set.
3574  */
3575 static int
3576 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3577                                    const struct rte_flow_action *action,
3578                                    struct rte_flow_error *error)
3579 {
3580         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3581                 return rte_flow_error_set(error, EINVAL,
3582                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3583                                           NULL, "action configuration not set");
3584         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3585                 return rte_flow_error_set(error, EINVAL,
3586                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3587                                           "can't have encap action before"
3588                                           " modify action");
3589         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3590                 return rte_flow_error_set(error, EINVAL,
3591                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3592                                           "can't support sample action before"
3593                                           " modify action for E-Switch"
3594                                           " mirroring");
3595         return 0;
3596 }
3597
3598 /**
3599  * Validate the modify-header MAC address actions.
3600  *
3601  * @param[in] action_flags
3602  *   Holds the actions detected until now.
3603  * @param[in] action
3604  *   Pointer to the modify action.
3605  * @param[in] item_flags
3606  *   Holds the items detected.
3607  * @param[out] error
3608  *   Pointer to error structure.
3609  *
3610  * @return
3611  *   0 on success, a negative errno value otherwise and rte_errno is set.
3612  */
3613 static int
3614 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3615                                    const struct rte_flow_action *action,
3616                                    const uint64_t item_flags,
3617                                    struct rte_flow_error *error)
3618 {
3619         int ret = 0;
3620
3621         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3622         if (!ret) {
3623                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3624                         return rte_flow_error_set(error, EINVAL,
3625                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3626                                                   NULL,
3627                                                   "no L2 item in pattern");
3628         }
3629         return ret;
3630 }
3631
3632 /**
3633  * Validate the modify-header IPv4 address actions.
3634  *
3635  * @param[in] action_flags
3636  *   Holds the actions detected until now.
3637  * @param[in] action
3638  *   Pointer to the modify action.
3639  * @param[in] item_flags
3640  *   Holds the items detected.
3641  * @param[out] error
3642  *   Pointer to error structure.
3643  *
3644  * @return
3645  *   0 on success, a negative errno value otherwise and rte_errno is set.
3646  */
3647 static int
3648 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3649                                     const struct rte_flow_action *action,
3650                                     const uint64_t item_flags,
3651                                     struct rte_flow_error *error)
3652 {
3653         int ret = 0;
3654         uint64_t layer;
3655
3656         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3657         if (!ret) {
3658                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3659                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3660                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3661                 if (!(item_flags & layer))
3662                         return rte_flow_error_set(error, EINVAL,
3663                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3664                                                   NULL,
3665                                                   "no ipv4 item in pattern");
3666         }
3667         return ret;
3668 }
3669
3670 /**
3671  * Validate the modify-header IPv6 address actions.
3672  *
3673  * @param[in] action_flags
3674  *   Holds the actions detected until now.
3675  * @param[in] action
3676  *   Pointer to the modify action.
3677  * @param[in] item_flags
3678  *   Holds the items detected.
3679  * @param[out] error
3680  *   Pointer to error structure.
3681  *
3682  * @return
3683  *   0 on success, a negative errno value otherwise and rte_errno is set.
3684  */
3685 static int
3686 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3687                                     const struct rte_flow_action *action,
3688                                     const uint64_t item_flags,
3689                                     struct rte_flow_error *error)
3690 {
3691         int ret = 0;
3692         uint64_t layer;
3693
3694         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3695         if (!ret) {
3696                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3697                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3698                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3699                 if (!(item_flags & layer))
3700                         return rte_flow_error_set(error, EINVAL,
3701                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3702                                                   NULL,
3703                                                   "no ipv6 item in pattern");
3704         }
3705         return ret;
3706 }
3707
3708 /**
3709  * Validate the modify-header TP actions.
3710  *
3711  * @param[in] action_flags
3712  *   Holds the actions detected until now.
3713  * @param[in] action
3714  *   Pointer to the modify action.
3715  * @param[in] item_flags
3716  *   Holds the items detected.
3717  * @param[out] error
3718  *   Pointer to error structure.
3719  *
3720  * @return
3721  *   0 on success, a negative errno value otherwise and rte_errno is set.
3722  */
3723 static int
3724 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3725                                   const struct rte_flow_action *action,
3726                                   const uint64_t item_flags,
3727                                   struct rte_flow_error *error)
3728 {
3729         int ret = 0;
3730         uint64_t layer;
3731
3732         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3733         if (!ret) {
3734                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3735                                  MLX5_FLOW_LAYER_INNER_L4 :
3736                                  MLX5_FLOW_LAYER_OUTER_L4;
3737                 if (!(item_flags & layer))
3738                         return rte_flow_error_set(error, EINVAL,
3739                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3740                                                   NULL, "no transport layer "
3741                                                   "in pattern");
3742         }
3743         return ret;
3744 }
3745
3746 /**
3747  * Validate the modify-header actions of increment/decrement
3748  * TCP Sequence-number.
3749  *
3750  * @param[in] action_flags
3751  *   Holds the actions detected until now.
3752  * @param[in] action
3753  *   Pointer to the modify action.
3754  * @param[in] item_flags
3755  *   Holds the items detected.
3756  * @param[out] error
3757  *   Pointer to error structure.
3758  *
3759  * @return
3760  *   0 on success, a negative errno value otherwise and rte_errno is set.
3761  */
3762 static int
3763 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3764                                        const struct rte_flow_action *action,
3765                                        const uint64_t item_flags,
3766                                        struct rte_flow_error *error)
3767 {
3768         int ret = 0;
3769         uint64_t layer;
3770
3771         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3772         if (!ret) {
3773                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3774                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3775                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3776                 if (!(item_flags & layer))
3777                         return rte_flow_error_set(error, EINVAL,
3778                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3779                                                   NULL, "no TCP item in"
3780                                                   " pattern");
3781                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3782                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3783                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3784                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3785                         return rte_flow_error_set(error, EINVAL,
3786                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3787                                                   NULL,
3788                                                   "cannot decrease and increase"
3789                                                   " TCP sequence number"
3790                                                   " at the same time");
3791         }
3792         return ret;
3793 }
3794
3795 /**
3796  * Validate the modify-header actions of increment/decrement
3797  * TCP Acknowledgment number.
3798  *
3799  * @param[in] action_flags
3800  *   Holds the actions detected until now.
3801  * @param[in] action
3802  *   Pointer to the modify action.
3803  * @param[in] item_flags
3804  *   Holds the items detected.
3805  * @param[out] error
3806  *   Pointer to error structure.
3807  *
3808  * @return
3809  *   0 on success, a negative errno value otherwise and rte_errno is set.
3810  */
3811 static int
3812 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3813                                        const struct rte_flow_action *action,
3814                                        const uint64_t item_flags,
3815                                        struct rte_flow_error *error)
3816 {
3817         int ret = 0;
3818         uint64_t layer;
3819
3820         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3821         if (!ret) {
3822                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3823                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3824                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3825                 if (!(item_flags & layer))
3826                         return rte_flow_error_set(error, EINVAL,
3827                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3828                                                   NULL, "no TCP item in"
3829                                                   " pattern");
3830                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3831                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3832                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3833                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3834                         return rte_flow_error_set(error, EINVAL,
3835                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3836                                                   NULL,
3837                                                   "cannot decrease and increase"
3838                                                   " TCP acknowledgment number"
3839                                                   " at the same time");
3840         }
3841         return ret;
3842 }
3843
3844 /**
3845  * Validate the modify-header TTL actions.
3846  *
3847  * @param[in] action_flags
3848  *   Holds the actions detected until now.
3849  * @param[in] action
3850  *   Pointer to the modify action.
3851  * @param[in] item_flags
3852  *   Holds the items detected.
3853  * @param[out] error
3854  *   Pointer to error structure.
3855  *
3856  * @return
3857  *   0 on success, a negative errno value otherwise and rte_errno is set.
3858  */
3859 static int
3860 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3861                                    const struct rte_flow_action *action,
3862                                    const uint64_t item_flags,
3863                                    struct rte_flow_error *error)
3864 {
3865         int ret = 0;
3866         uint64_t layer;
3867
3868         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3869         if (!ret) {
3870                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3871                                  MLX5_FLOW_LAYER_INNER_L3 :
3872                                  MLX5_FLOW_LAYER_OUTER_L3;
3873                 if (!(item_flags & layer))
3874                         return rte_flow_error_set(error, EINVAL,
3875                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3876                                                   NULL,
3877                                                   "no IP protocol in pattern");
3878         }
3879         return ret;
3880 }
3881
3882 /**
3883  * Validate jump action.
3884  *
3885  * @param[in] action
3886  *   Pointer to the jump action.
3887  * @param[in] action_flags
3888  *   Holds the actions detected until now.
3889  * @param[in] attributes
3890  *   Pointer to flow attributes
3891  * @param[in] external
3892  *   Action belongs to flow rule created by request external to PMD.
3893  * @param[out] error
3894  *   Pointer to error structure.
3895  *
3896  * @return
3897  *   0 on success, a negative errno value otherwise and rte_errno is set.
3898  */
3899 static int
3900 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3901                              const struct mlx5_flow_tunnel *tunnel,
3902                              const struct rte_flow_action *action,
3903                              uint64_t action_flags,
3904                              const struct rte_flow_attr *attributes,
3905                              bool external, struct rte_flow_error *error)
3906 {
3907         uint32_t target_group, table;
3908         int ret = 0;
3909         struct flow_grp_info grp_info = {
3910                 .external = !!external,
3911                 .transfer = !!attributes->transfer,
3912                 .fdb_def_rule = 1,
3913                 .std_tbl_fix = 0
3914         };
3915         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3916                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3917                 return rte_flow_error_set(error, EINVAL,
3918                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3919                                           "can't have 2 fate actions in"
3920                                           " same flow");
3921         if (action_flags & MLX5_FLOW_ACTION_METER)
3922                 return rte_flow_error_set(error, ENOTSUP,
3923                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3924                                           "jump with meter not support");
3925         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3926                 return rte_flow_error_set(error, EINVAL,
3927                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3928                                           "E-Switch mirroring can't support"
3929                                           " Sample action and jump action in"
3930                                           " same flow now");
3931         if (!action->conf)
3932                 return rte_flow_error_set(error, EINVAL,
3933                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3934                                           NULL, "action configuration not set");
3935         target_group =
3936                 ((const struct rte_flow_action_jump *)action->conf)->group;
3937         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3938                                        &grp_info, error);
3939         if (ret)
3940                 return ret;
3941         if (attributes->group == target_group &&
3942             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3943                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3944                 return rte_flow_error_set(error, EINVAL,
3945                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3946                                           "target group must be other than"
3947                                           " the current flow group");
3948         return 0;
3949 }
3950
3951 /*
3952  * Validate the port_id action.
3953  *
3954  * @param[in] dev
3955  *   Pointer to rte_eth_dev structure.
3956  * @param[in] action_flags
3957  *   Bit-fields that holds the actions detected until now.
3958  * @param[in] action
3959  *   Port_id RTE action structure.
3960  * @param[in] attr
3961  *   Attributes of flow that includes this action.
3962  * @param[out] error
3963  *   Pointer to error structure.
3964  *
3965  * @return
3966  *   0 on success, a negative errno value otherwise and rte_errno is set.
3967  */
3968 static int
3969 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3970                                 uint64_t action_flags,
3971                                 const struct rte_flow_action *action,
3972                                 const struct rte_flow_attr *attr,
3973                                 struct rte_flow_error *error)
3974 {
3975         const struct rte_flow_action_port_id *port_id;
3976         struct mlx5_priv *act_priv;
3977         struct mlx5_priv *dev_priv;
3978         uint16_t port;
3979
3980         if (!attr->transfer)
3981                 return rte_flow_error_set(error, ENOTSUP,
3982                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3983                                           NULL,
3984                                           "port id action is valid in transfer"
3985                                           " mode only");
3986         if (!action || !action->conf)
3987                 return rte_flow_error_set(error, ENOTSUP,
3988                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3989                                           NULL,
3990                                           "port id action parameters must be"
3991                                           " specified");
3992         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3993                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3994                 return rte_flow_error_set(error, EINVAL,
3995                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3996                                           "can have only one fate actions in"
3997                                           " a flow");
3998         dev_priv = mlx5_dev_to_eswitch_info(dev);
3999         if (!dev_priv)
4000                 return rte_flow_error_set(error, rte_errno,
4001                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4002                                           NULL,
4003                                           "failed to obtain E-Switch info");
4004         port_id = action->conf;
4005         port = port_id->original ? dev->data->port_id : port_id->id;
4006         act_priv = mlx5_port_to_eswitch_info(port, false);
4007         if (!act_priv)
4008                 return rte_flow_error_set
4009                                 (error, rte_errno,
4010                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4011                                  "failed to obtain E-Switch port id for port");
4012         if (act_priv->domain_id != dev_priv->domain_id)
4013                 return rte_flow_error_set
4014                                 (error, EINVAL,
4015                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4016                                  "port does not belong to"
4017                                  " E-Switch being configured");
4018         return 0;
4019 }
4020
4021 /**
4022  * Get the maximum number of modify header actions.
4023  *
4024  * @param dev
4025  *   Pointer to rte_eth_dev structure.
4026  * @param flags
4027  *   Flags bits to check if root level.
4028  *
4029  * @return
4030  *   Max number of modify header actions device can support.
4031  */
4032 static inline unsigned int
4033 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4034                               uint64_t flags)
4035 {
4036         /*
4037          * There's no way to directly query the max capacity from FW.
4038          * The maximal value on root table should be assumed to be supported.
4039          */
4040         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4041                 return MLX5_MAX_MODIFY_NUM;
4042         else
4043                 return MLX5_ROOT_TBL_MODIFY_NUM;
4044 }
4045
4046 /**
4047  * Validate the meter action.
4048  *
4049  * @param[in] dev
4050  *   Pointer to rte_eth_dev structure.
4051  * @param[in] action_flags
4052  *   Bit-fields that holds the actions detected until now.
4053  * @param[in] action
4054  *   Pointer to the meter action.
4055  * @param[in] attr
4056  *   Attributes of flow that includes this action.
4057  * @param[out] error
4058  *   Pointer to error structure.
4059  *
4060  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
4062  */
4063 static int
4064 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4065                                 uint64_t action_flags,
4066                                 const struct rte_flow_action *action,
4067                                 const struct rte_flow_attr *attr,
4068                                 struct rte_flow_error *error)
4069 {
4070         struct mlx5_priv *priv = dev->data->dev_private;
4071         const struct rte_flow_action_meter *am = action->conf;
4072         struct mlx5_flow_meter *fm;
4073
4074         if (!am)
4075                 return rte_flow_error_set(error, EINVAL,
4076                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4077                                           "meter action conf is NULL");
4078
4079         if (action_flags & MLX5_FLOW_ACTION_METER)
4080                 return rte_flow_error_set(error, ENOTSUP,
4081                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4082                                           "meter chaining not support");
4083         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4084                 return rte_flow_error_set(error, ENOTSUP,
4085                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4086                                           "meter with jump not support");
4087         if (!priv->mtr_en)
4088                 return rte_flow_error_set(error, ENOTSUP,
4089                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4090                                           NULL,
4091                                           "meter action not supported");
4092         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4093         if (!fm)
4094                 return rte_flow_error_set(error, EINVAL,
4095                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4096                                           "Meter not found");
4097         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4098               (!fm->ingress && !attr->ingress && attr->egress) ||
4099               (!fm->egress && !attr->egress && attr->ingress))))
4100                 return rte_flow_error_set(error, EINVAL,
4101                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4102                                           "Flow attributes are either invalid "
4103                                           "or have a conflict with current "
4104                                           "meter attributes");
4105         return 0;
4106 }
4107
4108 /**
4109  * Validate the age action.
4110  *
4111  * @param[in] action_flags
4112  *   Holds the actions detected until now.
4113  * @param[in] action
4114  *   Pointer to the age action.
4115  * @param[in] dev
4116  *   Pointer to the Ethernet device structure.
4117  * @param[out] error
4118  *   Pointer to error structure.
4119  *
4120  * @return
4121  *   0 on success, a negative errno value otherwise and rte_errno is set.
4122  */
4123 static int
4124 flow_dv_validate_action_age(uint64_t action_flags,
4125                             const struct rte_flow_action *action,
4126                             struct rte_eth_dev *dev,
4127                             struct rte_flow_error *error)
4128 {
4129         struct mlx5_priv *priv = dev->data->dev_private;
4130         const struct rte_flow_action_age *age = action->conf;
4131
4132         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4133             !priv->sh->aso_age_mng))
4134                 return rte_flow_error_set(error, ENOTSUP,
4135                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4136                                           NULL,
4137                                           "age action not supported");
4138         if (!(action->conf))
4139                 return rte_flow_error_set(error, EINVAL,
4140                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4141                                           "configuration cannot be null");
4142         if (!(age->timeout))
4143                 return rte_flow_error_set(error, EINVAL,
4144                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4145                                           "invalid timeout value 0");
4146         if (action_flags & MLX5_FLOW_ACTION_AGE)
4147                 return rte_flow_error_set(error, EINVAL,
4148                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4149                                           "duplicate age actions set");
4150         return 0;
4151 }
4152
4153 /**
4154  * Validate the modify-header IPv4 DSCP actions.
4155  *
4156  * @param[in] action_flags
4157  *   Holds the actions detected until now.
4158  * @param[in] action
4159  *   Pointer to the modify action.
4160  * @param[in] item_flags
4161  *   Holds the items detected.
4162  * @param[out] error
4163  *   Pointer to error structure.
4164  *
4165  * @return
4166  *   0 on success, a negative errno value otherwise and rte_errno is set.
4167  */
4168 static int
4169 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4170                                          const struct rte_flow_action *action,
4171                                          const uint64_t item_flags,
4172                                          struct rte_flow_error *error)
4173 {
4174         int ret = 0;
4175
4176         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4177         if (!ret) {
4178                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4179                         return rte_flow_error_set(error, EINVAL,
4180                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4181                                                   NULL,
4182                                                   "no ipv4 item in pattern");
4183         }
4184         return ret;
4185 }
4186
4187 /**
4188  * Validate the modify-header IPv6 DSCP actions.
4189  *
4190  * @param[in] action_flags
4191  *   Holds the actions detected until now.
4192  * @param[in] action
4193  *   Pointer to the modify action.
4194  * @param[in] item_flags
4195  *   Holds the items detected.
4196  * @param[out] error
4197  *   Pointer to error structure.
4198  *
4199  * @return
4200  *   0 on success, a negative errno value otherwise and rte_errno is set.
4201  */
4202 static int
4203 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4204                                          const struct rte_flow_action *action,
4205                                          const uint64_t item_flags,
4206                                          struct rte_flow_error *error)
4207 {
4208         int ret = 0;
4209
4210         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4211         if (!ret) {
4212                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4213                         return rte_flow_error_set(error, EINVAL,
4214                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4215                                                   NULL,
4216                                                   "no ipv6 item in pattern");
4217         }
4218         return ret;
4219 }
4220
4221 /**
4222  * Match modify-header resource.
4223  *
4224  * @param list
4225  *   Pointer to the hash list.
4226  * @param entry
4227  *   Pointer to exist resource entry object.
4228  * @param key
4229  *   Key of the new entry.
4230  * @param ctx
4231  *   Pointer to new modify-header resource.
4232  *
4233  * @return
4234  *   0 on matching, non-zero otherwise.
4235  */
int
flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
			struct mlx5_hlist_entry *entry,
			uint64_t key __rte_unused, void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
	struct mlx5_flow_dv_modify_hdr_resource *resource =
			container_of(entry, typeof(*resource), entry);
	/*
	 * The comparison key is the tail of the resource structure starting
	 * at ->ft_type, plus the variable-length actions array that follows
	 * the structure; this layout must stay in sync with
	 * flow_dv_modify_create_cb(), which copies the same byte range.
	 */
	uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);

	key_len += ref->actions_num * sizeof(ref->actions[0]);
	/*
	 * Compare actions_num first so memcmp never reads past the shorter
	 * entry's actions array; returns 0 only on a full match.
	 */
	return ref->actions_num != resource->actions_num ||
	       memcmp(&ref->ft_type, &resource->ft_type, key_len);
}
4251
4252 struct mlx5_hlist_entry *
4253 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4254                          void *cb_ctx)
4255 {
4256         struct mlx5_dev_ctx_shared *sh = list->ctx;
4257         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4258         struct mlx5dv_dr_domain *ns;
4259         struct mlx5_flow_dv_modify_hdr_resource *entry;
4260         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4261         int ret;
4262         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4263         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4264
4265         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4266                             SOCKET_ID_ANY);
4267         if (!entry) {
4268                 rte_flow_error_set(ctx->error, ENOMEM,
4269                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4270                                    "cannot allocate resource memory");
4271                 return NULL;
4272         }
4273         rte_memcpy(&entry->ft_type,
4274                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4275                    key_len + data_len);
4276         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4277                 ns = sh->fdb_domain;
4278         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4279                 ns = sh->tx_domain;
4280         else
4281                 ns = sh->rx_domain;
4282         ret = mlx5_flow_os_create_flow_action_modify_header
4283                                         (sh->ctx, ns, entry,
4284                                          data_len, &entry->action);
4285         if (ret) {
4286                 mlx5_free(entry);
4287                 rte_flow_error_set(ctx->error, ENOMEM,
4288                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4289                                    NULL, "cannot create modification action");
4290                 return NULL;
4291         }
4292         return &entry->entry;
4293 }
4294
4295 /**
4296  * Validate the sample action.
4297  *
4298  * @param[in] action_flags
4299  *   Holds the actions detected until now.
4300  * @param[in] action
4301  *   Pointer to the sample action.
4302  * @param[in] dev
4303  *   Pointer to the Ethernet device structure.
4304  * @param[in] attr
4305  *   Attributes of flow that includes this action.
4306  * @param[out] error
4307  *   Pointer to error structure.
4308  *
4309  * @return
4310  *   0 on success, a negative errno value otherwise and rte_errno is set.
4311  */
static int
flow_dv_validate_action_sample(uint64_t action_flags,
			       const struct rte_flow_action *action,
			       struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *dev_conf = &priv->config;
	const struct rte_flow_action_sample *sample = action->conf;
	const struct rte_flow_action *act;
	uint64_t sub_action_flags = 0;
	/* 0xFFFF means no QUEUE sub-action was seen (hairpin check below). */
	uint16_t queue_index = 0xFFFF;
	int actions_n = 0;
	int ret;
	/*
	 * NOTE(review): 'fdb_mirror' is not declared in this function; it
	 * appears to be file-scope state consumed by the translation path -
	 * confirm its definition elsewhere in this file.
	 */
	fdb_mirror = 0;

	if (!sample)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be NULL");
	if (sample->ratio == 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "ratio value starts from 1");
	/* Sampling requires DevX and the device sampler capability. */
	if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "sample action not supported");
	/* Only one sample action is allowed per flow. */
	if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "Multiple sample actions not "
					  "supported");
	/* Meter and jump must come after sample in the action list. */
	if (action_flags & MLX5_FLOW_ACTION_METER)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, meter should "
					  "be after sample action");
	if (action_flags & MLX5_FLOW_ACTION_JUMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, jump should "
					  "be after sample action");
	/* Validate each sub-action held inside the sample configuration. */
	act = sample->actions;
	for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  act, "too many actions");
		switch (act->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(act,
							      sub_action_flags,
							      dev,
							      attr, error);
			if (ret < 0)
				return ret;
			/* Remember the queue for the hairpin check below. */
			queue_index = ((const struct rte_flow_action_queue *)
							(act->conf))->index;
			sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = flow_dv_validate_action_mark(dev, act,
							   sub_action_flags,
							   attr, error);
			if (ret < 0)
				return ret;
			/* Extended metadata mode widens the MARK action. */
			if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
				sub_action_flags |= MLX5_FLOW_ACTION_MARK |
						MLX5_FLOW_ACTION_MARK_EXT;
			else
				sub_action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      sub_action_flags,
							      act,
							      attr,
							      error);
			if (ret)
				return ret;
			sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			/* May bump actions_n internally as well. */
			ret = flow_dv_validate_action_raw_encap_decap
				(dev, NULL, act->conf, attr, &sub_action_flags,
				 &actions_n, error);
			if (ret < 0)
				return ret;
			++actions_n;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Doesn't support optional "
						  "action");
		}
	}
	if (attr->ingress && !attr->transfer) {
		/* NIC-RX sampling must deliver to a destination queue. */
		if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Ingress must has a dest "
						  "QUEUE for Sample");
	} else if (attr->egress && !attr->transfer) {
		/* Plain NIC-TX sampling is not supported. */
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL,
					  "Sample Only support Ingress "
					  "or E-Switch");
	} else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
		/* E-Switch (FDB) mirroring: ratio 1, dest port mandatory. */
		MLX5_ASSERT(attr->transfer);
		if (sample->ratio > 1)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch doesn't support "
						  "any optional action "
						  "for sampling");
		fdb_mirror = 1;
		if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "E-Switch must has a dest "
						  "port for mirroring");
	}
	/* Continue validation for Xcap actions.*/
	if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
	    (queue_index == 0xFFFF ||
	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
		if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
		     MLX5_FLOW_XCAP_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap and decap "
						  "combination aren't "
						  "supported");
		if (!attr->transfer && attr->ingress && (sub_action_flags &
							MLX5_FLOW_ACTION_ENCAP))
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "encap is not supported"
						  " for ingress traffic");
	}
	return 0;
}
4478
4479 /**
4480  * Find existing modify-header resource or create and register a new one.
4481  *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
4487  *   Pointer to the dev_flow.
4488  * @param[out] error
4489  *   pointer to error structure.
4490  *
4491  * @return
4492  *   0 on success otherwise -errno and errno is set.
4493  */
4494 static int
4495 flow_dv_modify_hdr_resource_register
4496                         (struct rte_eth_dev *dev,
4497                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4498                          struct mlx5_flow *dev_flow,
4499                          struct rte_flow_error *error)
4500 {
4501         struct mlx5_priv *priv = dev->data->dev_private;
4502         struct mlx5_dev_ctx_shared *sh = priv->sh;
4503         uint32_t key_len = sizeof(*resource) -
4504                            offsetof(typeof(*resource), ft_type) +
4505                            resource->actions_num * sizeof(resource->actions[0]);
4506         struct mlx5_hlist_entry *entry;
4507         struct mlx5_flow_cb_ctx ctx = {
4508                 .error = error,
4509                 .data = resource,
4510         };
4511
4512         resource->flags = dev_flow->dv.group ? 0 :
4513                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4514         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4515                                     resource->flags))
4516                 return rte_flow_error_set(error, EOVERFLOW,
4517                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4518                                           "too many modify header items");
4519         resource->entry.key = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4520         entry = mlx5_hlist_register(sh->modify_cmds, resource->entry.key, &ctx);
4521         if (!entry)
4522                 return -rte_errno;
4523         resource = container_of(entry, typeof(*resource), entry);
4524         dev_flow->handle->dvh.modify_hdr = resource;
4525         return 0;
4526 }
4527
4528 /**
4529  * Get DV flow counter by index.
4530  *
4531  * @param[in] dev
4532  *   Pointer to the Ethernet device structure.
4533  * @param[in] idx
4534  *   mlx5 flow counter index in the container.
4535  * @param[out] ppool
4536  *   mlx5 flow counter pool in the container,
4537  *
4538  * @return
4539  *   Pointer to the counter, NULL otherwise.
4540  */
4541 static struct mlx5_flow_counter *
4542 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4543                            uint32_t idx,
4544                            struct mlx5_flow_counter_pool **ppool)
4545 {
4546         struct mlx5_priv *priv = dev->data->dev_private;
4547         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4548         struct mlx5_flow_counter_pool *pool;
4549
4550         /* Decrease to original index and clear shared bit. */
4551         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4552         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4553         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4554         MLX5_ASSERT(pool);
4555         if (ppool)
4556                 *ppool = pool;
4557         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4558 }
4559
4560 /**
4561  * Check the devx counter belongs to the pool.
4562  *
4563  * @param[in] pool
4564  *   Pointer to the counter pool.
4565  * @param[in] id
4566  *   The counter devx ID.
4567  *
4568  * @return
4569  *   True if counter belongs to the pool, false otherwise.
4570  */
4571 static bool
4572 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4573 {
4574         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4575                    MLX5_COUNTERS_PER_POOL;
4576
4577         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4578                 return true;
4579         return false;
4580 }
4581
4582 /**
4583  * Get a pool by devx counter ID.
4584  *
4585  * @param[in] cmng
4586  *   Pointer to the counter management.
4587  * @param[in] id
4588  *   The counter devx ID.
4589  *
4590  * @return
 *   The counter pool pointer if exists, NULL otherwise.
4592  */
4593 static struct mlx5_flow_counter_pool *
4594 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4595 {
4596         uint32_t i;
4597         struct mlx5_flow_counter_pool *pool = NULL;
4598
4599         rte_spinlock_lock(&cmng->pool_update_sl);
4600         /* Check last used pool. */
4601         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4602             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4603                 pool = cmng->pools[cmng->last_pool_idx];
4604                 goto out;
4605         }
4606         /* ID out of range means no suitable pool in the container. */
4607         if (id > cmng->max_id || id < cmng->min_id)
4608                 goto out;
4609         /*
4610          * Find the pool from the end of the container, since mostly counter
4611          * ID is sequence increasing, and the last pool should be the needed
4612          * one.
4613          */
4614         i = cmng->n_valid;
4615         while (i--) {
4616                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4617
4618                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4619                         pool = pool_tmp;
4620                         break;
4621                 }
4622         }
4623 out:
4624         rte_spinlock_unlock(&cmng->pool_update_sl);
4625         return pool;
4626 }
4627
4628 /**
4629  * Resize a counter container.
4630  *
4631  * @param[in] dev
4632  *   Pointer to the Ethernet device structure.
4633  *
4634  * @return
4635  *   0 on success, otherwise negative errno value and rte_errno is set.
4636  */
4637 static int
4638 flow_dv_container_resize(struct rte_eth_dev *dev)
4639 {
4640         struct mlx5_priv *priv = dev->data->dev_private;
4641         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4642         void *old_pools = cmng->pools;
4643         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4644         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4645         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4646
4647         if (!pools) {
4648                 rte_errno = ENOMEM;
4649                 return -ENOMEM;
4650         }
4651         if (old_pools)
4652                 memcpy(pools, old_pools, cmng->n *
4653                                        sizeof(struct mlx5_flow_counter_pool *));
4654         cmng->n = resize;
4655         cmng->pools = pools;
4656         if (old_pools)
4657                 mlx5_free(old_pools);
4658         return 0;
4659 }
4660
4661 /**
4662  * Query a devx flow counter.
4663  *
4664  * @param[in] dev
4665  *   Pointer to the Ethernet device structure.
4666  * @param[in] cnt
4667  *   Index to the flow counter.
4668  * @param[out] pkts
4669  *   The statistics value of packets.
4670  * @param[out] bytes
4671  *   The statistics value of bytes.
4672  *
4673  * @return
4674  *   0 on success, otherwise a negative errno value and rte_errno is set.
4675  */
4676 static inline int
4677 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4678                      uint64_t *bytes)
4679 {
4680         struct mlx5_priv *priv = dev->data->dev_private;
4681         struct mlx5_flow_counter_pool *pool = NULL;
4682         struct mlx5_flow_counter *cnt;
4683         int offset;
4684
4685         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4686         MLX5_ASSERT(pool);
4687         if (priv->sh->cmng.counter_fallback)
4688                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4689                                         0, pkts, bytes, 0, NULL, NULL, 0);
4690         rte_spinlock_lock(&pool->sl);
4691         if (!pool->raw) {
4692                 *pkts = 0;
4693                 *bytes = 0;
4694         } else {
4695                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4696                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4697                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4698         }
4699         rte_spinlock_unlock(&pool->sl);
4700         return 0;
4701 }
4702
4703 /**
4704  * Create and initialize a new counter pool.
4705  *
4706  * @param[in] dev
4707  *   Pointer to the Ethernet device structure.
4708  * @param[out] dcs
4709  *   The devX counter handle.
 * @param[in] age
 *   Whether the pool is for counters that were allocated for aging.
4714  *
4715  * @return
4716  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
4717  */
static struct mlx5_flow_counter_pool *
flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
		    uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t size = sizeof(*pool);

	/* Trailing storage: counter array, plus age params when aging. */
	size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
	size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
	pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
	if (!pool) {
		rte_errno = ENOMEM;
		return NULL;
	}
	pool->raw = NULL;
	pool->is_aged = !!age;
	pool->query_gen = 0;
	pool->min_dcs = dcs;
	rte_spinlock_init(&pool->sl);
	rte_spinlock_init(&pool->csl);
	TAILQ_INIT(&pool->counters[0]);
	TAILQ_INIT(&pool->counters[1]);
	pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
	/* Register the pool in the container under the update lock. */
	rte_spinlock_lock(&cmng->pool_update_sl);
	pool->index = cmng->n_valid;
	/* Grow the pool array when it is full. */
	if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
		mlx5_free(pool);
		rte_spinlock_unlock(&cmng->pool_update_sl);
		return NULL;
	}
	cmng->pools[pool->index] = pool;
	cmng->n_valid++;
	if (unlikely(fallback)) {
		int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);

		/* Track the devx ID range for flow_dv_find_pool_by_id(). */
		if (base < cmng->min_id)
			cmng->min_id = base;
		if (base > cmng->max_id)
			cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
		cmng->last_pool_idx = pool->index;
	}
	rte_spinlock_unlock(&cmng->pool_update_sl);
	return pool;
}
4765
4766 /**
4767  * Prepare a new counter and/or a new counter pool.
4768  *
4769  * @param[in] dev
4770  *   Pointer to the Ethernet device structure.
4771  * @param[out] cnt_free
4772  *   Where to put the pointer of a new counter.
4773  * @param[in] age
4774  *   Whether the pool is for counter that was allocated for aging.
4775  *
4776  * @return
4777  *   The counter pool pointer and @p cnt_free is set on success,
4778  *   NULL otherwise and rte_errno is set.
4779  */
static struct mlx5_flow_counter_pool *
flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
			     struct mlx5_flow_counter **cnt_free,
			     uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	struct mlx5_flow_counter_pool *pool;
	struct mlx5_counters tmp_tq;
	struct mlx5_devx_obj *dcs = NULL;
	struct mlx5_flow_counter *cnt;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	bool fallback = priv->sh->cmng.counter_fallback;
	uint32_t i;

	if (fallback) {
		/* bulk_bitmap must be 0 for single counter allocation. */
		dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
		if (!dcs)
			return NULL;
		/* An existing pool may already cover this devx ID range. */
		pool = flow_dv_find_pool_by_id(cmng, dcs->id);
		if (!pool) {
			pool = flow_dv_pool_create(dev, dcs, age);
			if (!pool) {
				mlx5_devx_cmd_destroy(dcs);
				return NULL;
			}
		}
		i = dcs->id % MLX5_COUNTERS_PER_POOL;
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		/* The counter takes ownership of the devx object. */
		cnt->dcs_when_free = dcs;
		*cnt_free = cnt;
		return pool;
	}
	/*
	 * Batch mode: bulk bitmap 0x4 requests a bulk of counters in one
	 * devx object (presumably MLX5_COUNTERS_PER_POOL - see PRM; the
	 * pool below is sized for exactly that many).
	 */
	dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
	if (!dcs) {
		rte_errno = ENODATA;
		return NULL;
	}
	pool = flow_dv_pool_create(dev, dcs, age);
	if (!pool) {
		mlx5_devx_cmd_destroy(dcs);
		return NULL;
	}
	/*
	 * Link counters 1..N-1 on a temporary list first so the global
	 * free-list lock is held only for the final concatenation.
	 * Counter 0 is handed back to the caller directly.
	 */
	TAILQ_INIT(&tmp_tq);
	for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
		cnt = MLX5_POOL_GET_CNT(pool, i);
		cnt->pool = pool;
		TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
	}
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	*cnt_free = MLX5_POOL_GET_CNT(pool, 0);
	(*cnt_free)->pool = pool;
	return pool;
}
4839
4840 /**
4841  * Allocate a flow counter.
4842  *
4843  * @param[in] dev
4844  *   Pointer to the Ethernet device structure.
4845  * @param[in] age
4846  *   Whether the counter was allocated for aging.
4847  *
4848  * @return
4849  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4850  */
static uint32_t
flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter_pool *pool = NULL;
	struct mlx5_flow_counter *cnt_free = NULL;
	bool fallback = priv->sh->cmng.counter_fallback;
	struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
	enum mlx5_counter_type cnt_type =
			age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
	uint32_t cnt_idx;

	/* Flow counters need DevX support. */
	if (!priv->config.devx) {
		rte_errno = ENOTSUP;
		return 0;
	}
	/* Get free counters from container. */
	rte_spinlock_lock(&cmng->csl[cnt_type]);
	cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
	if (cnt_free)
		TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
	rte_spinlock_unlock(&cmng->csl[cnt_type]);
	/* Free list empty - prepare a new pool (also sets cnt_free). */
	if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
		goto err;
	pool = cnt_free->pool;
	if (fallback)
		cnt_free->dcs_when_active = cnt_free->dcs_when_free;
	/* Create a DV counter action only in the first time usage. */
	if (!cnt_free->action) {
		uint16_t offset;
		struct mlx5_devx_obj *dcs;
		int ret;

		if (!fallback) {
			/* Batch counters share the pool-wide devx object. */
			offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
			dcs = pool->min_dcs;
		} else {
			/* Fallback counters own a dedicated devx object. */
			offset = 0;
			dcs = cnt_free->dcs_when_free;
		}
		ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
							    &cnt_free->action);
		if (ret) {
			rte_errno = errno;
			goto err;
		}
	}
	cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
				MLX5_CNT_ARRAY_IDX(pool, cnt_free));
	/* Update the counter reset values. */
	if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
				 &cnt_free->bytes))
		goto err;
	if (!fallback && !priv->sh->cmng.query_thread_on)
		/* Start the asynchronous batch query by the host thread. */
		mlx5_set_query_alarm(priv->sh);
	return cnt_idx;
err:
	/* Put the counter back on the free list on any failure. */
	if (cnt_free) {
		cnt_free->pool = pool;
		if (fallback)
			cnt_free->dcs_when_free = cnt_free->dcs_when_active;
		rte_spinlock_lock(&cmng->csl[cnt_type]);
		TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
		rte_spinlock_unlock(&cmng->csl[cnt_type]);
	}
	return 0;
}
4919
4920 /**
4921  * Allocate a shared flow counter.
4922  *
4923  * @param[in] ctx
4924  *   Pointer to the shared counter configuration.
4925  * @param[in] data
4926  *   Pointer to save the allocated counter index.
4927  *
4928  * @return
4929  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4930  */
4931
4932 static int32_t
4933 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4934 {
4935         struct mlx5_shared_counter_conf *conf = ctx;
4936         struct rte_eth_dev *dev = conf->dev;
4937         struct mlx5_flow_counter *cnt;
4938
4939         data->dword = flow_dv_counter_alloc(dev, 0);
4940         data->dword |= MLX5_CNT_SHARED_OFFSET;
4941         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4942         cnt->shared_info.id = conf->id;
4943         return 0;
4944 }
4945
4946 /**
4947  * Get a shared flow counter.
4948  *
4949  * @param[in] dev
4950  *   Pointer to the Ethernet device structure.
4951  * @param[in] id
4952  *   Counter identifier.
4953  *
4954  * @return
4955  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4956  */
4957 static uint32_t
4958 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4959 {
4960         struct mlx5_priv *priv = dev->data->dev_private;
4961         struct mlx5_shared_counter_conf conf = {
4962                 .dev = dev,
4963                 .id = id,
4964         };
4965         union mlx5_l3t_data data = {
4966                 .dword = 0,
4967         };
4968
4969         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
4970                                flow_dv_counter_alloc_shared_cb, &conf);
4971         return data.dword;
4972 }
4973
4974 /**
4975  * Get age param from counter index.
4976  *
4977  * @param[in] dev
4978  *   Pointer to the Ethernet device structure.
4979  * @param[in] counter
4980  *   Index to the counter handler.
4981  *
4982  * @return
4983  *   The aging parameter specified for the counter index.
4984  */
4985 static struct mlx5_age_param*
4986 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4987                                 uint32_t counter)
4988 {
4989         struct mlx5_flow_counter *cnt;
4990         struct mlx5_flow_counter_pool *pool = NULL;
4991
4992         flow_dv_counter_get_by_idx(dev, counter, &pool);
4993         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4994         cnt = MLX5_POOL_GET_CNT(pool, counter);
4995         return MLX5_CNT_TO_AGE(cnt);
4996 }
4997
4998 /**
4999  * Remove a flow counter from aged counter list.
5000  *
5001  * @param[in] dev
5002  *   Pointer to the Ethernet device structure.
5003  * @param[in] counter
5004  *   Index to the counter handler.
5005  * @param[in] cnt
5006  *   Pointer to the counter handler.
5007  */
5008 static void
5009 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5010                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5011 {
5012         struct mlx5_age_info *age_info;
5013         struct mlx5_age_param *age_param;
5014         struct mlx5_priv *priv = dev->data->dev_private;
5015         uint16_t expected = AGE_CANDIDATE;
5016
5017         age_info = GET_PORT_AGE_INFO(priv);
5018         age_param = flow_dv_counter_idx_get_age(dev, counter);
5019         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5020                                          AGE_FREE, false, __ATOMIC_RELAXED,
5021                                          __ATOMIC_RELAXED)) {
5022                 /**
5023                  * We need the lock even it is age timeout,
5024                  * since counter may still in process.
5025                  */
5026                 rte_spinlock_lock(&age_info->aged_sl);
5027                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5028                 rte_spinlock_unlock(&age_info->aged_sl);
5029                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5030         }
5031 }
5032
5033 /**
5034  * Release a flow counter.
5035  *
5036  * @param[in] dev
5037  *   Pointer to the Ethernet device structure.
5038  * @param[in] counter
5039  *   Index to the counter handler.
5040  */
5041 static void
5042 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5043 {
5044         struct mlx5_priv *priv = dev->data->dev_private;
5045         struct mlx5_flow_counter_pool *pool = NULL;
5046         struct mlx5_flow_counter *cnt;
5047         enum mlx5_counter_type cnt_type;
5048
5049         if (!counter)
5050                 return;
5051         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5052         MLX5_ASSERT(pool);
5053         if (IS_SHARED_CNT(counter) &&
5054             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5055                 return;
5056         if (pool->is_aged)
5057                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5058         cnt->pool = pool;
5059         /*
5060          * Put the counter back to list to be updated in none fallback mode.
5061          * Currently, we are using two list alternately, while one is in query,
5062          * add the freed counter to the other list based on the pool query_gen
5063          * value. After query finishes, add counter the list to the global
5064          * container counter list. The list changes while query starts. In
5065          * this case, lock will not be needed as query callback and release
5066          * function both operate with the different list.
5067          *
5068          */
5069         if (!priv->sh->cmng.counter_fallback) {
5070                 rte_spinlock_lock(&pool->csl);
5071                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5072                 rte_spinlock_unlock(&pool->csl);
5073         } else {
5074                 cnt->dcs_when_free = cnt->dcs_when_active;
5075                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5076                                            MLX5_COUNTER_TYPE_ORIGIN;
5077                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5078                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5079                                   cnt, next);
5080                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5081         }
5082 }
5083
5084 /**
5085  * Verify the @p attributes will be correctly understood by the NIC and store
5086  * them in the @p flow if everything is correct.
5087  *
5088  * @param[in] dev
5089  *   Pointer to dev struct.
5090  * @param[in] attributes
5091  *   Pointer to flow attributes
5092  * @param[in] external
5093  *   This flow rule is created by request external to PMD.
5094  * @param[out] error
5095  *   Pointer to error structure.
5096  *
5097  * @return
5098  *   - 0 on success and non root table.
5099  *   - 1 on success and root table.
5100  *   - a negative errno value otherwise and rte_errno is set.
5101  */
5102 static int
5103 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5104                             const struct mlx5_flow_tunnel *tunnel,
5105                             const struct rte_flow_attr *attributes,
5106                             const struct flow_grp_info *grp_info,
5107                             struct rte_flow_error *error)
5108 {
5109         struct mlx5_priv *priv = dev->data->dev_private;
5110         uint32_t priority_max = priv->config.flow_prio - 1;
5111         int ret = 0;
5112
5113 #ifndef HAVE_MLX5DV_DR
5114         RTE_SET_USED(tunnel);
5115         RTE_SET_USED(grp_info);
5116         if (attributes->group)
5117                 return rte_flow_error_set(error, ENOTSUP,
5118                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5119                                           NULL,
5120                                           "groups are not supported");
5121 #else
5122         uint32_t table = 0;
5123
5124         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5125                                        grp_info, error);
5126         if (ret)
5127                 return ret;
5128         if (!table)
5129                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5130 #endif
5131         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5132             attributes->priority >= priority_max)
5133                 return rte_flow_error_set(error, ENOTSUP,
5134                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5135                                           NULL,
5136                                           "priority out of range");
5137         if (attributes->transfer) {
5138                 if (!priv->config.dv_esw_en)
5139                         return rte_flow_error_set
5140                                 (error, ENOTSUP,
5141                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5142                                  "E-Switch dr is not supported");
5143                 if (!(priv->representor || priv->master))
5144                         return rte_flow_error_set
5145                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5146                                  NULL, "E-Switch configuration can only be"
5147                                  " done by a master or a representor device");
5148                 if (attributes->egress)
5149                         return rte_flow_error_set
5150                                 (error, ENOTSUP,
5151                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5152                                  "egress is not supported");
5153         }
5154         if (!(attributes->egress ^ attributes->ingress))
5155                 return rte_flow_error_set(error, ENOTSUP,
5156                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5157                                           "must specify exactly one of "
5158                                           "ingress or egress");
5159         return ret;
5160 }
5161
5162 /**
5163  * Internal validation function. For validating both actions and items.
5164  *
5165  * @param[in] dev
5166  *   Pointer to the rte_eth_dev structure.
5167  * @param[in] attr
5168  *   Pointer to the flow attributes.
5169  * @param[in] items
5170  *   Pointer to the list of items.
5171  * @param[in] actions
5172  *   Pointer to the list of actions.
5173  * @param[in] external
5174  *   This flow rule is created by request external to PMD.
5175  * @param[in] hairpin
5176  *   Number of hairpin TX actions, 0 means classic flow.
5177  * @param[out] error
5178  *   Pointer to the error structure.
5179  *
5180  * @return
5181  *   0 on success, a negative errno value otherwise and rte_errno is set.
5182  */
5183 static int
5184 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5185                  const struct rte_flow_item items[],
5186                  const struct rte_flow_action actions[],
5187                  bool external, int hairpin, struct rte_flow_error *error)
5188 {
5189         int ret;
5190         uint64_t action_flags = 0;
5191         uint64_t item_flags = 0;
5192         uint64_t last_item = 0;
5193         uint8_t next_protocol = 0xff;
5194         uint16_t ether_type = 0;
5195         int actions_n = 0;
5196         uint8_t item_ipv6_proto = 0;
5197         const struct rte_flow_item *gre_item = NULL;
5198         const struct rte_flow_action_raw_decap *decap;
5199         const struct rte_flow_action_raw_encap *encap;
5200         const struct rte_flow_action_rss *rss;
5201         const struct rte_flow_item_tcp nic_tcp_mask = {
5202                 .hdr = {
5203                         .tcp_flags = 0xFF,
5204                         .src_port = RTE_BE16(UINT16_MAX),
5205                         .dst_port = RTE_BE16(UINT16_MAX),
5206                 }
5207         };
5208         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5209                 .hdr = {
5210                         .src_addr =
5211                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5212                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5213                         .dst_addr =
5214                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5215                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5216                         .vtc_flow = RTE_BE32(0xffffffff),
5217                         .proto = 0xff,
5218                         .hop_limits = 0xff,
5219                 },
5220                 .has_frag_ext = 1,
5221         };
5222         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5223                 .hdr = {
5224                         .common = {
5225                                 .u32 =
5226                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5227                                         .type = 0xFF,
5228                                         }).u32),
5229                         },
5230                         .dummy[0] = 0xffffffff,
5231                 },
5232         };
5233         struct mlx5_priv *priv = dev->data->dev_private;
5234         struct mlx5_dev_config *dev_conf = &priv->config;
5235         uint16_t queue_index = 0xFFFF;
5236         const struct rte_flow_item_vlan *vlan_m = NULL;
5237         int16_t rw_act_num = 0;
5238         uint64_t is_root;
5239         const struct mlx5_flow_tunnel *tunnel;
5240         struct flow_grp_info grp_info = {
5241                 .external = !!external,
5242                 .transfer = !!attr->transfer,
5243                 .fdb_def_rule = !!priv->fdb_def_rule,
5244         };
5245         const struct rte_eth_hairpin_conf *conf;
5246
5247         if (items == NULL)
5248                 return -1;
5249         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5250                 tunnel = flow_items_to_tunnel(items);
5251                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5252                                 MLX5_FLOW_ACTION_DECAP;
5253         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5254                 tunnel = flow_actions_to_tunnel(actions);
5255                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5256         } else {
5257                 tunnel = NULL;
5258         }
5259         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5260                                 (dev, tunnel, attr, items, actions);
5261         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
5262         if (ret < 0)
5263                 return ret;
5264         is_root = (uint64_t)ret;
5265         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5266                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5267                 int type = items->type;
5268
5269                 if (!mlx5_flow_os_item_supported(type))
5270                         return rte_flow_error_set(error, ENOTSUP,
5271                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5272                                                   NULL, "item not supported");
5273                 switch (type) {
5274                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5275                         if (items[0].type != (typeof(items[0].type))
5276                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5277                                 return rte_flow_error_set
5278                                                 (error, EINVAL,
5279                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5280                                                 NULL, "MLX5 private items "
5281                                                 "must be the first");
5282                         break;
5283                 case RTE_FLOW_ITEM_TYPE_VOID:
5284                         break;
5285                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5286                         ret = flow_dv_validate_item_port_id
5287                                         (dev, items, attr, item_flags, error);
5288                         if (ret < 0)
5289                                 return ret;
5290                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5291                         break;
5292                 case RTE_FLOW_ITEM_TYPE_ETH:
5293                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5294                                                           true, error);
5295                         if (ret < 0)
5296                                 return ret;
5297                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5298                                              MLX5_FLOW_LAYER_OUTER_L2;
5299                         if (items->mask != NULL && items->spec != NULL) {
5300                                 ether_type =
5301                                         ((const struct rte_flow_item_eth *)
5302                                          items->spec)->type;
5303                                 ether_type &=
5304                                         ((const struct rte_flow_item_eth *)
5305                                          items->mask)->type;
5306                                 ether_type = rte_be_to_cpu_16(ether_type);
5307                         } else {
5308                                 ether_type = 0;
5309                         }
5310                         break;
5311                 case RTE_FLOW_ITEM_TYPE_VLAN:
5312                         ret = flow_dv_validate_item_vlan(items, item_flags,
5313                                                          dev, error);
5314                         if (ret < 0)
5315                                 return ret;
5316                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5317                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5318                         if (items->mask != NULL && items->spec != NULL) {
5319                                 ether_type =
5320                                         ((const struct rte_flow_item_vlan *)
5321                                          items->spec)->inner_type;
5322                                 ether_type &=
5323                                         ((const struct rte_flow_item_vlan *)
5324                                          items->mask)->inner_type;
5325                                 ether_type = rte_be_to_cpu_16(ether_type);
5326                         } else {
5327                                 ether_type = 0;
5328                         }
5329                         /* Store outer VLAN mask for of_push_vlan action. */
5330                         if (!tunnel)
5331                                 vlan_m = items->mask;
5332                         break;
5333                 case RTE_FLOW_ITEM_TYPE_IPV4:
5334                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5335                                                   &item_flags, &tunnel);
5336                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5337                                                          last_item, ether_type,
5338                                                          error);
5339                         if (ret < 0)
5340                                 return ret;
5341                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5342                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5343                         if (items->mask != NULL &&
5344                             ((const struct rte_flow_item_ipv4 *)
5345                              items->mask)->hdr.next_proto_id) {
5346                                 next_protocol =
5347                                         ((const struct rte_flow_item_ipv4 *)
5348                                          (items->spec))->hdr.next_proto_id;
5349                                 next_protocol &=
5350                                         ((const struct rte_flow_item_ipv4 *)
5351                                          (items->mask))->hdr.next_proto_id;
5352                         } else {
5353                                 /* Reset for inner layer. */
5354                                 next_protocol = 0xff;
5355                         }
5356                         break;
5357                 case RTE_FLOW_ITEM_TYPE_IPV6:
5358                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5359                                                   &item_flags, &tunnel);
5360                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5361                                                            last_item,
5362                                                            ether_type,
5363                                                            &nic_ipv6_mask,
5364                                                            error);
5365                         if (ret < 0)
5366                                 return ret;
5367                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5368                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5369                         if (items->mask != NULL &&
5370                             ((const struct rte_flow_item_ipv6 *)
5371                              items->mask)->hdr.proto) {
5372                                 item_ipv6_proto =
5373                                         ((const struct rte_flow_item_ipv6 *)
5374                                          items->spec)->hdr.proto;
5375                                 next_protocol =
5376                                         ((const struct rte_flow_item_ipv6 *)
5377                                          items->spec)->hdr.proto;
5378                                 next_protocol &=
5379                                         ((const struct rte_flow_item_ipv6 *)
5380                                          items->mask)->hdr.proto;
5381                         } else {
5382                                 /* Reset for inner layer. */
5383                                 next_protocol = 0xff;
5384                         }
5385                         break;
5386                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5387                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5388                                                                   item_flags,
5389                                                                   error);
5390                         if (ret < 0)
5391                                 return ret;
5392                         last_item = tunnel ?
5393                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5394                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5395                         if (items->mask != NULL &&
5396                             ((const struct rte_flow_item_ipv6_frag_ext *)
5397                              items->mask)->hdr.next_header) {
5398                                 next_protocol =
5399                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5400                                  items->spec)->hdr.next_header;
5401                                 next_protocol &=
5402                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5403                                  items->mask)->hdr.next_header;
5404                         } else {
5405                                 /* Reset for inner layer. */
5406                                 next_protocol = 0xff;
5407                         }
5408                         break;
5409                 case RTE_FLOW_ITEM_TYPE_TCP:
5410                         ret = mlx5_flow_validate_item_tcp
5411                                                 (items, item_flags,
5412                                                  next_protocol,
5413                                                  &nic_tcp_mask,
5414                                                  error);
5415                         if (ret < 0)
5416                                 return ret;
5417                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5418                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5419                         break;
5420                 case RTE_FLOW_ITEM_TYPE_UDP:
5421                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5422                                                           next_protocol,
5423                                                           error);
5424                         if (ret < 0)
5425                                 return ret;
5426                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5427                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5428                         break;
5429                 case RTE_FLOW_ITEM_TYPE_GRE:
5430                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5431                                                           next_protocol, error);
5432                         if (ret < 0)
5433                                 return ret;
5434                         gre_item = items;
5435                         last_item = MLX5_FLOW_LAYER_GRE;
5436                         break;
5437                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5438                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5439                                                             next_protocol,
5440                                                             error);
5441                         if (ret < 0)
5442                                 return ret;
5443                         last_item = MLX5_FLOW_LAYER_NVGRE;
5444                         break;
5445                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5446                         ret = mlx5_flow_validate_item_gre_key
5447                                 (items, item_flags, gre_item, error);
5448                         if (ret < 0)
5449                                 return ret;
5450                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5451                         break;
5452                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5453                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5454                                                             error);
5455                         if (ret < 0)
5456                                 return ret;
5457                         last_item = MLX5_FLOW_LAYER_VXLAN;
5458                         break;
5459                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5460                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5461                                                                 item_flags, dev,
5462                                                                 error);
5463                         if (ret < 0)
5464                                 return ret;
5465                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5466                         break;
5467                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5468                         ret = mlx5_flow_validate_item_geneve(items,
5469                                                              item_flags, dev,
5470                                                              error);
5471                         if (ret < 0)
5472                                 return ret;
5473                         last_item = MLX5_FLOW_LAYER_GENEVE;
5474                         break;
5475                 case RTE_FLOW_ITEM_TYPE_MPLS:
5476                         ret = mlx5_flow_validate_item_mpls(dev, items,
5477                                                            item_flags,
5478                                                            last_item, error);
5479                         if (ret < 0)
5480                                 return ret;
5481                         last_item = MLX5_FLOW_LAYER_MPLS;
5482                         break;
5483
5484                 case RTE_FLOW_ITEM_TYPE_MARK:
5485                         ret = flow_dv_validate_item_mark(dev, items, attr,
5486                                                          error);
5487                         if (ret < 0)
5488                                 return ret;
5489                         last_item = MLX5_FLOW_ITEM_MARK;
5490                         break;
5491                 case RTE_FLOW_ITEM_TYPE_META:
5492                         ret = flow_dv_validate_item_meta(dev, items, attr,
5493                                                          error);
5494                         if (ret < 0)
5495                                 return ret;
5496                         last_item = MLX5_FLOW_ITEM_METADATA;
5497                         break;
5498                 case RTE_FLOW_ITEM_TYPE_ICMP:
5499                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5500                                                            next_protocol,
5501                                                            error);
5502                         if (ret < 0)
5503                                 return ret;
5504                         last_item = MLX5_FLOW_LAYER_ICMP;
5505                         break;
5506                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5507                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5508                                                             next_protocol,
5509                                                             error);
5510                         if (ret < 0)
5511                                 return ret;
5512                         item_ipv6_proto = IPPROTO_ICMPV6;
5513                         last_item = MLX5_FLOW_LAYER_ICMP6;
5514                         break;
5515                 case RTE_FLOW_ITEM_TYPE_TAG:
5516                         ret = flow_dv_validate_item_tag(dev, items,
5517                                                         attr, error);
5518                         if (ret < 0)
5519                                 return ret;
5520                         last_item = MLX5_FLOW_ITEM_TAG;
5521                         break;
5522                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5523                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5524                         break;
5525                 case RTE_FLOW_ITEM_TYPE_GTP:
5526                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5527                                                         error);
5528                         if (ret < 0)
5529                                 return ret;
5530                         last_item = MLX5_FLOW_LAYER_GTP;
5531                         break;
5532                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5533                         /* Capacity will be checked in the translate stage. */
5534                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5535                                                             last_item,
5536                                                             ether_type,
5537                                                             &nic_ecpri_mask,
5538                                                             error);
5539                         if (ret < 0)
5540                                 return ret;
5541                         last_item = MLX5_FLOW_LAYER_ECPRI;
5542                         break;
5543                 default:
5544                         return rte_flow_error_set(error, ENOTSUP,
5545                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5546                                                   NULL, "item not supported");
5547                 }
5548                 item_flags |= last_item;
5549         }
5550         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5551                 int type = actions->type;
5552
5553                 if (!mlx5_flow_os_action_supported(type))
5554                         return rte_flow_error_set(error, ENOTSUP,
5555                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5556                                                   actions,
5557                                                   "action not supported");
5558                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5559                         return rte_flow_error_set(error, ENOTSUP,
5560                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5561                                                   actions, "too many actions");
5562                 switch (type) {
5563                 case RTE_FLOW_ACTION_TYPE_VOID:
5564                         break;
5565                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5566                         ret = flow_dv_validate_action_port_id(dev,
5567                                                               action_flags,
5568                                                               actions,
5569                                                               attr,
5570                                                               error);
5571                         if (ret)
5572                                 return ret;
5573                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5574                         ++actions_n;
5575                         break;
5576                 case RTE_FLOW_ACTION_TYPE_FLAG:
5577                         ret = flow_dv_validate_action_flag(dev, action_flags,
5578                                                            attr, error);
5579                         if (ret < 0)
5580                                 return ret;
5581                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5582                                 /* Count all modify-header actions as one. */
5583                                 if (!(action_flags &
5584                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5585                                         ++actions_n;
5586                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5587                                                 MLX5_FLOW_ACTION_MARK_EXT;
5588                         } else {
5589                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5590                                 ++actions_n;
5591                         }
5592                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5593                         break;
5594                 case RTE_FLOW_ACTION_TYPE_MARK:
5595                         ret = flow_dv_validate_action_mark(dev, actions,
5596                                                            action_flags,
5597                                                            attr, error);
5598                         if (ret < 0)
5599                                 return ret;
5600                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5601                                 /* Count all modify-header actions as one. */
5602                                 if (!(action_flags &
5603                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5604                                         ++actions_n;
5605                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5606                                                 MLX5_FLOW_ACTION_MARK_EXT;
5607                         } else {
5608                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5609                                 ++actions_n;
5610                         }
5611                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5612                         break;
5613                 case RTE_FLOW_ACTION_TYPE_SET_META:
5614                         ret = flow_dv_validate_action_set_meta(dev, actions,
5615                                                                action_flags,
5616                                                                attr, error);
5617                         if (ret < 0)
5618                                 return ret;
5619                         /* Count all modify-header actions as one action. */
5620                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5621                                 ++actions_n;
5622                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5623                         rw_act_num += MLX5_ACT_NUM_SET_META;
5624                         break;
5625                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5626                         ret = flow_dv_validate_action_set_tag(dev, actions,
5627                                                               action_flags,
5628                                                               attr, error);
5629                         if (ret < 0)
5630                                 return ret;
5631                         /* Count all modify-header actions as one action. */
5632                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5633                                 ++actions_n;
5634                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5635                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5636                         break;
5637                 case RTE_FLOW_ACTION_TYPE_DROP:
5638                         ret = mlx5_flow_validate_action_drop(action_flags,
5639                                                              attr, error);
5640                         if (ret < 0)
5641                                 return ret;
5642                         action_flags |= MLX5_FLOW_ACTION_DROP;
5643                         ++actions_n;
5644                         break;
5645                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5646                         ret = mlx5_flow_validate_action_queue(actions,
5647                                                               action_flags, dev,
5648                                                               attr, error);
5649                         if (ret < 0)
5650                                 return ret;
5651                         queue_index = ((const struct rte_flow_action_queue *)
5652                                                         (actions->conf))->index;
5653                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5654                         ++actions_n;
5655                         break;
5656                 case RTE_FLOW_ACTION_TYPE_RSS:
5657                         rss = actions->conf;
5658                         ret = mlx5_flow_validate_action_rss(actions,
5659                                                             action_flags, dev,
5660                                                             attr, item_flags,
5661                                                             error);
5662                         if (ret < 0)
5663                                 return ret;
5664                         if (rss != NULL && rss->queue_num)
5665                                 queue_index = rss->queue[0];
5666                         action_flags |= MLX5_FLOW_ACTION_RSS;
5667                         ++actions_n;
5668                         break;
5669                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5670                         ret =
5671                         mlx5_flow_validate_action_default_miss(action_flags,
5672                                         attr, error);
5673                         if (ret < 0)
5674                                 return ret;
5675                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5676                         ++actions_n;
5677                         break;
5678                 case RTE_FLOW_ACTION_TYPE_COUNT:
5679                         ret = flow_dv_validate_action_count(dev, error);
5680                         if (ret < 0)
5681                                 return ret;
5682                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5683                         ++actions_n;
5684                         break;
5685                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5686                         if (flow_dv_validate_action_pop_vlan(dev,
5687                                                              action_flags,
5688                                                              actions,
5689                                                              item_flags, attr,
5690                                                              error))
5691                                 return -rte_errno;
5692                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5693                         ++actions_n;
5694                         break;
5695                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5696                         ret = flow_dv_validate_action_push_vlan(dev,
5697                                                                 action_flags,
5698                                                                 vlan_m,
5699                                                                 actions, attr,
5700                                                                 error);
5701                         if (ret < 0)
5702                                 return ret;
5703                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5704                         ++actions_n;
5705                         break;
5706                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5707                         ret = flow_dv_validate_action_set_vlan_pcp
5708                                                 (action_flags, actions, error);
5709                         if (ret < 0)
5710                                 return ret;
5711                         /* Count PCP with push_vlan command. */
5712                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5715                         ret = flow_dv_validate_action_set_vlan_vid
5716                                                 (item_flags, action_flags,
5717                                                  actions, error);
5718                         if (ret < 0)
5719                                 return ret;
5720                         /* Count VID with push_vlan command. */
5721                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5722                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5723                         break;
5724                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5725                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5726                         ret = flow_dv_validate_action_l2_encap(dev,
5727                                                                action_flags,
5728                                                                actions, attr,
5729                                                                error);
5730                         if (ret < 0)
5731                                 return ret;
5732                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5733                         ++actions_n;
5734                         break;
5735                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5736                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5737                         ret = flow_dv_validate_action_decap(dev, action_flags,
5738                                                             attr, error);
5739                         if (ret < 0)
5740                                 return ret;
5741                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5742                         ++actions_n;
5743                         break;
5744                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5745                         ret = flow_dv_validate_action_raw_encap_decap
5746                                 (dev, NULL, actions->conf, attr, &action_flags,
5747                                  &actions_n, error);
5748                         if (ret < 0)
5749                                 return ret;
5750                         break;
5751                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5752                         decap = actions->conf;
5753                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5754                                 ;
5755                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5756                                 encap = NULL;
5757                                 actions--;
5758                         } else {
5759                                 encap = actions->conf;
5760                         }
5761                         ret = flow_dv_validate_action_raw_encap_decap
5762                                            (dev,
5763                                             decap ? decap : &empty_decap, encap,
5764                                             attr, &action_flags, &actions_n,
5765                                             error);
5766                         if (ret < 0)
5767                                 return ret;
5768                         break;
5769                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5770                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5771                         ret = flow_dv_validate_action_modify_mac(action_flags,
5772                                                                  actions,
5773                                                                  item_flags,
5774                                                                  error);
5775                         if (ret < 0)
5776                                 return ret;
5777                         /* Count all modify-header actions as one action. */
5778                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5779                                 ++actions_n;
5780                         action_flags |= actions->type ==
5781                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5782                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5783                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5784                         /*
5785                          * Even if the source and destination MAC addresses have
5786                          * overlap in the header with 4B alignment, the convert
5787                          * function will handle them separately and 4 SW actions
5788                          * will be created. And 2 actions will be added each
5789                          * time no matter how many bytes of address will be set.
5790                          */
5791                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5792                         break;
5793                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5794                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5795                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5796                                                                   actions,
5797                                                                   item_flags,
5798                                                                   error);
5799                         if (ret < 0)
5800                                 return ret;
5801                         /* Count all modify-header actions as one action. */
5802                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5803                                 ++actions_n;
5804                         action_flags |= actions->type ==
5805                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5806                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5807                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5808                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5809                         break;
5810                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5811                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5812                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5813                                                                   actions,
5814                                                                   item_flags,
5815                                                                   error);
5816                         if (ret < 0)
5817                                 return ret;
5818                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5819                                 return rte_flow_error_set(error, ENOTSUP,
5820                                         RTE_FLOW_ERROR_TYPE_ACTION,
5821                                         actions,
5822                                         "Can't change header "
5823                                         "with ICMPv6 proto");
5824                         /* Count all modify-header actions as one action. */
5825                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5826                                 ++actions_n;
5827                         action_flags |= actions->type ==
5828                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5829                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5830                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5831                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5832                         break;
5833                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5834                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5835                         ret = flow_dv_validate_action_modify_tp(action_flags,
5836                                                                 actions,
5837                                                                 item_flags,
5838                                                                 error);
5839                         if (ret < 0)
5840                                 return ret;
5841                         /* Count all modify-header actions as one action. */
5842                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5843                                 ++actions_n;
5844                         action_flags |= actions->type ==
5845                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5846                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5847                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5848                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5849                         break;
5850                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5851                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5852                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5853                                                                  actions,
5854                                                                  item_flags,
5855                                                                  error);
5856                         if (ret < 0)
5857                                 return ret;
5858                         /* Count all modify-header actions as one action. */
5859                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5860                                 ++actions_n;
5861                         action_flags |= actions->type ==
5862                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5863                                                 MLX5_FLOW_ACTION_SET_TTL :
5864                                                 MLX5_FLOW_ACTION_DEC_TTL;
5865                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5866                         break;
5867                 case RTE_FLOW_ACTION_TYPE_JUMP:
5868                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5869                                                            action_flags,
5870                                                            attr, external,
5871                                                            error);
5872                         if (ret)
5873                                 return ret;
5874                         ++actions_n;
5875                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5876                         break;
5877                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5878                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5879                         ret = flow_dv_validate_action_modify_tcp_seq
5880                                                                 (action_flags,
5881                                                                  actions,
5882                                                                  item_flags,
5883                                                                  error);
5884                         if (ret < 0)
5885                                 return ret;
5886                         /* Count all modify-header actions as one action. */
5887                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5888                                 ++actions_n;
5889                         action_flags |= actions->type ==
5890                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5891                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5892                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5893                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5894                         break;
5895                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5896                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5897                         ret = flow_dv_validate_action_modify_tcp_ack
5898                                                                 (action_flags,
5899                                                                  actions,
5900                                                                  item_flags,
5901                                                                  error);
5902                         if (ret < 0)
5903                                 return ret;
5904                         /* Count all modify-header actions as one action. */
5905                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5906                                 ++actions_n;
5907                         action_flags |= actions->type ==
5908                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5909                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5910                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5911                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5912                         break;
5913                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5914                         break;
5915                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5916                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5917                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5918                         break;
5919                 case RTE_FLOW_ACTION_TYPE_METER:
5920                         ret = mlx5_flow_validate_action_meter(dev,
5921                                                               action_flags,
5922                                                               actions, attr,
5923                                                               error);
5924                         if (ret < 0)
5925                                 return ret;
5926                         action_flags |= MLX5_FLOW_ACTION_METER;
5927                         ++actions_n;
5928                         /* Meter action will add one more TAG action. */
5929                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5930                         break;
5931                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
5932                         if (!attr->group)
5933                                 return rte_flow_error_set(error, ENOTSUP,
5934                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5935                                                                            NULL,
5936                           "Shared ASO age action is not supported for group 0");
5937                         action_flags |= MLX5_FLOW_ACTION_AGE;
5938                         ++actions_n;
5939                         break;
5940                 case RTE_FLOW_ACTION_TYPE_AGE:
5941                         ret = flow_dv_validate_action_age(action_flags,
5942                                                           actions, dev,
5943                                                           error);
5944                         if (ret < 0)
5945                                 return ret;
5946                         action_flags |= MLX5_FLOW_ACTION_AGE;
5947                         ++actions_n;
5948                         break;
5949                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5950                         ret = flow_dv_validate_action_modify_ipv4_dscp
5951                                                          (action_flags,
5952                                                           actions,
5953                                                           item_flags,
5954                                                           error);
5955                         if (ret < 0)
5956                                 return ret;
5957                         /* Count all modify-header actions as one action. */
5958                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5959                                 ++actions_n;
5960                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5961                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5962                         break;
5963                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5964                         ret = flow_dv_validate_action_modify_ipv6_dscp
5965                                                                 (action_flags,
5966                                                                  actions,
5967                                                                  item_flags,
5968                                                                  error);
5969                         if (ret < 0)
5970                                 return ret;
5971                         /* Count all modify-header actions as one action. */
5972                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5973                                 ++actions_n;
5974                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5975                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5976                         break;
5977                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
5978                         ret = flow_dv_validate_action_sample(action_flags,
5979                                                              actions, dev,
5980                                                              attr, error);
5981                         if (ret < 0)
5982                                 return ret;
5983                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
5984                         ++actions_n;
5985                         break;
5986                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
5987                         if (actions[0].type != (typeof(actions[0].type))
5988                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
5989                                 return rte_flow_error_set
5990                                                 (error, EINVAL,
5991                                                 RTE_FLOW_ERROR_TYPE_ACTION,
5992                                                 NULL, "MLX5 private action "
5993                                                 "must be the first");
5994
5995                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5996                         break;
5997                 default:
5998                         return rte_flow_error_set(error, ENOTSUP,
5999                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6000                                                   actions,
6001                                                   "action not supported");
6002                 }
6003         }
6004         /*
6005          * Validate actions in flow rules
6006          * - Explicit decap action is prohibited by the tunnel offload API.
6007          * - Drop action in tunnel steer rule is prohibited by the API.
 6008          * - Application cannot use MARK action because its value can mask
 6009          *   tunnel default miss notification.
6010          * - JUMP in tunnel match rule has no support in current PMD
6011          *   implementation.
6012          * - TAG & META are reserved for future uses.
6013          */
6014         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6015                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6016                                             MLX5_FLOW_ACTION_MARK     |
6017                                             MLX5_FLOW_ACTION_SET_TAG  |
6018                                             MLX5_FLOW_ACTION_SET_META |
6019                                             MLX5_FLOW_ACTION_DROP;
6020
6021                 if (action_flags & bad_actions_mask)
6022                         return rte_flow_error_set
6023                                         (error, EINVAL,
6024                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6025                                         "Invalid RTE action in tunnel "
6026                                         "set decap rule");
6027                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6028                         return rte_flow_error_set
6029                                         (error, EINVAL,
6030                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6031                                         "tunnel set decap rule must terminate "
6032                                         "with JUMP");
6033                 if (!attr->ingress)
6034                         return rte_flow_error_set
6035                                         (error, EINVAL,
6036                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6037                                         "tunnel flows for ingress traffic only");
6038         }
6039         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6040                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6041                                             MLX5_FLOW_ACTION_MARK    |
6042                                             MLX5_FLOW_ACTION_SET_TAG |
6043                                             MLX5_FLOW_ACTION_SET_META;
6044
6045                 if (action_flags & bad_actions_mask)
6046                         return rte_flow_error_set
6047                                         (error, EINVAL,
6048                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6049                                         "Invalid RTE action in tunnel "
6050                                         "set match rule");
6051         }
6052         /*
6053          * Validate the drop action mutual exclusion with other actions.
6054          * Drop action is mutually-exclusive with any other action, except for
6055          * Count action.
6056          */
6057         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6058             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6059                 return rte_flow_error_set(error, EINVAL,
6060                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6061                                           "Drop action is mutually-exclusive "
6062                                           "with any other action, except for "
6063                                           "Count action");
6064         /* Eswitch has few restrictions on using items and actions */
6065         if (attr->transfer) {
6066                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6067                     action_flags & MLX5_FLOW_ACTION_FLAG)
6068                         return rte_flow_error_set(error, ENOTSUP,
6069                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6070                                                   NULL,
6071                                                   "unsupported action FLAG");
6072                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6073                     action_flags & MLX5_FLOW_ACTION_MARK)
6074                         return rte_flow_error_set(error, ENOTSUP,
6075                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6076                                                   NULL,
6077                                                   "unsupported action MARK");
6078                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6079                         return rte_flow_error_set(error, ENOTSUP,
6080                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6081                                                   NULL,
6082                                                   "unsupported action QUEUE");
6083                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6084                         return rte_flow_error_set(error, ENOTSUP,
6085                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6086                                                   NULL,
6087                                                   "unsupported action RSS");
6088                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6089                         return rte_flow_error_set(error, EINVAL,
6090                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6091                                                   actions,
6092                                                   "no fate action is found");
6093         } else {
6094                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6095                         return rte_flow_error_set(error, EINVAL,
6096                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6097                                                   actions,
6098                                                   "no fate action is found");
6099         }
6100         /*
6101          * Continue validation for Xcap and VLAN actions.
6102          * If hairpin is working in explicit TX rule mode, there is no actions
6103          * splitting and the validation of hairpin ingress flow should be the
6104          * same as other standard flows.
6105          */
6106         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6107                              MLX5_FLOW_VLAN_ACTIONS)) &&
6108             (queue_index == 0xFFFF ||
6109              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6110              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6111              conf->tx_explicit != 0))) {
6112                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6113                     MLX5_FLOW_XCAP_ACTIONS)
6114                         return rte_flow_error_set(error, ENOTSUP,
6115                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6116                                                   NULL, "encap and decap "
6117                                                   "combination aren't supported");
6118                 if (!attr->transfer && attr->ingress) {
6119                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6120                                 return rte_flow_error_set
6121                                                 (error, ENOTSUP,
6122                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6123                                                  NULL, "encap is not supported"
6124                                                  " for ingress traffic");
6125                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6126                                 return rte_flow_error_set
6127                                                 (error, ENOTSUP,
6128                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6129                                                  NULL, "push VLAN action not "
6130                                                  "supported for ingress");
6131                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6132                                         MLX5_FLOW_VLAN_ACTIONS)
6133                                 return rte_flow_error_set
6134                                                 (error, ENOTSUP,
6135                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6136                                                  NULL, "no support for "
6137                                                  "multiple VLAN actions");
6138                 }
6139         }
6140         /*
6141          * Hairpin flow will add one more TAG action in TX implicit mode.
6142          * In TX explicit mode, there will be no hairpin flow ID.
6143          */
6144         if (hairpin > 0)
6145                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6146         /* extra metadata enabled: one more TAG action will be add. */
6147         if (dev_conf->dv_flow_en &&
6148             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6149             mlx5_flow_ext_mreg_supported(dev))
6150                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6151         if ((uint32_t)rw_act_num >
6152                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6153                 return rte_flow_error_set(error, ENOTSUP,
6154                                           RTE_FLOW_ERROR_TYPE_ACTION,
6155                                           NULL, "too many header modify"
6156                                           " actions to support");
6157         }
6158         return 0;
6159 }
6160
6161 /**
6162  * Internal preparation function. Allocates the DV flow size,
6163  * this size is constant.
6164  *
6165  * @param[in] dev
6166  *   Pointer to the rte_eth_dev structure.
6167  * @param[in] attr
6168  *   Pointer to the flow attributes.
6169  * @param[in] items
6170  *   Pointer to the list of items.
6171  * @param[in] actions
6172  *   Pointer to the list of actions.
6173  * @param[out] error
6174  *   Pointer to the error structure.
6175  *
6176  * @return
6177  *   Pointer to mlx5_flow object on success,
6178  *   otherwise NULL and rte_errno is set.
6179  */
6180 static struct mlx5_flow *
6181 flow_dv_prepare(struct rte_eth_dev *dev,
6182                 const struct rte_flow_attr *attr __rte_unused,
6183                 const struct rte_flow_item items[] __rte_unused,
6184                 const struct rte_flow_action actions[] __rte_unused,
6185                 struct rte_flow_error *error)
6186 {
6187         uint32_t handle_idx = 0;
6188         struct mlx5_flow *dev_flow;
6189         struct mlx5_flow_handle *dev_handle;
6190         struct mlx5_priv *priv = dev->data->dev_private;
6191         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6192
6193         MLX5_ASSERT(wks);
6194         /* In case of corrupting the memory. */
6195         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6196                 rte_flow_error_set(error, ENOSPC,
6197                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6198                                    "not free temporary device flow");
6199                 return NULL;
6200         }
6201         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6202                                    &handle_idx);
6203         if (!dev_handle) {
6204                 rte_flow_error_set(error, ENOMEM,
6205                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6206                                    "not enough memory to create flow handle");
6207                 return NULL;
6208         }
6209         MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows));
6210         dev_flow = &wks->flows[wks->flow_idx++];
6211         dev_flow->handle = dev_handle;
6212         dev_flow->handle_idx = handle_idx;
6213         /*
6214          * In some old rdma-core releases, before continuing, a check of the
6215          * length of matching parameter will be done at first. It needs to use
6216          * the length without misc4 param. If the flow has misc4 support, then
6217          * the length needs to be adjusted accordingly. Each param member is
6218          * aligned with a 64B boundary naturally.
6219          */
6220         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6221                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6222         /*
6223          * The matching value needs to be cleared to 0 before using. In the
6224          * past, it will be automatically cleared when using rte_*alloc
6225          * API. The time consumption will be almost the same as before.
6226          */
6227         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
6228         dev_flow->ingress = attr->ingress;
6229         dev_flow->dv.transfer = attr->transfer;
6230         return dev_flow;
6231 }
6232
6233 #ifdef RTE_LIBRTE_MLX5_DEBUG
6234 /**
6235  * Sanity check for match mask and value. Similar to check_valid_spec() in
6236  * kernel driver. If unmasked bit is present in value, it returns failure.
6237  *
6238  * @param match_mask
6239  *   pointer to match mask buffer.
6240  * @param match_value
6241  *   pointer to match value buffer.
6242  *
6243  * @return
6244  *   0 if valid, -EINVAL otherwise.
6245  */
6246 static int
6247 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6248 {
6249         uint8_t *m = match_mask;
6250         uint8_t *v = match_value;
6251         unsigned int i;
6252
6253         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6254                 if (v[i] & ~m[i]) {
6255                         DRV_LOG(ERR,
6256                                 "match_value differs from match_criteria"
6257                                 " %p[%u] != %p[%u]",
6258                                 match_value, i, match_mask, i);
6259                         return -EINVAL;
6260                 }
6261         }
6262         return 0;
6263 }
6264 #endif
6265
6266 /**
6267  * Add match of ip_version.
6268  *
6269  * @param[in] group
6270  *   Flow group.
6271  * @param[in] headers_v
6272  *   Values header pointer.
6273  * @param[in] headers_m
6274  *   Masks header pointer.
6275  * @param[in] ip_version
6276  *   The IP version to set.
6277  */
6278 static inline void
6279 flow_dv_set_match_ip_version(uint32_t group,
6280                              void *headers_v,
6281                              void *headers_m,
6282                              uint8_t ip_version)
6283 {
6284         if (group == 0)
6285                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6286         else
6287                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6288                          ip_version);
6289         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6290         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6291         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6292 }
6293
6294 /**
6295  * Add Ethernet item to matcher and to the value.
6296  *
6297  * @param[in, out] matcher
6298  *   Flow matcher.
6299  * @param[in, out] key
6300  *   Flow matcher value.
6301  * @param[in] item
6302  *   Flow pattern to translate.
6303  * @param[in] inner
6304  *   Item is inner pattern.
6305  */
6306 static void
6307 flow_dv_translate_item_eth(void *matcher, void *key,
6308                            const struct rte_flow_item *item, int inner,
6309                            uint32_t group)
6310 {
6311         const struct rte_flow_item_eth *eth_m = item->mask;
6312         const struct rte_flow_item_eth *eth_v = item->spec;
6313         const struct rte_flow_item_eth nic_mask = {
6314                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6315                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6316                 .type = RTE_BE16(0xffff),
6317                 .has_vlan = 0,
6318         };
6319         void *hdrs_m;
6320         void *hdrs_v;
6321         char *l24_v;
6322         unsigned int i;
6323
6324         if (!eth_v)
6325                 return;
6326         if (!eth_m)
6327                 eth_m = &nic_mask;
6328         if (inner) {
6329                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6330                                          inner_headers);
6331                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6332         } else {
6333                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6334                                          outer_headers);
6335                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6336         }
6337         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6338                &eth_m->dst, sizeof(eth_m->dst));
6339         /* The value must be in the range of the mask. */
6340         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6341         for (i = 0; i < sizeof(eth_m->dst); ++i)
6342                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6343         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6344                &eth_m->src, sizeof(eth_m->src));
6345         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6346         /* The value must be in the range of the mask. */
6347         for (i = 0; i < sizeof(eth_m->dst); ++i)
6348                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6349         /*
6350          * HW supports match on one Ethertype, the Ethertype following the last
6351          * VLAN tag of the packet (see PRM).
6352          * Set match on ethertype only if ETH header is not followed by VLAN.
6353          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6354          * ethertype, and use ip_version field instead.
6355          * eCPRI over Ether layer will use type value 0xAEFE.
6356          */
6357         if (eth_m->type == 0xFFFF) {
6358                 /* Set cvlan_tag mask for any single\multi\un-tagged case. */
6359                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6360                 switch (eth_v->type) {
6361                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6362                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6363                         return;
6364                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6365                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6366                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6367                         return;
6368                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6369                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6370                         return;
6371                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6372                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6373                         return;
6374                 default:
6375                         break;
6376                 }
6377         }
6378         if (eth_m->has_vlan) {
6379                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6380                 if (eth_v->has_vlan) {
6381                         /*
6382                          * Here, when also has_more_vlan field in VLAN item is
6383                          * not set, only single-tagged packets will be matched.
6384                          */
6385                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6386                         return;
6387                 }
6388         }
6389         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6390                  rte_be_to_cpu_16(eth_m->type));
6391         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6392         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6393 }
6394
/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] dev_flow
 *   Flow descriptor.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
			    void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_vlan *vlan_m = item->mask;
	const struct rte_flow_item_vlan *vlan_v = item->spec;
	void *hdrs_m;
	void *hdrs_v;
	uint16_t tci_m;
	uint16_t tci_v;

	if (inner) {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
		/*
		 * This is workaround, masks are not supported,
		 * and pre-validated.
		 */
		/* Stash the 12-bit VID for the VF VLAN workaround path. */
		if (vlan_v)
			dev_flow->handle->vf_vlan.tag =
					rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
	}
	/*
	 * When VLAN item exists in flow, mark packet as tagged,
	 * even if TCI is not specified.
	 */
	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
	}
	if (!vlan_v)
		return;
	if (!vlan_m)
		vlan_m = &rte_flow_item_vlan_mask;
	/* TCI layout: PCP bits 15:13, CFI/DEI bit 12, VID bits 11:0. */
	tci_m = rte_be_to_cpu_16(vlan_m->tci);
	/* The value must be in the range of the mask. */
	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
	/*
	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
	 * ethertype, and use ip_version field instead.
	 */
	if (vlan_m->inner_type == 0xFFFF) {
		switch (vlan_v->inner_type) {
		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
			/* Inner type VLAN means QinQ: match S-tag only. */
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
			return;
		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
			return;
		default:
			break;
		}
	}
	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
		/* Only one vlan_tag bit can be set. */
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
		return;
	}
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type));
	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
}
6491
6492 /**
6493  * Add IPV4 item to matcher and to the value.
6494  *
6495  * @param[in, out] matcher
6496  *   Flow matcher.
6497  * @param[in, out] key
6498  *   Flow matcher value.
6499  * @param[in] item
6500  *   Flow pattern to translate.
6501  * @param[in] inner
6502  *   Item is inner pattern.
6503  * @param[in] group
6504  *   The group to insert the rule.
6505  */
6506 static void
6507 flow_dv_translate_item_ipv4(void *matcher, void *key,
6508                             const struct rte_flow_item *item,
6509                             int inner, uint32_t group)
6510 {
6511         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6512         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6513         const struct rte_flow_item_ipv4 nic_mask = {
6514                 .hdr = {
6515                         .src_addr = RTE_BE32(0xffffffff),
6516                         .dst_addr = RTE_BE32(0xffffffff),
6517                         .type_of_service = 0xff,
6518                         .next_proto_id = 0xff,
6519                         .time_to_live = 0xff,
6520                 },
6521         };
6522         void *headers_m;
6523         void *headers_v;
6524         char *l24_m;
6525         char *l24_v;
6526         uint8_t tos;
6527
6528         if (inner) {
6529                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6530                                          inner_headers);
6531                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6532         } else {
6533                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6534                                          outer_headers);
6535                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6536         }
6537         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6538         if (!ipv4_v)
6539                 return;
6540         if (!ipv4_m)
6541                 ipv4_m = &nic_mask;
6542         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6543                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6544         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6545                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6546         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6547         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6548         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6549                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6550         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6551                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6552         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6553         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6554         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6555         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6556                  ipv4_m->hdr.type_of_service);
6557         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6558         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6559                  ipv4_m->hdr.type_of_service >> 2);
6560         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6561         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6562                  ipv4_m->hdr.next_proto_id);
6563         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6564                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6565         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6566                  ipv4_m->hdr.time_to_live);
6567         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6568                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6569         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6570                  !!(ipv4_m->hdr.fragment_offset));
6571         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6572                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6573 }
6574
/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	/* Flow label lives in misc parameters, not in the L2-L4 headers. */
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* IP version 6 is matched even for an empty item. */
	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address; value bytes are constrained by the mask. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address; same treatment as destination. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	/* vtc_flow layout: version(31:28) | traffic class(27:20) | label(19:0). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* >> 20 puts traffic class in the low byte; ECN is its low 2 bits. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	/* >> 22 isolates the 6-bit DSCP portion of the traffic class. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	/* NOTE(review): relies on MLX5_SET truncating to field width. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
	/* Hop limit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
		 ipv6_m->hdr.hop_limits);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
		 ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
	/* Fragment flag comes from the item's has_frag_ext bit. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
		 !!(ipv6_m->has_frag_ext));
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
		 !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
}
6683
6684 /**
6685  * Add IPV6 fragment extension item to matcher and to the value.
6686  *
6687  * @param[in, out] matcher
6688  *   Flow matcher.
6689  * @param[in, out] key
6690  *   Flow matcher value.
6691  * @param[in] item
6692  *   Flow pattern to translate.
6693  * @param[in] inner
6694  *   Item is inner pattern.
6695  */
6696 static void
6697 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6698                                      const struct rte_flow_item *item,
6699                                      int inner)
6700 {
6701         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6702         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6703         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6704                 .hdr = {
6705                         .next_header = 0xff,
6706                         .frag_data = RTE_BE16(0xffff),
6707                 },
6708         };
6709         void *headers_m;
6710         void *headers_v;
6711
6712         if (inner) {
6713                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6714                                          inner_headers);
6715                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6716         } else {
6717                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6718                                          outer_headers);
6719                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6720         }
6721         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6722         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6723         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6724         if (!ipv6_frag_ext_v)
6725                 return;
6726         if (!ipv6_frag_ext_m)
6727                 ipv6_frag_ext_m = &nic_mask;
6728         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6729                  ipv6_frag_ext_m->hdr.next_header);
6730         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6731                  ipv6_frag_ext_v->hdr.next_header &
6732                  ipv6_frag_ext_m->hdr.next_header);
6733 }
6734
6735 /**
6736  * Add TCP item to matcher and to the value.
6737  *
6738  * @param[in, out] matcher
6739  *   Flow matcher.
6740  * @param[in, out] key
6741  *   Flow matcher value.
6742  * @param[in] item
6743  *   Flow pattern to translate.
6744  * @param[in] inner
6745  *   Item is inner pattern.
6746  */
6747 static void
6748 flow_dv_translate_item_tcp(void *matcher, void *key,
6749                            const struct rte_flow_item *item,
6750                            int inner)
6751 {
6752         const struct rte_flow_item_tcp *tcp_m = item->mask;
6753         const struct rte_flow_item_tcp *tcp_v = item->spec;
6754         void *headers_m;
6755         void *headers_v;
6756
6757         if (inner) {
6758                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6759                                          inner_headers);
6760                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6761         } else {
6762                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6763                                          outer_headers);
6764                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6765         }
6766         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6767         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6768         if (!tcp_v)
6769                 return;
6770         if (!tcp_m)
6771                 tcp_m = &rte_flow_item_tcp_mask;
6772         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6773                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6774         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6775                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6776         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6777                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6778         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6779                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6780         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6781                  tcp_m->hdr.tcp_flags);
6782         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6783                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6784 }
6785
6786 /**
6787  * Add UDP item to matcher and to the value.
6788  *
6789  * @param[in, out] matcher
6790  *   Flow matcher.
6791  * @param[in, out] key
6792  *   Flow matcher value.
6793  * @param[in] item
6794  *   Flow pattern to translate.
6795  * @param[in] inner
6796  *   Item is inner pattern.
6797  */
6798 static void
6799 flow_dv_translate_item_udp(void *matcher, void *key,
6800                            const struct rte_flow_item *item,
6801                            int inner)
6802 {
6803         const struct rte_flow_item_udp *udp_m = item->mask;
6804         const struct rte_flow_item_udp *udp_v = item->spec;
6805         void *headers_m;
6806         void *headers_v;
6807
6808         if (inner) {
6809                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6810                                          inner_headers);
6811                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6812         } else {
6813                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6814                                          outer_headers);
6815                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6816         }
6817         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6818         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6819         if (!udp_v)
6820                 return;
6821         if (!udp_m)
6822                 udp_m = &rte_flow_item_udp_mask;
6823         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6824                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6825         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6826                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6827         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6828                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6829         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6830                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6831 }
6832
6833 /**
6834  * Add GRE optional Key item to matcher and to the value.
6835  *
6836  * @param[in, out] matcher
6837  *   Flow matcher.
6838  * @param[in, out] key
6839  *   Flow matcher value.
6840  * @param[in] item
6841  *   Flow pattern to translate.
6842  * @param[in] inner
6843  *   Item is inner pattern.
6844  */
6845 static void
6846 flow_dv_translate_item_gre_key(void *matcher, void *key,
6847                                    const struct rte_flow_item *item)
6848 {
6849         const rte_be32_t *key_m = item->mask;
6850         const rte_be32_t *key_v = item->spec;
6851         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6852         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6853         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6854
6855         /* GRE K bit must be on and should already be validated */
6856         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6857         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6858         if (!key_v)
6859                 return;
6860         if (!key_m)
6861                 key_m = &gre_key_default_mask;
6862         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6863                  rte_be_to_cpu_32(*key_m) >> 8);
6864         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6865                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6866         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6867                  rte_be_to_cpu_32(*key_m) & 0xFF);
6868         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6869                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6870 }
6871
6872 /**
6873  * Add GRE item to matcher and to the value.
6874  *
6875  * @param[in, out] matcher
6876  *   Flow matcher.
6877  * @param[in, out] key
6878  *   Flow matcher value.
6879  * @param[in] item
6880  *   Flow pattern to translate.
6881  * @param[in] inner
6882  *   Item is inner pattern.
6883  */
6884 static void
6885 flow_dv_translate_item_gre(void *matcher, void *key,
6886                            const struct rte_flow_item *item,
6887                            int inner)
6888 {
6889         const struct rte_flow_item_gre *gre_m = item->mask;
6890         const struct rte_flow_item_gre *gre_v = item->spec;
6891         void *headers_m;
6892         void *headers_v;
6893         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6894         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6895         struct {
6896                 union {
6897                         __extension__
6898                         struct {
6899                                 uint16_t version:3;
6900                                 uint16_t rsvd0:9;
6901                                 uint16_t s_present:1;
6902                                 uint16_t k_present:1;
6903                                 uint16_t rsvd_bit1:1;
6904                                 uint16_t c_present:1;
6905                         };
6906                         uint16_t value;
6907                 };
6908         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6909
6910         if (inner) {
6911                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6912                                          inner_headers);
6913                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6914         } else {
6915                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6916                                          outer_headers);
6917                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6918         }
6919         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6920         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6921         if (!gre_v)
6922                 return;
6923         if (!gre_m)
6924                 gre_m = &rte_flow_item_gre_mask;
6925         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6926                  rte_be_to_cpu_16(gre_m->protocol));
6927         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6928                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6929         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6930         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6931         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6932                  gre_crks_rsvd0_ver_m.c_present);
6933         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6934                  gre_crks_rsvd0_ver_v.c_present &
6935                  gre_crks_rsvd0_ver_m.c_present);
6936         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6937                  gre_crks_rsvd0_ver_m.k_present);
6938         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6939                  gre_crks_rsvd0_ver_v.k_present &
6940                  gre_crks_rsvd0_ver_m.k_present);
6941         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6942                  gre_crks_rsvd0_ver_m.s_present);
6943         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6944                  gre_crks_rsvd0_ver_v.s_present &
6945                  gre_crks_rsvd0_ver_m.s_present);
6946 }
6947
6948 /**
6949  * Add NVGRE item to matcher and to the value.
6950  *
6951  * @param[in, out] matcher
6952  *   Flow matcher.
6953  * @param[in, out] key
6954  *   Flow matcher value.
6955  * @param[in] item
6956  *   Flow pattern to translate.
6957  * @param[in] inner
6958  *   Item is inner pattern.
6959  */
6960 static void
6961 flow_dv_translate_item_nvgre(void *matcher, void *key,
6962                              const struct rte_flow_item *item,
6963                              int inner)
6964 {
6965         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6966         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6967         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6968         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6969         const char *tni_flow_id_m;
6970         const char *tni_flow_id_v;
6971         char *gre_key_m;
6972         char *gre_key_v;
6973         int size;
6974         int i;
6975
6976         /* For NVGRE, GRE header fields must be set with defined values. */
6977         const struct rte_flow_item_gre gre_spec = {
6978                 .c_rsvd0_ver = RTE_BE16(0x2000),
6979                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6980         };
6981         const struct rte_flow_item_gre gre_mask = {
6982                 .c_rsvd0_ver = RTE_BE16(0xB000),
6983                 .protocol = RTE_BE16(UINT16_MAX),
6984         };
6985         const struct rte_flow_item gre_item = {
6986                 .spec = &gre_spec,
6987                 .mask = &gre_mask,
6988                 .last = NULL,
6989         };
6990         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6991         if (!nvgre_v)
6992                 return;
6993         if (!nvgre_m)
6994                 nvgre_m = &rte_flow_item_nvgre_mask;
6995         tni_flow_id_m = (const char *)nvgre_m->tni;
6996         tni_flow_id_v = (const char *)nvgre_v->tni;
6997         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
6998         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6999         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7000         memcpy(gre_key_m, tni_flow_id_m, size);
7001         for (i = 0; i < size; ++i)
7002                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7003 }
7004
7005 /**
7006  * Add VXLAN item to matcher and to the value.
7007  *
7008  * @param[in, out] matcher
7009  *   Flow matcher.
7010  * @param[in, out] key
7011  *   Flow matcher value.
7012  * @param[in] item
7013  *   Flow pattern to translate.
7014  * @param[in] inner
7015  *   Item is inner pattern.
7016  */
7017 static void
7018 flow_dv_translate_item_vxlan(void *matcher, void *key,
7019                              const struct rte_flow_item *item,
7020                              int inner)
7021 {
7022         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7023         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7024         void *headers_m;
7025         void *headers_v;
7026         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7027         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7028         char *vni_m;
7029         char *vni_v;
7030         uint16_t dport;
7031         int size;
7032         int i;
7033
7034         if (inner) {
7035                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7036                                          inner_headers);
7037                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7038         } else {
7039                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7040                                          outer_headers);
7041                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7042         }
7043         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7044                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7045         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7046                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7047                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7048         }
7049         if (!vxlan_v)
7050                 return;
7051         if (!vxlan_m)
7052                 vxlan_m = &rte_flow_item_vxlan_mask;
7053         size = sizeof(vxlan_m->vni);
7054         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7055         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7056         memcpy(vni_m, vxlan_m->vni, size);
7057         for (i = 0; i < size; ++i)
7058                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7059 }
7060
7061 /**
7062  * Add VXLAN-GPE item to matcher and to the value.
7063  *
7064  * @param[in, out] matcher
7065  *   Flow matcher.
7066  * @param[in, out] key
7067  *   Flow matcher value.
7068  * @param[in] item
7069  *   Flow pattern to translate.
7070  * @param[in] inner
7071  *   Item is inner pattern.
7072  */
7073
7074 static void
7075 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7076                                  const struct rte_flow_item *item, int inner)
7077 {
7078         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7079         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7080         void *headers_m;
7081         void *headers_v;
7082         void *misc_m =
7083                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7084         void *misc_v =
7085                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7086         char *vni_m;
7087         char *vni_v;
7088         uint16_t dport;
7089         int size;
7090         int i;
7091         uint8_t flags_m = 0xff;
7092         uint8_t flags_v = 0xc;
7093
7094         if (inner) {
7095                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7096                                          inner_headers);
7097                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7098         } else {
7099                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7100                                          outer_headers);
7101                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7102         }
7103         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7104                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7105         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7106                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7107                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7108         }
7109         if (!vxlan_v)
7110                 return;
7111         if (!vxlan_m)
7112                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7113         size = sizeof(vxlan_m->vni);
7114         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7115         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7116         memcpy(vni_m, vxlan_m->vni, size);
7117         for (i = 0; i < size; ++i)
7118                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7119         if (vxlan_m->flags) {
7120                 flags_m = vxlan_m->flags;
7121                 flags_v = vxlan_v->flags;
7122         }
7123         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7124         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7125         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7126                  vxlan_m->protocol);
7127         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7128                  vxlan_v->protocol);
7129 }
7130
7131 /**
7132  * Add Geneve item to matcher and to the value.
7133  *
7134  * @param[in, out] matcher
7135  *   Flow matcher.
7136  * @param[in, out] key
7137  *   Flow matcher value.
7138  * @param[in] item
7139  *   Flow pattern to translate.
7140  * @param[in] inner
7141  *   Item is inner pattern.
7142  */
7143
7144 static void
7145 flow_dv_translate_item_geneve(void *matcher, void *key,
7146                               const struct rte_flow_item *item, int inner)
7147 {
7148         const struct rte_flow_item_geneve *geneve_m = item->mask;
7149         const struct rte_flow_item_geneve *geneve_v = item->spec;
7150         void *headers_m;
7151         void *headers_v;
7152         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7153         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7154         uint16_t dport;
7155         uint16_t gbhdr_m;
7156         uint16_t gbhdr_v;
7157         char *vni_m;
7158         char *vni_v;
7159         size_t size, i;
7160
7161         if (inner) {
7162                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7163                                          inner_headers);
7164                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7165         } else {
7166                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7167                                          outer_headers);
7168                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7169         }
7170         dport = MLX5_UDP_PORT_GENEVE;
7171         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7172                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7173                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7174         }
7175         if (!geneve_v)
7176                 return;
7177         if (!geneve_m)
7178                 geneve_m = &rte_flow_item_geneve_mask;
7179         size = sizeof(geneve_m->vni);
7180         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7181         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7182         memcpy(vni_m, geneve_m->vni, size);
7183         for (i = 0; i < size; ++i)
7184                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7185         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7186                  rte_be_to_cpu_16(geneve_m->protocol));
7187         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7188                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7189         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7190         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7191         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7192                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7193         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7194                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7195         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7196                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7197         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7198                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7199                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7200 }
7201
7202 /**
7203  * Add MPLS item to matcher and to the value.
7204  *
7205  * @param[in, out] matcher
7206  *   Flow matcher.
7207  * @param[in, out] key
7208  *   Flow matcher value.
7209  * @param[in] item
7210  *   Flow pattern to translate.
7211  * @param[in] prev_layer
7212  *   The protocol layer indicated in previous item.
7213  * @param[in] inner
7214  *   Item is inner pattern.
7215  */
7216 static void
7217 flow_dv_translate_item_mpls(void *matcher, void *key,
7218                             const struct rte_flow_item *item,
7219                             uint64_t prev_layer,
7220                             int inner)
7221 {
7222         const uint32_t *in_mpls_m = item->mask;
7223         const uint32_t *in_mpls_v = item->spec;
7224         uint32_t *out_mpls_m = 0;
7225         uint32_t *out_mpls_v = 0;
7226         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7227         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7228         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7229                                      misc_parameters_2);
7230         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7231         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7232         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7233
7234         switch (prev_layer) {
7235         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7236                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7237                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7238                          MLX5_UDP_PORT_MPLS);
7239                 break;
7240         case MLX5_FLOW_LAYER_GRE:
7241                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7242                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7243                          RTE_ETHER_TYPE_MPLS);
7244                 break;
7245         default:
7246                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7247                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7248                          IPPROTO_MPLS);
7249                 break;
7250         }
7251         if (!in_mpls_v)
7252                 return;
7253         if (!in_mpls_m)
7254                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7255         switch (prev_layer) {
7256         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7257                 out_mpls_m =
7258                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7259                                                  outer_first_mpls_over_udp);
7260                 out_mpls_v =
7261                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7262                                                  outer_first_mpls_over_udp);
7263                 break;
7264         case MLX5_FLOW_LAYER_GRE:
7265                 out_mpls_m =
7266                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7267                                                  outer_first_mpls_over_gre);
7268                 out_mpls_v =
7269                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7270                                                  outer_first_mpls_over_gre);
7271                 break;
7272         default:
7273                 /* Inner MPLS not over GRE is not supported. */
7274                 if (!inner) {
7275                         out_mpls_m =
7276                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7277                                                          misc2_m,
7278                                                          outer_first_mpls);
7279                         out_mpls_v =
7280                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7281                                                          misc2_v,
7282                                                          outer_first_mpls);
7283                 }
7284                 break;
7285         }
7286         if (out_mpls_m && out_mpls_v) {
7287                 *out_mpls_m = *in_mpls_m;
7288                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7289         }
7290 }
7291
7292 /**
7293  * Add metadata register item to matcher
7294  *
7295  * @param[in, out] matcher
7296  *   Flow matcher.
7297  * @param[in, out] key
7298  *   Flow matcher value.
7299  * @param[in] reg_type
7300  *   Type of device metadata register
7301  * @param[in] value
7302  *   Register value
7303  * @param[in] mask
7304  *   Register mask
7305  */
7306 static void
7307 flow_dv_match_meta_reg(void *matcher, void *key,
7308                        enum modify_reg reg_type,
7309                        uint32_t data, uint32_t mask)
7310 {
7311         void *misc2_m =
7312                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7313         void *misc2_v =
7314                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7315         uint32_t temp;
7316
7317         data &= mask;
7318         switch (reg_type) {
7319         case REG_A:
7320                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7321                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7322                 break;
7323         case REG_B:
7324                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7325                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7326                 break;
7327         case REG_C_0:
7328                 /*
7329                  * The metadata register C0 field might be divided into
7330                  * source vport index and META item value, we should set
7331                  * this field according to specified mask, not as whole one.
7332                  */
7333                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7334                 temp |= mask;
7335                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7336                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7337                 temp &= ~mask;
7338                 temp |= data;
7339                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7340                 break;
7341         case REG_C_1:
7342                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7343                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7344                 break;
7345         case REG_C_2:
7346                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7347                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7348                 break;
7349         case REG_C_3:
7350                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7351                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7352                 break;
7353         case REG_C_4:
7354                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7355                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7356                 break;
7357         case REG_C_5:
7358                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7359                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7360                 break;
7361         case REG_C_6:
7362                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7363                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7364                 break;
7365         case REG_C_7:
7366                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7367                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7368                 break;
7369         default:
7370                 MLX5_ASSERT(false);
7371                 break;
7372         }
7373 }
7374
7375 /**
7376  * Add MARK item to matcher
7377  *
7378  * @param[in] dev
7379  *   The device to configure through.
7380  * @param[in, out] matcher
7381  *   Flow matcher.
7382  * @param[in, out] key
7383  *   Flow matcher value.
7384  * @param[in] item
7385  *   Flow pattern to translate.
7386  */
7387 static void
7388 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7389                             void *matcher, void *key,
7390                             const struct rte_flow_item *item)
7391 {
7392         struct mlx5_priv *priv = dev->data->dev_private;
7393         const struct rte_flow_item_mark *mark;
7394         uint32_t value;
7395         uint32_t mask;
7396
7397         mark = item->mask ? (const void *)item->mask :
7398                             &rte_flow_item_mark_mask;
7399         mask = mark->id & priv->sh->dv_mark_mask;
7400         mark = (const void *)item->spec;
7401         MLX5_ASSERT(mark);
7402         value = mark->id & priv->sh->dv_mark_mask & mask;
7403         if (mask) {
7404                 enum modify_reg reg;
7405
7406                 /* Get the metadata register index for the mark. */
7407                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7408                 MLX5_ASSERT(reg > 0);
7409                 if (reg == REG_C_0) {
7410                         struct mlx5_priv *priv = dev->data->dev_private;
7411                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7412                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7413
7414                         mask &= msk_c0;
7415                         mask <<= shl_c0;
7416                         value <<= shl_c0;
7417                 }
7418                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7419         }
7420 }
7421
7422 /**
7423  * Add META item to matcher
7424  *
7425  * @param[in] dev
7426  *   The devich to configure through.
7427  * @param[in, out] matcher
7428  *   Flow matcher.
7429  * @param[in, out] key
7430  *   Flow matcher value.
7431  * @param[in] attr
7432  *   Attributes of flow that includes this item.
7433  * @param[in] item
7434  *   Flow pattern to translate.
7435  */
7436 static void
7437 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7438                             void *matcher, void *key,
7439                             const struct rte_flow_attr *attr,
7440                             const struct rte_flow_item *item)
7441 {
7442         const struct rte_flow_item_meta *meta_m;
7443         const struct rte_flow_item_meta *meta_v;
7444
7445         meta_m = (const void *)item->mask;
7446         if (!meta_m)
7447                 meta_m = &rte_flow_item_meta_mask;
7448         meta_v = (const void *)item->spec;
7449         if (meta_v) {
7450                 int reg;
7451                 uint32_t value = meta_v->data;
7452                 uint32_t mask = meta_m->data;
7453
7454                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7455                 if (reg < 0)
7456                         return;
7457                 /*
7458                  * In datapath code there is no endianness
7459                  * coversions for perfromance reasons, all
7460                  * pattern conversions are done in rte_flow.
7461                  */
7462                 value = rte_cpu_to_be_32(value);
7463                 mask = rte_cpu_to_be_32(mask);
7464                 if (reg == REG_C_0) {
7465                         struct mlx5_priv *priv = dev->data->dev_private;
7466                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7467                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7468 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7469                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7470
7471                         value >>= shr_c0;
7472                         mask >>= shr_c0;
7473 #endif
7474                         value <<= shl_c0;
7475                         mask <<= shl_c0;
7476                         MLX5_ASSERT(msk_c0);
7477                         MLX5_ASSERT(!(~msk_c0 & mask));
7478                 }
7479                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7480         }
7481 }
7482
/**
 * Add vport metadata Reg C0 item to matcher
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Register mask to match.
 */
static void
flow_dv_translate_item_meta_vport(void *matcher, void *key,
				  uint32_t value, uint32_t mask)
{
	flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
}
7499
/**
 * Add tag item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate (internal mlx5 tag item, spec required).
 */
static void
flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
				void *matcher, void *key,
				const struct rte_flow_item *item)
{
	const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
	const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
	uint32_t mask, value;

	/* Internal item - spec is mandatory by construction. */
	MLX5_ASSERT(tag_v);
	value = tag_v->data;
	/* No mask means match the whole register. */
	mask = tag_m ? tag_m->data : UINT32_MAX;
	if (tag_v->id == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		/* Only part of REG_C_0 is usable - shift into place. */
		mask &= msk_c0;
		mask <<= shl_c0;
		value <<= shl_c0;
	}
	flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
}
7535
/**
 * Add TAG item to matcher
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_tag(struct rte_eth_dev *dev,
			   void *matcher, void *key,
			   const struct rte_flow_item *item)
{
	const struct rte_flow_item_tag *tag_v = item->spec;
	const struct rte_flow_item_tag *tag_m = item->mask;
	enum modify_reg reg;

	/* Spec presence is guaranteed by earlier validation. */
	MLX5_ASSERT(tag_v);
	tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
	/* Get the metadata register index for the tag. */
	reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
	MLX5_ASSERT(reg > 0);
	flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
}
7564
7565 /**
7566  * Add source vport match to the specified matcher.
7567  *
7568  * @param[in, out] matcher
7569  *   Flow matcher.
7570  * @param[in, out] key
7571  *   Flow matcher value.
7572  * @param[in] port
7573  *   Source vport value to match
7574  * @param[in] mask
7575  *   Mask
7576  */
7577 static void
7578 flow_dv_translate_item_source_vport(void *matcher, void *key,
7579                                     int16_t port, uint16_t mask)
7580 {
7581         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7582         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7583
7584         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7585         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7586 }
7587
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] attr
 *   Flow attributes.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item,
			       const struct rte_flow_attr *attr)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	struct mlx5_priv *priv;
	uint16_t mask, id;

	/* With no item provided, match this device's own port. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	priv = mlx5_port_to_eswitch_info(id, item == NULL);
	if (!priv)
		return -rte_errno;
	/*
	 * Translate to vport field or to metadata, depending on mode.
	 * Kernel can use either misc.source_port or half of C0 metadata
	 * register.
	 */
	if (priv->vport_meta_mask) {
		/*
		 * Provide the hint for SW steering library
		 * to insert the flow into ingress domain and
		 * save the extra vport match.
		 */
		if (mask == 0xffff && priv->vport_id == 0xffff &&
		    priv->pf_bond < 0 && attr->transfer)
			flow_dv_translate_item_source_vport
				(matcher, key, priv->vport_id, mask);
		else
			flow_dv_translate_item_meta_vport
				(matcher, key,
				 priv->vport_meta_tag,
				 priv->vport_meta_mask);
	} else {
		flow_dv_translate_item_source_vport(matcher, key,
						    priv->vport_id, mask);
	}
	return 0;
}
7646
/**
 * Add ICMP6 item to matcher and to the value.
 *
 * Always forces the IP protocol match to ICMPv6; type/code matching
 * is added only when the item carries a spec.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_icmp6(void *matcher, void *key,
			      const struct rte_flow_item *item,
			      int inner)
{
	const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
	const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Match IP protocol ICMPv6 even for a spec-less item. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
	if (!icmp6_v)
		return;
	if (!icmp6_m)
		icmp6_m = &rte_flow_item_icmp6_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
		 icmp6_v->type & icmp6_m->type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
		 icmp6_v->code & icmp6_m->code);
}
7693
/**
 * Add ICMP item to matcher and to the value.
 *
 * Always forces the IP protocol match to ICMP; type/code and header
 * data (ident/sequence) matching is added only when a spec is present.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_icmp(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner)
{
	const struct rte_flow_item_icmp *icmp_m = item->mask;
	const struct rte_flow_item_icmp *icmp_v = item->spec;
	uint32_t icmp_header_data_m = 0;
	uint32_t icmp_header_data_v = 0;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Match IP protocol ICMP even for a spec-less item. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
	if (!icmp_v)
		return;
	if (!icmp_m)
		icmp_m = &rte_flow_item_icmp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
		 icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
		 icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
		 icmp_m->hdr.icmp_code);
	MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
		 icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
	/* Pack identifier (high 16 bits) and sequence (low 16 bits). */
	icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
	icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
	if (icmp_header_data_m) {
		icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
		icmp_header_data_v |=
			 rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
		MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
			 icmp_header_data_m);
		MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
			 icmp_header_data_v & icmp_header_data_m);
	}
}
7755
/**
 * Add GTP item to matcher and to the value.
 *
 * Defaults the UDP destination port to the GTP-U port when the
 * pattern did not already constrain it; flags/type/TEID matching
 * is added only when a spec is present.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gtp(void *matcher, void *key,
			   const struct rte_flow_item *item, int inner)
{
	const struct rte_flow_item_gtp *gtp_m = item->mask;
	const struct rte_flow_item_gtp *gtp_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_3);
	void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
	uint16_t dport = RTE_GTPU_UDP_PORT;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Only set the UDP dport if an earlier item did not match it. */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!gtp_v)
		return;
	if (!gtp_m)
		gtp_m = &rte_flow_item_gtp_mask;
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
		 gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
		 gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
		 gtp_v->msg_type & gtp_m->msg_type);
	MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
		 rte_be_to_cpu_32(gtp_m->teid));
	MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
		 rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
}
7810
/**
 * Add eCPRI item to matcher and to the value.
 *
 * Uses the flex-parser programmable sample fields: sample #0 matches
 * the common header (type), sample #1 matches the first message body
 * word when its mask is non-trivial.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
			     void *key, const struct rte_flow_item *item)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_item_ecpri *ecpri_m = item->mask;
	const struct rte_flow_item_ecpri *ecpri_v = item->spec;
	struct rte_ecpri_common_hdr common;
	void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_4);
	void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
	uint32_t *samples;
	void *dw_m;
	void *dw_v;

	if (!ecpri_v)
		return;
	if (!ecpri_m)
		ecpri_m = &rte_flow_item_ecpri_mask;
	/*
	 * Maximal four DW samples are supported in a single matching now.
	 * Two are used now for an eCPRI matching:
	 * 1. Type: one byte, mask should be 0x00ff0000 in network order
	 * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
	 *    if any.
	 */
	if (!ecpri_m->hdr.common.u32)
		return;
	samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
	/* Need to take the whole DW as the mask to fill the entry. */
	dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
			    prog_sample_field_value_0);
	dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
			    prog_sample_field_value_0);
	/* Already big endian (network order) in the header. */
	*(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
	*(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
	/* Sample#0, used for matching type, offset 0. */
	MLX5_SET(fte_match_set_misc4, misc4_m,
		 prog_sample_field_id_0, samples[0]);
	/* It makes no sense to set the sample ID in the mask field. */
	MLX5_SET(fte_match_set_misc4, misc4_v,
		 prog_sample_field_id_0, samples[0]);
	/*
	 * Checking if message body part needs to be matched.
	 * Some wildcard rules only matching type field should be supported.
	 */
	if (ecpri_m->hdr.dummy[0]) {
		common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
		switch (common.type) {
		case RTE_ECPRI_MSG_TYPE_IQ_DATA:
		case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
		case RTE_ECPRI_MSG_TYPE_DLY_MSR:
			dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
					    prog_sample_field_value_1);
			dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
					    prog_sample_field_value_1);
			*(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
			*(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
			/* Sample#1, to match message body, offset 4. */
			MLX5_SET(fte_match_set_misc4, misc4_m,
				 prog_sample_field_id_1, samples[1]);
			MLX5_SET(fte_match_set_misc4, misc4_v,
				 prog_sample_field_id_1, samples[1]);
			break;
		default:
			/* Others, do not match any sample ID. */
			break;
		}
	}
}
7896
/* All-zero reference buffer used to detect empty match criteria headers. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* True when the given header section of the match criteria is all zeros. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
7902
7903 /**
7904  * Calculate flow matcher enable bitmap.
7905  *
7906  * @param match_criteria
7907  *   Pointer to flow matcher criteria.
7908  *
7909  * @return
7910  *   Bitmap of enabled fields.
7911  */
7912 static uint8_t
7913 flow_dv_matcher_enable(uint32_t *match_criteria)
7914 {
7915         uint8_t match_criteria_enable;
7916
7917         match_criteria_enable =
7918                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7919                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7920         match_criteria_enable |=
7921                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7922                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7923         match_criteria_enable |=
7924                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7925                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7926         match_criteria_enable |=
7927                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7928                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7929         match_criteria_enable |=
7930                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7931                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7932         match_criteria_enable |=
7933                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7934                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7935         return match_criteria_enable;
7936 }
7937
/**
 * Hash-list create callback for flow tables.
 *
 * Allocates the table data entry, creates the underlying flow table
 * object (unless the key is a dummy one) and, for non-root tables,
 * the jump action pointing to it, then initializes the per-table
 * matcher cache list.
 *
 * @param list
 *   Pointer to the hash list (shared device context in list->ctx).
 * @param key64
 *   64-bit table key (union mlx5_flow_tbl_key).
 * @param cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx carrying the device, the error
 *   structure and the tunnel parameters.
 *
 * @return
 *   Pointer to the new hash list entry, NULL on failure (error is set).
 */
struct mlx5_hlist_entry *
flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_tbl_data_entry *tbl_data;
	struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
	struct rte_flow_error *error = ctx->error;
	union mlx5_flow_tbl_key key = { .v64 = key64 };
	struct mlx5_flow_tbl_resource *tbl;
	void *domain;
	uint32_t idx = 0;
	int ret;

	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
	if (!tbl_data) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot allocate flow table data entry");
		return NULL;
	}
	tbl_data->idx = idx;
	tbl_data->tunnel = tt_prm->tunnel;
	tbl_data->group_id = tt_prm->group_id;
	tbl_data->external = tt_prm->external;
	tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
	tbl_data->is_egress = !!key.direction;
	tbl = &tbl_data->tbl;
	/* A dummy entry carries no hardware objects. */
	if (key.dummy)
		return &tbl_data->entry;
	/* Select the steering domain: FDB, NIC Tx, or NIC Rx. */
	if (key.domain)
		domain = sh->fdb_domain;
	else if (key.direction)
		domain = sh->tx_domain;
	else
		domain = sh->rx_domain;
	ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
	if (ret) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create flow table object");
		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
		return NULL;
	}
	/* The root table (id 0) cannot be a jump destination. */
	if (key.table_id) {
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
					(tbl->obj, &tbl_data->jump.action);
		if (ret) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot create flow jump action");
			mlx5_flow_os_destroy_flow_tbl(tbl->obj);
			mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
			return NULL;
		}
	}
	MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
	      key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
	      key.table_id);
	mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
			     flow_dv_matcher_create_cb,
			     flow_dv_matcher_match_cb,
			     flow_dv_matcher_remove_cb);
	return &tbl_data->entry;
}
8006
/**
 * Get a flow table.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_id
 *   Table id to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[in] transfer
 *   E-Switch or NIC flow.
 * @param[in] external
 *   Whether the table was created by an application rule.
 * @param[in] tunnel
 *   Tunnel offload context, may be NULL.
 * @param[in] group_id
 *   Flow group id the table belongs to.
 * @param[in] dummy
 *   Dummy entry for dv API.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 bool external,
			 const struct mlx5_flow_tunnel *tunnel,
			 uint32_t group_id, uint8_t dummy,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	union mlx5_flow_tbl_key table_key = {
		{
			.table_id = table_id,
			.dummy = dummy,
			.domain = !!transfer,
			.direction = !!egress,
		}
	};
	struct mlx5_flow_tbl_tunnel_prm tt_prm = {
		.tunnel = tunnel,
		.group_id = group_id,
		.external = external,
	};
	struct mlx5_flow_cb_ctx ctx = {
		.dev = dev,
		.error = error,
		.data = &tt_prm,
	};
	struct mlx5_hlist_entry *entry;
	struct mlx5_flow_tbl_data_entry *tbl_data;

	/* Creates the table via flow_dv_tbl_create_cb() on first use. */
	entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot get table");
		return NULL;
	}
	DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
		table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
	tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
	return &tbl_data->tbl;
}
8069
/**
 * Hash-list remove callback for flow tables.
 *
 * Destroys the jump action and the flow table object, releases the
 * matching tunnel group entry for external tables created with tunnel
 * offload active, and frees the table data entry.
 *
 * @param list
 *   Pointer to the hash list (shared device context in list->ctx).
 * @param entry
 *   Hash list entry of the table being removed.
 */
void
flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
		      struct mlx5_hlist_entry *entry)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_tbl_data_entry *tbl_data =
		container_of(entry, struct mlx5_flow_tbl_data_entry, entry);

	MLX5_ASSERT(entry && sh);
	if (tbl_data->jump.action)
		mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
	if (tbl_data->tbl.obj)
		mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
	if (tbl_data->tunnel_offload && tbl_data->external) {
		struct mlx5_hlist_entry *he;
		struct mlx5_hlist *tunnel_grp_hash;
		struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
		union tunnel_tbl_key tunnel_key = {
			.tunnel_id = tbl_data->tunnel ?
					tbl_data->tunnel->tunnel_id : 0,
			.group = tbl_data->group_id
		};
		union mlx5_flow_tbl_key table_key = {
			.v64 = entry->key
		};
		uint32_t table_id = table_key.table_id;

		/* Non-tunnel tables live in the hub-wide group hash. */
		tunnel_grp_hash = tbl_data->tunnel ?
					tbl_data->tunnel->groups :
					thub->groups;
		he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
		if (he)
			mlx5_hlist_unregister(tunnel_grp_hash, he);
		DRV_LOG(DEBUG,
			"Table_id %u tunnel %u group %u released.",
			table_id,
			tbl_data->tunnel ?
			tbl_data->tunnel->tunnel_id : 0,
			tbl_data->group_id);
	}
	mlx5_cache_list_destroy(&tbl_data->matchers);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
8113
8114 /**
8115  * Release a flow table.
8116  *
8117  * @param[in] sh
8118  *   Pointer to device shared structure.
8119  * @param[in] tbl
8120  *   Table resource to be released.
8121  *
8122  * @return
8123  *   Returns 0 if table was released, else return 1;
8124  */
8125 static int
8126 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8127                              struct mlx5_flow_tbl_resource *tbl)
8128 {
8129         struct mlx5_flow_tbl_data_entry *tbl_data =
8130                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8131
8132         if (!tbl)
8133                 return 0;
8134         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8135 }
8136
8137 int
8138 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8139                          struct mlx5_cache_entry *entry, void *cb_ctx)
8140 {
8141         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8142         struct mlx5_flow_dv_matcher *ref = ctx->data;
8143         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8144                                                         entry);
8145
8146         return cur->crc != ref->crc ||
8147                cur->priority != ref->priority ||
8148                memcmp((const void *)cur->mask.buf,
8149                       (const void *)ref->mask.buf, ref->mask.size);
8150 }
8151
/**
 * Cache-list create callback for flow matchers.
 *
 * Allocates a copy of the reference matcher and creates the matcher
 * object on the flow table the reference points to.
 *
 * @param list
 *   Pointer to the cache list (shared device context in list->ctx).
 * @param entry
 *   Unused existing entry argument.
 * @param cb_ctx
 *   Pointer to struct mlx5_flow_cb_ctx with the reference matcher in
 *   ctx->data and the error structure in ctx->error.
 *
 * @return
 *   Pointer to the new cache entry, NULL on failure (error is set).
 */
struct mlx5_cache_entry *
flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
			  struct mlx5_cache_entry *entry __rte_unused,
			  void *cb_ctx)
{
	struct mlx5_dev_ctx_shared *sh = list->ctx;
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct mlx5_flow_dv_matcher *ref = ctx->data;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&ref->mask,
	};
	struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
							    typeof(*tbl), tbl);
	int ret;

	cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
	if (!cache) {
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	/* Shallow copy of the reference matcher fields. */
	*cache = *ref;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache->mask.buf);
	dv_attr.priority = ref->priority;
	if (tbl->is_egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
					       &cache->matcher_object);
	if (ret) {
		mlx5_free(cache);
		rte_flow_error_set(ctx->error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot create matcher");
		return NULL;
	}
	return &cache->entry;
}
8193
8194 /**
8195  * Register the flow matcher.
8196  *
8197  * @param[in, out] dev
8198  *   Pointer to rte_eth_dev structure.
8199  * @param[in, out] matcher
8200  *   Pointer to flow matcher.
8201  * @param[in, out] key
8202  *   Pointer to flow table key.
8203  * @parm[in, out] dev_flow
8204  *   Pointer to the dev_flow.
8205  * @param[out] error
8206  *   pointer to error structure.
8207  *
8208  * @return
8209  *   0 on success otherwise -errno and errno is set.
8210  */
8211 static int
8212 flow_dv_matcher_register(struct rte_eth_dev *dev,
8213                          struct mlx5_flow_dv_matcher *ref,
8214                          union mlx5_flow_tbl_key *key,
8215                          struct mlx5_flow *dev_flow,
8216                          const struct mlx5_flow_tunnel *tunnel,
8217                          uint32_t group_id,
8218                          struct rte_flow_error *error)
8219 {
8220         struct mlx5_cache_entry *entry;
8221         struct mlx5_flow_dv_matcher *cache;
8222         struct mlx5_flow_tbl_resource *tbl;
8223         struct mlx5_flow_tbl_data_entry *tbl_data;
8224         struct mlx5_flow_cb_ctx ctx = {
8225                 .error = error,
8226                 .data = ref,
8227         };
8228
8229         /**
8230          * tunnel offload API requires this registration for cases when
8231          * tunnel match rule was inserted before tunnel set rule.
8232          */
8233         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
8234                                        key->direction, key->domain,
8235                                        dev_flow->external, tunnel,
8236                                        group_id, 0, error);
8237         if (!tbl)
8238                 return -rte_errno;      /* No need to refill the error info */
8239         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8240         ref->tbl = tbl;
8241         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8242         if (!entry) {
8243                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8244                 return rte_flow_error_set(error, ENOMEM,
8245                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8246                                           "cannot allocate ref memory");
8247         }
8248         cache = container_of(entry, typeof(*cache), entry);
8249         dev_flow->handle->dvh.matcher = cache;
8250         return 0;
8251 }
8252
8253 struct mlx5_hlist_entry *
8254 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8255 {
8256         struct mlx5_dev_ctx_shared *sh = list->ctx;
8257         struct rte_flow_error *error = ctx;
8258         struct mlx5_flow_dv_tag_resource *entry;
8259         uint32_t idx = 0;
8260         int ret;
8261
8262         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8263         if (!entry) {
8264                 rte_flow_error_set(error, ENOMEM,
8265                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8266                                    "cannot allocate resource memory");
8267                 return NULL;
8268         }
8269         entry->idx = idx;
8270         ret = mlx5_flow_os_create_flow_action_tag(key,
8271                                                   &entry->action);
8272         if (ret) {
8273                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8274                 rte_flow_error_set(error, ENOMEM,
8275                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8276                                    NULL, "cannot create action");
8277                 return NULL;
8278         }
8279         return &entry->entry;
8280 }
8281
8282 /**
8283  * Find existing tag resource or create and register a new one.
8284  *
8285  * @param dev[in, out]
8286  *   Pointer to rte_eth_dev structure.
8287  * @param[in, out] tag_be24
8288  *   Tag value in big endian then R-shift 8.
8289  * @parm[in, out] dev_flow
8290  *   Pointer to the dev_flow.
8291  * @param[out] error
8292  *   pointer to error structure.
8293  *
8294  * @return
8295  *   0 on success otherwise -errno and errno is set.
8296  */
8297 static int
8298 flow_dv_tag_resource_register
8299                         (struct rte_eth_dev *dev,
8300                          uint32_t tag_be24,
8301                          struct mlx5_flow *dev_flow,
8302                          struct rte_flow_error *error)
8303 {
8304         struct mlx5_priv *priv = dev->data->dev_private;
8305         struct mlx5_flow_dv_tag_resource *cache_resource;
8306         struct mlx5_hlist_entry *entry;
8307
8308         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8309         if (entry) {
8310                 cache_resource = container_of
8311                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8312                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8313                 dev_flow->dv.tag_resource = cache_resource;
8314                 return 0;
8315         }
8316         return -rte_errno;
8317 }
8318
8319 void
8320 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8321                       struct mlx5_hlist_entry *entry)
8322 {
8323         struct mlx5_dev_ctx_shared *sh = list->ctx;
8324         struct mlx5_flow_dv_tag_resource *tag =
8325                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8326
8327         MLX5_ASSERT(tag && sh && tag->action);
8328         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8329         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8330         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8331 }
8332
8333 /**
8334  * Release the tag.
8335  *
8336  * @param dev
8337  *   Pointer to Ethernet device.
8338  * @param tag_idx
8339  *   Tag index.
8340  *
8341  * @return
8342  *   1 while a reference on it exists, 0 when freed.
8343  */
8344 static int
8345 flow_dv_tag_release(struct rte_eth_dev *dev,
8346                     uint32_t tag_idx)
8347 {
8348         struct mlx5_priv *priv = dev->data->dev_private;
8349         struct mlx5_flow_dv_tag_resource *tag;
8350
8351         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8352         if (!tag)
8353                 return 0;
8354         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8355                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8356         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8357 }
8358
8359 /**
8360  * Translate port ID action to vport.
8361  *
8362  * @param[in] dev
8363  *   Pointer to rte_eth_dev structure.
8364  * @param[in] action
8365  *   Pointer to the port ID action.
8366  * @param[out] dst_port_id
8367  *   The target port ID.
8368  * @param[out] error
8369  *   Pointer to the error structure.
8370  *
8371  * @return
8372  *   0 on success, a negative errno value otherwise and rte_errno is set.
8373  */
8374 static int
8375 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8376                                  const struct rte_flow_action *action,
8377                                  uint32_t *dst_port_id,
8378                                  struct rte_flow_error *error)
8379 {
8380         uint32_t port;
8381         struct mlx5_priv *priv;
8382         const struct rte_flow_action_port_id *conf =
8383                         (const struct rte_flow_action_port_id *)action->conf;
8384
8385         port = conf->original ? dev->data->port_id : conf->id;
8386         priv = mlx5_port_to_eswitch_info(port, false);
8387         if (!priv)
8388                 return rte_flow_error_set(error, -rte_errno,
8389                                           RTE_FLOW_ERROR_TYPE_ACTION,
8390                                           NULL,
8391                                           "No eswitch info was found for port");
8392 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8393         /*
8394          * This parameter is transferred to
8395          * mlx5dv_dr_action_create_dest_ib_port().
8396          */
8397         *dst_port_id = priv->dev_port;
8398 #else
8399         /*
8400          * Legacy mode, no LAG configurations is supported.
8401          * This parameter is transferred to
8402          * mlx5dv_dr_action_create_dest_vport().
8403          */
8404         *dst_port_id = priv->vport_id;
8405 #endif
8406         return 0;
8407 }
8408
8409 /**
8410  * Create a counter with aging configuration.
8411  *
8412  * @param[in] dev
8413  *   Pointer to rte_eth_dev structure.
8414  * @param[out] count
8415  *   Pointer to the counter action configuration.
8416  * @param[in] age
8417  *   Pointer to the aging action configuration.
8418  *
8419  * @return
8420  *   Index to flow counter on success, 0 otherwise.
8421  */
8422 static uint32_t
8423 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8424                                 struct mlx5_flow *dev_flow,
8425                                 const struct rte_flow_action_count *count,
8426                                 const struct rte_flow_action_age *age)
8427 {
8428         uint32_t counter;
8429         struct mlx5_age_param *age_param;
8430
8431         if (count && count->shared)
8432                 counter = flow_dv_counter_get_shared(dev, count->id);
8433         else
8434                 counter = flow_dv_counter_alloc(dev, !!age);
8435         if (!counter || age == NULL)
8436                 return counter;
8437         age_param  = flow_dv_counter_idx_get_age(dev, counter);
8438         age_param->context = age->context ? age->context :
8439                 (void *)(uintptr_t)(dev_flow->flow_idx);
8440         age_param->timeout = age->timeout;
8441         age_param->port_id = dev->data->port_id;
8442         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8443         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8444         return counter;
8445 }
8446
8447 /**
8448  * Add Tx queue matcher
8449  *
8450  * @param[in] dev
8451  *   Pointer to the dev struct.
8452  * @param[in, out] matcher
8453  *   Flow matcher.
8454  * @param[in, out] key
8455  *   Flow matcher value.
8456  * @param[in] item
8457  *   Flow pattern to translate.
8458  * @param[in] inner
8459  *   Item is inner pattern.
8460  */
8461 static void
8462 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8463                                 void *matcher, void *key,
8464                                 const struct rte_flow_item *item)
8465 {
8466         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8467         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8468         void *misc_m =
8469                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8470         void *misc_v =
8471                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8472         struct mlx5_txq_ctrl *txq;
8473         uint32_t queue;
8474
8475
8476         queue_m = (const void *)item->mask;
8477         if (!queue_m)
8478                 return;
8479         queue_v = (const void *)item->spec;
8480         if (!queue_v)
8481                 return;
8482         txq = mlx5_txq_get(dev, queue_v->queue);
8483         if (!txq)
8484                 return;
8485         queue = txq->obj->sq->id;
8486         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8487         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8488                  queue & queue_m->queue);
8489         mlx5_txq_release(dev, queue_v->queue);
8490 }
8491
/**
 * Set the hash fields according to the @p flow information.
 *
 * Derives the Verbs RX hash field bitmap from the layers detected on the
 * flow and the RSS hash types requested by the application. Only one L3
 * and one L4 selection is made (IPv4 vs IPv6, UDP vs TCP), matching
 * whichever layer the flow actually carries.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] rss_desc
 *   Pointer to the mlx5_flow_rss_desc.
 */
static void
flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
		       struct mlx5_flow_rss_desc *rss_desc)
{
	uint64_t items = dev_flow->handle->layers;
	int rss_inner = 0;
	uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);

	dev_flow->hash_fields = 0;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	/* RSS level >= 2 requests hashing on the inner packet headers. */
	if (rss_desc->level >= 2) {
		dev_flow->hash_fields |= IBV_RX_HASH_INNER;
		rss_inner = 1;
	}
#endif
	/*
	 * L3 selection: pick IPv4 or IPv6 fields from the matching layer
	 * (inner vs outer), honoring SRC_ONLY/DST_ONLY refinements.
	 */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
		if (rss_types & MLX5_IPV4_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
			else
				dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
		if (rss_types & MLX5_IPV6_LAYER_TYPES) {
			if (rss_types & ETH_RSS_L3_SRC_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
			else if (rss_types & ETH_RSS_L3_DST_ONLY)
				dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
			else
				dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
		}
	}
	/* L4 selection: UDP or TCP port hashing, same refinement rules. */
	if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
	    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
		if (rss_types & ETH_RSS_UDP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_UDP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_UDP;
			else
				dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
		}
	} else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
		   (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
		if (rss_types & ETH_RSS_TCP) {
			if (rss_types & ETH_RSS_L4_SRC_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_SRC_PORT_TCP;
			else if (rss_types & ETH_RSS_L4_DST_ONLY)
				dev_flow->hash_fields |=
						IBV_RX_HASH_DST_PORT_TCP;
			else
				dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
		}
	}
}
8562
8563 /**
8564  * Prepare an Rx Hash queue.
8565  *
8566  * @param dev
8567  *   Pointer to Ethernet device.
8568  * @param[in] dev_flow
8569  *   Pointer to the mlx5_flow.
8570  * @param[in] rss_desc
8571  *   Pointer to the mlx5_flow_rss_desc.
8572  * @param[out] hrxq_idx
8573  *   Hash Rx queue index.
8574  *
8575  * @return
8576  *   The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
8577  */
8578 static struct mlx5_hrxq *
8579 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8580                      struct mlx5_flow *dev_flow,
8581                      struct mlx5_flow_rss_desc *rss_desc,
8582                      uint32_t *hrxq_idx)
8583 {
8584         struct mlx5_priv *priv = dev->data->dev_private;
8585         struct mlx5_flow_handle *dh = dev_flow->handle;
8586         struct mlx5_hrxq *hrxq;
8587
8588         MLX5_ASSERT(rss_desc->queue_num);
8589         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8590         rss_desc->hash_fields = dev_flow->hash_fields;
8591         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8592         rss_desc->shared_rss = 0;
8593         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8594         if (!*hrxq_idx)
8595                 return NULL;
8596         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8597                               *hrxq_idx);
8598         return hrxq;
8599 }
8600
8601 /**
8602  * Release sample sub action resource.
8603  *
8604  * @param[in, out] dev
8605  *   Pointer to rte_eth_dev structure.
8606  * @param[in] act_res
8607  *   Pointer to sample sub action resource.
8608  */
8609 static void
8610 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8611                                    struct mlx5_flow_sub_actions_idx *act_res)
8612 {
8613         if (act_res->rix_hrxq) {
8614                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8615                 act_res->rix_hrxq = 0;
8616         }
8617         if (act_res->rix_encap_decap) {
8618                 flow_dv_encap_decap_resource_release(dev,
8619                                                      act_res->rix_encap_decap);
8620                 act_res->rix_encap_decap = 0;
8621         }
8622         if (act_res->rix_port_id_action) {
8623                 flow_dv_port_id_action_resource_release(dev,
8624                                                 act_res->rix_port_id_action);
8625                 act_res->rix_port_id_action = 0;
8626         }
8627         if (act_res->rix_tag) {
8628                 flow_dv_tag_release(dev, act_res->rix_tag);
8629                 act_res->rix_tag = 0;
8630         }
8631         if (act_res->cnt) {
8632                 flow_dv_counter_free(dev, act_res->cnt);
8633                 act_res->cnt = 0;
8634         }
8635 }
8636
8637 int
8638 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8639                         struct mlx5_cache_entry *entry, void *cb_ctx)
8640 {
8641         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8642         struct rte_eth_dev *dev = ctx->dev;
8643         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8644         struct mlx5_flow_dv_sample_resource *cache_resource =
8645                         container_of(entry, typeof(*cache_resource), entry);
8646
8647         if (resource->ratio == cache_resource->ratio &&
8648             resource->ft_type == cache_resource->ft_type &&
8649             resource->ft_id == cache_resource->ft_id &&
8650             resource->set_action == cache_resource->set_action &&
8651             !memcmp((void *)&resource->sample_act,
8652                     (void *)&cache_resource->sample_act,
8653                     sizeof(struct mlx5_flow_sub_actions_list))) {
8654                 /*
8655                  * Existing sample action should release the prepared
8656                  * sub-actions reference counter.
8657                  */
8658                 flow_dv_sample_sub_actions_release(dev,
8659                                                 &resource->sample_idx);
8660                 return 0;
8661         }
8662         return 1;
8663 }
8664
/**
 * Cache list create callback for sample resources.
 *
 * Allocates a new sample resource from the indexed pool, creates the
 * normal path table for the non-sampled traffic, an optional FDB default
 * miss action, and the DR sampler action itself. On any failure the
 * already-created objects are rolled back.
 *
 * @return
 *   Pointer to the new cache entry, NULL on failure (ctx->error is set).
 */
struct mlx5_cache_entry *
flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_sample_resource *resource = ctx->data;
	void **sample_dv_actions = resource->sub_actions;
	struct mlx5_flow_dv_sample_resource *cache_resource;
	struct mlx5dv_dr_flow_sampler_attr sampler_attr;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;
	uint32_t idx = 0;
	/* Non-sampled traffic continues on the next table level. */
	const uint32_t next_ft_step = 1;
	uint32_t next_ft_id = resource->ft_id + next_ft_step;
	uint8_t is_egress = 0;
	uint8_t is_transfer = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new sample resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Create normal path table level */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		is_transfer = 1;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		is_egress = 1;
	tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
					is_egress, is_transfer,
					true, NULL, 0, 0, error);
	if (!tbl) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "fail to create normal path table "
					  "for sample");
		goto error;
	}
	cache_resource->normal_path_tbl = tbl;
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		/*
		 * FDB sampling needs an explicit default miss action
		 * appended to the sampler's sub-action list.
		 */
		cache_resource->default_miss =
				mlx5_glue->dr_create_flow_action_default_miss();
		if (!cache_resource->default_miss) {
			rte_flow_error_set(error, ENOMEM,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
						"cannot create default miss "
						"action");
			goto error;
		}
		sample_dv_actions[resource->sample_act.actions_num++] =
						cache_resource->default_miss;
	}
	/* Create a DR sample action */
	sampler_attr.sample_ratio = cache_resource->ratio;
	sampler_attr.default_next_table = tbl->obj;
	sampler_attr.num_sample_actions = resource->sample_act.actions_num;
	sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
							&sample_dv_actions[0];
	sampler_attr.action = cache_resource->set_action;
	cache_resource->verbs_action =
		mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
	if (!cache_resource->verbs_action) {
		rte_flow_error_set(error, ENOMEM,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot create sample action");
		goto error;
	}
	cache_resource->idx = idx;
	return &cache_resource->entry;
error:
	/*
	 * NOTE(review): in the FDB branch only the default miss action is
	 * destroyed and the prepared sub-action references in sample_idx
	 * are NOT released — confirm whether the caller owns them on this
	 * path or whether this leaks the references.
	 */
	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
	    cache_resource->default_miss)
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->default_miss));
	else
		flow_dv_sample_sub_actions_release(dev,
						   &cache_resource->sample_idx);
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
				cache_resource->normal_path_tbl);
	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
	return NULL;

}
8759
8760 /**
8761  * Find existing sample resource or create and register a new one.
8762  *
8763  * @param[in, out] dev
8764  *   Pointer to rte_eth_dev structure.
8765  * @param[in] resource
8766  *   Pointer to sample resource.
8767  * @parm[in, out] dev_flow
8768  *   Pointer to the dev_flow.
8769  * @param[out] error
8770  *   pointer to error structure.
8771  *
8772  * @return
8773  *   0 on success otherwise -errno and errno is set.
8774  */
8775 static int
8776 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8777                          struct mlx5_flow_dv_sample_resource *resource,
8778                          struct mlx5_flow *dev_flow,
8779                          struct rte_flow_error *error)
8780 {
8781         struct mlx5_flow_dv_sample_resource *cache_resource;
8782         struct mlx5_cache_entry *entry;
8783         struct mlx5_priv *priv = dev->data->dev_private;
8784         struct mlx5_flow_cb_ctx ctx = {
8785                 .dev = dev,
8786                 .error = error,
8787                 .data = resource,
8788         };
8789
8790         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8791         if (!entry)
8792                 return -rte_errno;
8793         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8794         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8795         dev_flow->dv.sample_res = cache_resource;
8796         return 0;
8797 }
8798
8799 int
8800 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8801                             struct mlx5_cache_entry *entry, void *cb_ctx)
8802 {
8803         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8804         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8805         struct rte_eth_dev *dev = ctx->dev;
8806         struct mlx5_flow_dv_dest_array_resource *cache_resource =
8807                         container_of(entry, typeof(*cache_resource), entry);
8808         uint32_t idx = 0;
8809
8810         if (resource->num_of_dest == cache_resource->num_of_dest &&
8811             resource->ft_type == cache_resource->ft_type &&
8812             !memcmp((void *)cache_resource->sample_act,
8813                     (void *)resource->sample_act,
8814                    (resource->num_of_dest *
8815                    sizeof(struct mlx5_flow_sub_actions_list)))) {
8816                 /*
8817                  * Existing sample action should release the prepared
8818                  * sub-actions reference counter.
8819                  */
8820                 for (idx = 0; idx < resource->num_of_dest; idx++)
8821                         flow_dv_sample_sub_actions_release(dev,
8822                                         &resource->sample_idx[idx]);
8823                 return 0;
8824         }
8825         return 1;
8826 }
8827
/**
 * Cache list create callback for destination array resources.
 *
 * Allocates a destination array resource, builds one
 * mlx5dv_dr_action_dest_attr per destination from the recorded
 * sub-actions and creates the DR destination array action in the domain
 * matching the resource's table type. The temporary attribute array is
 * freed on both success and failure; on failure the already-taken
 * sub-action references are dropped as well.
 *
 * @return
 *   Pointer to the new cache entry, NULL on failure (ctx->error is set).
 */
struct mlx5_cache_entry *
flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
			 struct mlx5_cache_entry *entry __rte_unused,
			 void *cb_ctx)
{
	struct mlx5_flow_cb_ctx *ctx = cb_ctx;
	struct rte_eth_dev *dev = ctx->dev;
	struct mlx5_flow_dv_dest_array_resource *cache_resource;
	struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
	struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
	struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_sub_actions_list *sample_act;
	struct mlx5dv_dr_domain *domain;
	uint32_t idx = 0, res_idx = 0;
	struct rte_flow_error *error = ctx->error;

	/* Register new destination array resource. */
	cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
					    &res_idx);
	if (!cache_resource) {
		rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
		return NULL;
	}
	*cache_resource = *resource;
	/* Select the DR domain matching the flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;
	/* Build one destination attribute per recorded sub-action set. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
				 mlx5_malloc(MLX5_MEM_ZERO,
				 sizeof(struct mlx5dv_dr_action_dest_attr),
				 0, SOCKET_ID_ANY);
		if (!dest_attr[idx]) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "cannot allocate resource memory");
			goto error;
		}
		dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
		sample_act = &resource->sample_act[idx];
		if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
			dest_attr[idx]->dest = sample_act->dr_queue_action;
		} else if (sample_act->action_flags ==
			  (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
			/* Port destination with packet reformat on the way. */
			dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
			dest_attr[idx]->dest_reformat = &dest_reformat[idx];
			dest_attr[idx]->dest_reformat->reformat =
					sample_act->dr_encap_action;
			dest_attr[idx]->dest_reformat->dest =
					sample_act->dr_port_id_action;
		} else if (sample_act->action_flags ==
			   MLX5_FLOW_ACTION_PORT_ID) {
			dest_attr[idx]->dest = sample_act->dr_port_id_action;
		}
	}
	/* create a dest array action */
	cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
						(domain,
						 cache_resource->num_of_dest,
						 dest_attr);
	if (!cache_resource->action) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "cannot create destination array action");
		goto error;
	}
	cache_resource->idx = res_idx;
	/* The attributes were only needed for the create call. */
	for (idx = 0; idx < resource->num_of_dest; idx++)
		mlx5_free(dest_attr[idx]);
	return &cache_resource->entry;
error:
	/* Roll back per-destination references and attribute memory. */
	for (idx = 0; idx < resource->num_of_dest; idx++) {
		struct mlx5_flow_sub_actions_idx *act_res =
					&cache_resource->sample_idx[idx];
		if (act_res->rix_hrxq &&
		    !mlx5_hrxq_release(dev,
				act_res->rix_hrxq))
			act_res->rix_hrxq = 0;
		if (act_res->rix_encap_decap &&
			!flow_dv_encap_decap_resource_release(dev,
				act_res->rix_encap_decap))
			act_res->rix_encap_decap = 0;
		if (act_res->rix_port_id_action &&
			!flow_dv_port_id_action_resource_release(dev,
				act_res->rix_port_id_action))
			act_res->rix_port_id_action = 0;
		if (dest_attr[idx])
			mlx5_free(dest_attr[idx]);
	}

	mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
	return NULL;
}
8931
8932 /**
8933  * Find existing destination array resource or create and register a new one.
8934  *
8935  * @param[in, out] dev
8936  *   Pointer to rte_eth_dev structure.
8937  * @param[in] resource
8938  *   Pointer to destination array resource.
8939  * @parm[in, out] dev_flow
8940  *   Pointer to the dev_flow.
8941  * @param[out] error
8942  *   pointer to error structure.
8943  *
8944  * @return
8945  *   0 on success otherwise -errno and errno is set.
8946  */
8947 static int
8948 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
8949                          struct mlx5_flow_dv_dest_array_resource *resource,
8950                          struct mlx5_flow *dev_flow,
8951                          struct rte_flow_error *error)
8952 {
8953         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8954         struct mlx5_priv *priv = dev->data->dev_private;
8955         struct mlx5_cache_entry *entry;
8956         struct mlx5_flow_cb_ctx ctx = {
8957                 .dev = dev,
8958                 .error = error,
8959                 .data = resource,
8960         };
8961
8962         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
8963         if (!entry)
8964                 return -rte_errno;
8965         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8966         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
8967         dev_flow->dv.dest_array_res = cache_resource;
8968         return 0;
8969 }
8970
/**
 * Convert Sample action to DV specification.
 *
 * Walks the sample sub-action list, creates the DR object for each
 * supported sub-action (queue, mark, count, port-id, raw-encap),
 * collects the DR actions into @p sample_actions and records the
 * per-sub-action resource indexes in @p res so they can be released
 * later. Finally sets the flow table type (and, for FDB, the metadata
 * set-action) on @p res.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in, out] num_of_dest
 *   Pointer to the num of destination.
 * @param[in, out] sample_actions
 *   Pointer to sample actions list.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate_action_sample(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
                                uint32_t *num_of_dest,
                                void **sample_actions,
                                struct mlx5_flow_dv_sample_resource *res,
                                struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_sample *sample_action;
        const struct rte_flow_action *sub_actions;
        const struct rte_flow_action_queue *queue;
        struct mlx5_flow_sub_actions_list *sample_act;
        struct mlx5_flow_sub_actions_idx *sample_idx;
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc;
        uint64_t action_flags = 0;

        MLX5_ASSERT(wks);
        rss_desc = &wks->rss_desc;
        sample_act = &res->sample_act;
        sample_idx = &res->sample_idx;
        sample_action = (const struct rte_flow_action_sample *)action->conf;
        res->ratio = sample_action->ratio;
        sub_actions = sample_action->actions;
        for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
                int type = sub_actions->type;
                /* pre_rix/pre_r save flow-level resources that some cases
                 * temporarily overwrite and restore after registration.
                 */
                uint32_t pre_rix = 0;
                void *pre_r;
                switch (type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                {
                        /* Create a dedicated Rx hash queue as the sample
                         * destination.
                         */
                        struct mlx5_hrxq *hrxq;
                        uint32_t hrxq_idx;

                        queue = sub_actions->conf;
                        rss_desc->queue_num = 1;
                        rss_desc->queue[0] = queue->index;
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                        (error, rte_errno,
                                         RTE_FLOW_ERROR_TYPE_ACTION,
                                         NULL,
                                         "cannot create fate queue");
                        sample_act->dr_queue_action = hrxq->action;
                        sample_idx->rix_hrxq = hrxq_idx;
                        sample_actions[sample_act->actions_num++] =
                                                hrxq->action;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action =
                                        MLX5_FLOW_FATE_QUEUE;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK:
                {
                        uint32_t tag_be = mlx5_flow_mark_set
                                (((const struct rte_flow_action_mark *)
                                (sub_actions->conf))->id);

                        dev_flow->handle->mark = 1;
                        pre_rix = dev_flow->handle->dvh.rix_tag;
                        /* Save the mark resource before sample */
                        pre_r = dev_flow->dv.tag_resource;
                        if (flow_dv_tag_resource_register(dev, tag_be,
                                                  dev_flow, error))
                                return -rte_errno;
                        MLX5_ASSERT(dev_flow->dv.tag_resource);
                        sample_act->dr_tag_action =
                                dev_flow->dv.tag_resource->action;
                        sample_idx->rix_tag =
                                dev_flow->handle->dvh.rix_tag;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_tag_action;
                        /* Recover the mark resource after sample */
                        dev_flow->dv.tag_resource = pre_r;
                        dev_flow->handle->dvh.rix_tag = pre_rix;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT:
                {
                        /* Attach a flow counter to the sample path. */
                        uint32_t counter;

                        counter = flow_dv_translate_create_counter(dev,
                                        dev_flow, sub_actions->conf, 0);
                        if (!counter)
                                return rte_flow_error_set
                                                (error, rte_errno,
                                                 RTE_FLOW_ERROR_TYPE_ACTION,
                                                 NULL,
                                                 "cannot create counter"
                                                 " object.");
                        sample_idx->cnt = counter;
                        sample_act->dr_cnt_action =
                                  (flow_dv_counter_get_by_idx(dev,
                                  counter, NULL))->action;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_cnt_action;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_PORT_ID:
                {
                        struct mlx5_flow_dv_port_id_action_resource
                                        port_id_resource;
                        uint32_t port_id = 0;

                        memset(&port_id_resource, 0, sizeof(port_id_resource));
                        /* Save the port id resource before sample */
                        pre_rix = dev_flow->handle->rix_port_id_action;
                        pre_r = dev_flow->dv.port_id_action;
                        if (flow_dv_translate_action_port_id(dev, sub_actions,
                                                             &port_id, error))
                                return -rte_errno;
                        port_id_resource.port_id = port_id;
                        if (flow_dv_port_id_action_resource_register
                            (dev, &port_id_resource, dev_flow, error))
                                return -rte_errno;
                        sample_act->dr_port_id_action =
                                dev_flow->dv.port_id_action->action;
                        sample_idx->rix_port_id_action =
                                dev_flow->handle->rix_port_id_action;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_port_id_action;
                        /* Recover the port id resource after sample */
                        dev_flow->dv.port_id_action = pre_r;
                        dev_flow->handle->rix_port_id_action = pre_rix;
                        (*num_of_dest)++;
                        action_flags |= MLX5_FLOW_ACTION_PORT_ID;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        /* Save the encap resource before sample */
                        pre_rix = dev_flow->handle->dvh.rix_encap_decap;
                        pre_r = dev_flow->dv.encap_decap;
                        if (flow_dv_create_action_l2_encap(dev, sub_actions,
                                                           dev_flow,
                                                           attr->transfer,
                                                           error))
                                return -rte_errno;
                        sample_act->dr_encap_action =
                                dev_flow->dv.encap_decap->action;
                        sample_idx->rix_encap_decap =
                                dev_flow->handle->dvh.rix_encap_decap;
                        sample_actions[sample_act->actions_num++] =
                                                sample_act->dr_encap_action;
                        /* Recover the encap resource after sample */
                        dev_flow->dv.encap_decap = pre_r;
                        dev_flow->handle->dvh.rix_encap_decap = pre_rix;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                NULL,
                                "Not support for sampler action");
                }
        }
        sample_act->action_flags = action_flags;
        res->ft_id = dev_flow->dv.group;
        if (attr->transfer) {
                union {
                        uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
                        uint64_t set_action;
                } action_ctx = { .set_action = 0 };

                /*
                 * For FDB rules build a set-action writing the source vport
                 * metadata tag into REG_C_0.
                 */
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
                MLX5_SET(set_action_in, action_ctx.action_in, action_type,
                         MLX5_MODIFICATION_TYPE_SET);
                MLX5_SET(set_action_in, action_ctx.action_in, field,
                         MLX5_MODI_META_REG_C_0);
                MLX5_SET(set_action_in, action_ctx.action_in, data,
                         priv->vport_meta_tag);
                res->set_action = action_ctx.set_action;
        } else if (attr->ingress) {
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        } else {
                res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
        }
        return 0;
}
9182
/**
 * Create the sample action resource, and for mirroring (more than one
 * destination) the destination array resource combining the sample path
 * with the normal path actions.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] num_of_dest
 *   The num of destination.
 * @param[in, out] res
 *   Pointer to sample resource.
 * @param[in, out] mdest_res
 *   Pointer to destination array resource.
 * @param[in] sample_actions
 *   Pointer to sample path actions list.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_sample(struct rte_eth_dev *dev,
                             struct mlx5_flow *dev_flow,
                             uint32_t num_of_dest,
                             struct mlx5_flow_dv_sample_resource *res,
                             struct mlx5_flow_dv_dest_array_resource *mdest_res,
                             void **sample_actions,
                             uint64_t action_flags,
                             struct rte_flow_error *error)
{
        /* update normal path action resource into last index of array */
        uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
        struct mlx5_flow_sub_actions_list *sample_act =
                                        &mdest_res->sample_act[dest_index];
        struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
        struct mlx5_flow_rss_desc *rss_desc;
        uint32_t normal_idx = 0;
        struct mlx5_hrxq *hrxq;
        uint32_t hrxq_idx;

        MLX5_ASSERT(wks);
        rss_desc = &wks->rss_desc;
        if (num_of_dest > 1) {
                /*
                 * Mirroring case: collect the normal path fate actions
                 * (queue/encap/port-id) into the last destination slot.
                 */
                if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
                        /* Handle QP action for mirroring */
                        hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
                                                    rss_desc, &hrxq_idx);
                        if (!hrxq)
                                return rte_flow_error_set
                                     (error, rte_errno,
                                      RTE_FLOW_ERROR_TYPE_ACTION,
                                      NULL,
                                      "cannot create rx queue");
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
                        sample_act->dr_queue_action = hrxq->action;
                        if (action_flags & MLX5_FLOW_ACTION_MARK)
                                dev_flow->handle->rix_hrxq = hrxq_idx;
                        dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_encap_decap =
                                dev_flow->handle->dvh.rix_encap_decap;
                        sample_act->dr_encap_action =
                                dev_flow->dv.encap_decap->action;
                }
                if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
                        normal_idx++;
                        mdest_res->sample_idx[dest_index].rix_port_id_action =
                                dev_flow->handle->rix_port_id_action;
                        sample_act->dr_port_id_action =
                                dev_flow->dv.port_id_action->action;
                }
                sample_act->actions_num = normal_idx;
                /* update sample action resource into first index of array */
                mdest_res->ft_type = res->ft_type;
                memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
                                sizeof(struct mlx5_flow_sub_actions_idx));
                memcpy(&mdest_res->sample_act[0], &res->sample_act,
                                sizeof(struct mlx5_flow_sub_actions_list));
                mdest_res->num_of_dest = num_of_dest;
                if (flow_dv_dest_array_resource_register(dev, mdest_res,
                                                         dev_flow, error))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL, "can't create sample "
                                                  "action");
        } else {
                /* Single destination: a plain sample resource is enough. */
                res->sub_actions = sample_actions;
                if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  NULL,
                                                  "can't create sample action");
        }
        return 0;
}
9284
9285 /**
9286  * Remove an ASO age action from age actions list.
9287  *
9288  * @param[in] dev
9289  *   Pointer to the Ethernet device structure.
9290  * @param[in] age
9291  *   Pointer to the aso age action handler.
9292  */
9293 static void
9294 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9295                                 struct mlx5_aso_age_action *age)
9296 {
9297         struct mlx5_age_info *age_info;
9298         struct mlx5_age_param *age_param = &age->age_params;
9299         struct mlx5_priv *priv = dev->data->dev_private;
9300         uint16_t expected = AGE_CANDIDATE;
9301
9302         age_info = GET_PORT_AGE_INFO(priv);
9303         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9304                                          AGE_FREE, false, __ATOMIC_RELAXED,
9305                                          __ATOMIC_RELAXED)) {
9306                 /**
9307                  * We need the lock even it is age timeout,
9308                  * since age action may still in process.
9309                  */
9310                 rte_spinlock_lock(&age_info->aged_sl);
9311                 LIST_REMOVE(age, next);
9312                 rte_spinlock_unlock(&age_info->aged_sl);
9313                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9314         }
9315 }
9316
/**
 * Release an ASO age action.
 *
 * Drops one reference; when the last reference goes away the action is
 * detached from the aged list and returned to the manager's free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] age_idx
 *   Index of ASO age action to release.
 *
 * @return
 *   0 when age action was removed, otherwise the number of references
 *   still held.
 */
static int
flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
        struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
        uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);

        if (!ret) {
                /* Last reference: recycle the action into the free list. */
                flow_dv_aso_age_remove_from_age(dev, age);
                rte_spinlock_lock(&mng->free_sl);
                LIST_INSERT_HEAD(&mng->free, age, next);
                rte_spinlock_unlock(&mng->free_sl);
        }
        return ret;
}
9347
9348 /**
9349  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9350  *
9351  * @param[in] dev
9352  *   Pointer to the Ethernet device structure.
9353  *
9354  * @return
9355  *   0 on success, otherwise negative errno value and rte_errno is set.
9356  */
9357 static int
9358 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9359 {
9360         struct mlx5_priv *priv = dev->data->dev_private;
9361         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9362         void *old_pools = mng->pools;
9363         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9364         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9365         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9366
9367         if (!pools) {
9368                 rte_errno = ENOMEM;
9369                 return -ENOMEM;
9370         }
9371         if (old_pools) {
9372                 memcpy(pools, old_pools,
9373                        mng->n * sizeof(struct mlx5_flow_counter_pool *));
9374                 mlx5_free(old_pools);
9375         } else {
9376                 /* First ASO flow hit allocation - starting ASO data-path. */
9377                 int ret = mlx5_aso_queue_start(priv->sh);
9378
9379                 if (ret) {
9380                         mlx5_free(pools);
9381                         return ret;
9382                 }
9383         }
9384         mng->n = resize;
9385         mng->pools = pools;
9386         return 0;
9387 }
9388
/**
 * Create and initialize a new ASO aging pool.
 *
 * Creates the DevX flow-hit ASO object, allocates the pool, inserts it
 * into the manager's pools array (resizing the array if full), hands
 * the first action to the caller and pushes the remaining actions onto
 * the free list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] age_free
 *   Where to put the pointer of a new age action.
 *
 * @return
 *   The age actions pool pointer and @p age_free is set on success,
 *   NULL otherwise and rte_errno is set.
 */
static struct mlx5_aso_age_pool *
flow_dv_age_pool_create(struct rte_eth_dev *dev,
                        struct mlx5_aso_age_action **age_free)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
        struct mlx5_aso_age_pool *pool = NULL;
        struct mlx5_devx_obj *obj = NULL;
        uint32_t i;

        obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
                                                    priv->sh->pdn);
        if (!obj) {
                rte_errno = ENODATA;
                DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
                return NULL;
        }
        pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
        if (!pool) {
                /* Roll back the DevX object on allocation failure. */
                claim_zero(mlx5_devx_cmd_destroy(obj));
                rte_errno = ENOMEM;
                return NULL;
        }
        pool->flow_hit_aso_obj = obj;
        pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
        /* resize_sl protects the pools array and the next-index cursor. */
        rte_spinlock_lock(&mng->resize_sl);
        pool->index = mng->next;
        /* Resize pools array if there is no room for the new pool in it. */
        if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
                claim_zero(mlx5_devx_cmd_destroy(obj));
                mlx5_free(pool);
                rte_spinlock_unlock(&mng->resize_sl);
                return NULL;
        }
        mng->pools[pool->index] = pool;
        mng->next++;
        rte_spinlock_unlock(&mng->resize_sl);
        /* Assign the first action in the new pool, the rest go to free list. */
        *age_free = &pool->actions[0];
        for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
                pool->actions[i].offset = i;
                LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
        }
        return pool;
}
9446
/**
 * Allocate a ASO aging bit.
 *
 * Pops a free age action (creating a new pool if the free list is
 * empty), lazily creates its DR flow-hit action, and returns an encoded
 * index combining the pool index and the action offset.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
 */
static uint32_t
flow_dv_aso_age_alloc(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct mlx5_aso_age_pool *pool;
        struct mlx5_aso_age_action *age_free = NULL;
        struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;

        MLX5_ASSERT(mng);
        /* Try to get the next free age action bit. */
        rte_spinlock_lock(&mng->free_sl);
        age_free = LIST_FIRST(&mng->free);
        if (age_free) {
                LIST_REMOVE(age_free, next);
        } else if (!flow_dv_age_pool_create(dev, &age_free)) {
                rte_spinlock_unlock(&mng->free_sl);
                return 0; /* 0 is an error.*/
        }
        rte_spinlock_unlock(&mng->free_sl);
        /*
         * age_free->offset is the action's index inside its pool, so
         * (age_free - age_free->offset) points at the pool's actions[0];
         * container_of then recovers the enclosing pool.
         */
        pool = container_of
          ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
                  (age_free - age_free->offset), const struct mlx5_aso_age_pool,
                                                                       actions);
        if (!age_free->dr_action) {
                /* Lazily create the DR flow-hit action on first use. */
                age_free->dr_action = mlx5_glue->dr_action_create_flow_hit
                                                (pool->flow_hit_aso_obj->obj,
                                                 age_free->offset, REG_C_5);
                if (!age_free->dr_action) {
                        rte_errno = errno;
                        /* Put the action back so it is not leaked. */
                        rte_spinlock_lock(&mng->free_sl);
                        LIST_INSERT_HEAD(&mng->free, age_free, next);
                        rte_spinlock_unlock(&mng->free_sl);
                        return 0; /* 0 is an error.*/
                }
        }
        __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
        /* Pool index in the low 16 bits, (offset + 1) above, so 0 = error. */
        return pool->index | ((age_free->offset + 1) << 16);
}
9494
9495 /**
9496  * Create a age action using ASO mechanism.
9497  *
9498  * @param[in] dev
9499  *   Pointer to rte_eth_dev structure.
9500  * @param[in] age
9501  *   Pointer to the aging action configuration.
9502  *
9503  * @return
9504  *   Index to flow counter on success, 0 otherwise.
9505  */
9506 static uint32_t
9507 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
9508                                  const struct rte_flow_action_age *age)
9509 {
9510         uint32_t age_idx = 0;
9511         struct mlx5_aso_age_action *aso_age;
9512
9513         age_idx = flow_dv_aso_age_alloc(dev);
9514         if (!age_idx)
9515                 return 0;
9516         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
9517         aso_age->age_params.context = age->context;
9518         aso_age->age_params.timeout = age->timeout;
9519         aso_age->age_params.port_id = dev->data->port_id;
9520         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
9521                          __ATOMIC_RELAXED);
9522         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
9523                          __ATOMIC_RELAXED);
9524         return age_idx;
9525 }
9526
9527 /**
9528  * Fill the flow with DV spec, lock free
9529  * (mutex should be acquired by caller).
9530  *
9531  * @param[in] dev
9532  *   Pointer to rte_eth_dev structure.
9533  * @param[in, out] dev_flow
9534  *   Pointer to the sub flow.
9535  * @param[in] attr
9536  *   Pointer to the flow attributes.
9537  * @param[in] items
9538  *   Pointer to the list of items.
9539  * @param[in] actions
9540  *   Pointer to the list of actions.
9541  * @param[out] error
9542  *   Pointer to the error structure.
9543  *
9544  * @return
9545  *   0 on success, a negative errno value otherwise and rte_errno is set.
9546  */
9547 static int
9548 flow_dv_translate(struct rte_eth_dev *dev,
9549                   struct mlx5_flow *dev_flow,
9550                   const struct rte_flow_attr *attr,
9551                   const struct rte_flow_item items[],
9552                   const struct rte_flow_action actions[],
9553                   struct rte_flow_error *error)
9554 {
9555         struct mlx5_priv *priv = dev->data->dev_private;
9556         struct mlx5_dev_config *dev_conf = &priv->config;
9557         struct rte_flow *flow = dev_flow->flow;
9558         struct mlx5_flow_handle *handle = dev_flow->handle;
9559         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9560         struct mlx5_flow_rss_desc *rss_desc;
9561         uint64_t item_flags = 0;
9562         uint64_t last_item = 0;
9563         uint64_t action_flags = 0;
9564         uint64_t priority = attr->priority;
9565         struct mlx5_flow_dv_matcher matcher = {
9566                 .mask = {
9567                         .size = sizeof(matcher.mask.buf) -
9568                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9569                 },
9570         };
9571         int actions_n = 0;
9572         bool actions_end = false;
9573         union {
9574                 struct mlx5_flow_dv_modify_hdr_resource res;
9575                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9576                             sizeof(struct mlx5_modification_cmd) *
9577                             (MLX5_MAX_MODIFY_NUM + 1)];
9578         } mhdr_dummy;
9579         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9580         const struct rte_flow_action_count *count = NULL;
9581         const struct rte_flow_action_age *age = NULL;
9582         union flow_dv_attr flow_attr = { .attr = 0 };
9583         uint32_t tag_be;
9584         union mlx5_flow_tbl_key tbl_key;
9585         uint32_t modify_action_position = UINT32_MAX;
9586         void *match_mask = matcher.mask.buf;
9587         void *match_value = dev_flow->dv.value.buf;
9588         uint8_t next_protocol = 0xff;
9589         struct rte_vlan_hdr vlan = { 0 };
9590         struct mlx5_flow_dv_dest_array_resource mdest_res;
9591         struct mlx5_flow_dv_sample_resource sample_res;
9592         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9593         struct mlx5_flow_sub_actions_list *sample_act;
9594         uint32_t sample_act_pos = UINT32_MAX;
9595         uint32_t num_of_dest = 0;
9596         int tmp_actions_n = 0;
9597         uint32_t table;
9598         int ret = 0;
9599         const struct mlx5_flow_tunnel *tunnel;
9600         struct flow_grp_info grp_info = {
9601                 .external = !!dev_flow->external,
9602                 .transfer = !!attr->transfer,
9603                 .fdb_def_rule = !!priv->fdb_def_rule,
9604                 .skip_scale = !!dev_flow->skip_scale,
9605         };
9606
9607         if (!wks)
9608                 return rte_flow_error_set(error, ENOMEM,
9609                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9610                                           NULL,
9611                                           "failed to push flow workspace");
9612         rss_desc = &wks->rss_desc;
9613         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9614         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9615         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9616                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9617         /* update normal path action resource into last index of array */
9618         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9619         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9620                  flow_items_to_tunnel(items) :
9621                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9622                  flow_actions_to_tunnel(actions) :
9623                  dev_flow->tunnel ? dev_flow->tunnel : NULL;
9624         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9625                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9626         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9627                                 (dev, tunnel, attr, items, actions);
9628         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9629                                        &grp_info, error);
9630         if (ret)
9631                 return ret;
9632         dev_flow->dv.group = table;
9633         if (attr->transfer)
9634                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9635         if (priority == MLX5_FLOW_PRIO_RSVD)
9636                 priority = dev_conf->flow_prio - 1;
9637         /* number of actions must be set to 0 in case of dirty stack. */
9638         mhdr_res->actions_num = 0;
9639         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9640                 /*
9641                  * do not add decap action if match rule drops packet
9642                  * HW rejects rules with decap & drop
9643                  *
9644                  * if tunnel match rule was inserted before matching tunnel set
9645                  * rule flow table used in the match rule must be registered.
9646                  * current implementation handles that in the
9647                  * flow_dv_match_register() at the function end.
9648                  */
9649                 bool add_decap = true;
9650                 const struct rte_flow_action *ptr = actions;
9651
9652                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9653                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9654                                 add_decap = false;
9655                                 break;
9656                         }
9657                 }
9658                 if (add_decap) {
9659                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9660                                                            attr->transfer,
9661                                                            error))
9662                                 return -rte_errno;
9663                         dev_flow->dv.actions[actions_n++] =
9664                                         dev_flow->dv.encap_decap->action;
9665                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9666                 }
9667         }
9668         for (; !actions_end ; actions++) {
9669                 const struct rte_flow_action_queue *queue;
9670                 const struct rte_flow_action_rss *rss;
9671                 const struct rte_flow_action *action = actions;
9672                 const uint8_t *rss_key;
9673                 const struct rte_flow_action_meter *mtr;
9674                 struct mlx5_flow_tbl_resource *tbl;
9675                 struct mlx5_aso_age_action *age_act;
9676                 uint32_t port_id = 0;
9677                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9678                 int action_type = actions->type;
9679                 const struct rte_flow_action *found_action = NULL;
9680                 struct mlx5_flow_meter *fm = NULL;
9681                 uint32_t jump_group = 0;
9682
9683                 if (!mlx5_flow_os_action_supported(action_type))
9684                         return rte_flow_error_set(error, ENOTSUP,
9685                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9686                                                   actions,
9687                                                   "action not supported");
9688                 switch (action_type) {
9689                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9690                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9691                         break;
9692                 case RTE_FLOW_ACTION_TYPE_VOID:
9693                         break;
9694                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9695                         if (flow_dv_translate_action_port_id(dev, action,
9696                                                              &port_id, error))
9697                                 return -rte_errno;
9698                         port_id_resource.port_id = port_id;
9699                         MLX5_ASSERT(!handle->rix_port_id_action);
9700                         if (flow_dv_port_id_action_resource_register
9701                             (dev, &port_id_resource, dev_flow, error))
9702                                 return -rte_errno;
9703                         dev_flow->dv.actions[actions_n++] =
9704                                         dev_flow->dv.port_id_action->action;
9705                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9706                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9707                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9708                         num_of_dest++;
9709                         break;
9710                 case RTE_FLOW_ACTION_TYPE_FLAG:
9711                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9712                         dev_flow->handle->mark = 1;
9713                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9714                                 struct rte_flow_action_mark mark = {
9715                                         .id = MLX5_FLOW_MARK_DEFAULT,
9716                                 };
9717
9718                                 if (flow_dv_convert_action_mark(dev, &mark,
9719                                                                 mhdr_res,
9720                                                                 error))
9721                                         return -rte_errno;
9722                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9723                                 break;
9724                         }
9725                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9726                         /*
9727                          * Only one FLAG or MARK is supported per device flow
9728                          * right now. So the pointer to the tag resource must be
9729                          * zero before the register process.
9730                          */
9731                         MLX5_ASSERT(!handle->dvh.rix_tag);
9732                         if (flow_dv_tag_resource_register(dev, tag_be,
9733                                                           dev_flow, error))
9734                                 return -rte_errno;
9735                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9736                         dev_flow->dv.actions[actions_n++] =
9737                                         dev_flow->dv.tag_resource->action;
9738                         break;
9739                 case RTE_FLOW_ACTION_TYPE_MARK:
9740                         action_flags |= MLX5_FLOW_ACTION_MARK;
9741                         dev_flow->handle->mark = 1;
9742                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9743                                 const struct rte_flow_action_mark *mark =
9744                                         (const struct rte_flow_action_mark *)
9745                                                 actions->conf;
9746
9747                                 if (flow_dv_convert_action_mark(dev, mark,
9748                                                                 mhdr_res,
9749                                                                 error))
9750                                         return -rte_errno;
9751                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9752                                 break;
9753                         }
9754                         /* Fall-through */
9755                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9756                         /* Legacy (non-extensive) MARK action. */
9757                         tag_be = mlx5_flow_mark_set
9758                               (((const struct rte_flow_action_mark *)
9759                                (actions->conf))->id);
9760                         MLX5_ASSERT(!handle->dvh.rix_tag);
9761                         if (flow_dv_tag_resource_register(dev, tag_be,
9762                                                           dev_flow, error))
9763                                 return -rte_errno;
9764                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9765                         dev_flow->dv.actions[actions_n++] =
9766                                         dev_flow->dv.tag_resource->action;
9767                         break;
9768                 case RTE_FLOW_ACTION_TYPE_SET_META:
9769                         if (flow_dv_convert_action_set_meta
9770                                 (dev, mhdr_res, attr,
9771                                  (const struct rte_flow_action_set_meta *)
9772                                   actions->conf, error))
9773                                 return -rte_errno;
9774                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9775                         break;
9776                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9777                         if (flow_dv_convert_action_set_tag
9778                                 (dev, mhdr_res,
9779                                  (const struct rte_flow_action_set_tag *)
9780                                   actions->conf, error))
9781                                 return -rte_errno;
9782                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9783                         break;
9784                 case RTE_FLOW_ACTION_TYPE_DROP:
9785                         action_flags |= MLX5_FLOW_ACTION_DROP;
9786                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9787                         break;
9788                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9789                         queue = actions->conf;
9790                         rss_desc->queue_num = 1;
9791                         rss_desc->queue[0] = queue->index;
9792                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9793                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9794                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9795                         num_of_dest++;
9796                         break;
9797                 case RTE_FLOW_ACTION_TYPE_RSS:
9798                         rss = actions->conf;
9799                         memcpy(rss_desc->queue, rss->queue,
9800                                rss->queue_num * sizeof(uint16_t));
9801                         rss_desc->queue_num = rss->queue_num;
9802                         /* NULL RSS key indicates default RSS key. */
9803                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9804                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9805                         /*
9806                          * rss->level and rss.types should be set in advance
9807                          * when expanding items for RSS.
9808                          */
9809                         action_flags |= MLX5_FLOW_ACTION_RSS;
9810                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
9811                                 MLX5_FLOW_FATE_SHARED_RSS :
9812                                 MLX5_FLOW_FATE_QUEUE;
9813                         break;
9814                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
9815                         flow->age = (uint32_t)(uintptr_t)(action->conf);
9816                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
9817                         __atomic_fetch_add(&age_act->refcnt, 1,
9818                                            __ATOMIC_RELAXED);
9819                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
9820                         action_flags |= MLX5_FLOW_ACTION_AGE;
9821                         break;
9822                 case RTE_FLOW_ACTION_TYPE_AGE:
9823                         if (priv->sh->flow_hit_aso_en && attr->group) {
9824                                 flow->age = flow_dv_translate_create_aso_age
9825                                                 (dev, action->conf);
9826                                 if (!flow->age)
9827                                         return rte_flow_error_set
9828                                                 (error, rte_errno,
9829                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9830                                                  NULL,
9831                                                  "can't create ASO age action");
9832                                 dev_flow->dv.actions[actions_n++] =
9833                                           (flow_aso_age_get_by_idx
9834                                                 (dev, flow->age))->dr_action;
9835                                 action_flags |= MLX5_FLOW_ACTION_AGE;
9836                                 break;
9837                         }
9838                         /* Fall-through */
9839                 case RTE_FLOW_ACTION_TYPE_COUNT:
9840                         if (!dev_conf->devx) {
9841                                 return rte_flow_error_set
9842                                               (error, ENOTSUP,
9843                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9844                                                NULL,
9845                                                "count action not supported");
9846                         }
9847                         /* Save information first, will apply later. */
9848                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9849                                 count = action->conf;
9850                         else
9851                                 age = action->conf;
9852                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9853                         break;
9854                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9855                         dev_flow->dv.actions[actions_n++] =
9856                                                 priv->sh->pop_vlan_action;
9857                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9858                         break;
9859                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
9860                         if (!(action_flags &
9861                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9862                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9863                         vlan.eth_proto = rte_be_to_cpu_16
9864                              ((((const struct rte_flow_action_of_push_vlan *)
9865                                                    actions->conf)->ethertype));
9866                         found_action = mlx5_flow_find_action
9867                                         (actions + 1,
9868                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9869                         if (found_action)
9870                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9871                         found_action = mlx5_flow_find_action
9872                                         (actions + 1,
9873                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9874                         if (found_action)
9875                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9876                         if (flow_dv_create_action_push_vlan
9877                                             (dev, attr, &vlan, dev_flow, error))
9878                                 return -rte_errno;
9879                         dev_flow->dv.actions[actions_n++] =
9880                                         dev_flow->dv.push_vlan_res->action;
9881                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9882                         break;
9883                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9884                         /* of_vlan_push action handled this action */
9885                         MLX5_ASSERT(action_flags &
9886                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9887                         break;
9888                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9889                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9890                                 break;
9891                         flow_dev_get_vlan_info_from_items(items, &vlan);
9892                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9893                         /* If no VLAN push - this is a modify header action */
9894                         if (flow_dv_convert_action_modify_vlan_vid
9895                                                 (mhdr_res, actions, error))
9896                                 return -rte_errno;
9897                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9898                         break;
9899                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9900                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9901                         if (flow_dv_create_action_l2_encap(dev, actions,
9902                                                            dev_flow,
9903                                                            attr->transfer,
9904                                                            error))
9905                                 return -rte_errno;
9906                         dev_flow->dv.actions[actions_n++] =
9907                                         dev_flow->dv.encap_decap->action;
9908                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9909                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9910                                 sample_act->action_flags |=
9911                                                         MLX5_FLOW_ACTION_ENCAP;
9912                         break;
9913                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9914                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9915                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9916                                                            attr->transfer,
9917                                                            error))
9918                                 return -rte_errno;
9919                         dev_flow->dv.actions[actions_n++] =
9920                                         dev_flow->dv.encap_decap->action;
9921                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9922                         break;
9923                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9924                         /* Handle encap with preceding decap. */
9925                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
9926                                 if (flow_dv_create_action_raw_encap
9927                                         (dev, actions, dev_flow, attr, error))
9928                                         return -rte_errno;
9929                                 dev_flow->dv.actions[actions_n++] =
9930                                         dev_flow->dv.encap_decap->action;
9931                         } else {
9932                                 /* Handle encap without preceding decap. */
9933                                 if (flow_dv_create_action_l2_encap
9934                                     (dev, actions, dev_flow, attr->transfer,
9935                                      error))
9936                                         return -rte_errno;
9937                                 dev_flow->dv.actions[actions_n++] =
9938                                         dev_flow->dv.encap_decap->action;
9939                         }
9940                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9941                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9942                                 sample_act->action_flags |=
9943                                                         MLX5_FLOW_ACTION_ENCAP;
9944                         break;
9945                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
9946                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
9947                                 ;
9948                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
9949                                 if (flow_dv_create_action_l2_decap
9950                                     (dev, dev_flow, attr->transfer, error))
9951                                         return -rte_errno;
9952                                 dev_flow->dv.actions[actions_n++] =
9953                                         dev_flow->dv.encap_decap->action;
9954                         }
9955                         /* If decap is followed by encap, handle it at encap. */
9956                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9957                         break;
9958                 case RTE_FLOW_ACTION_TYPE_JUMP:
9959                         jump_group = ((const struct rte_flow_action_jump *)
9960                                                         action->conf)->group;
9961                         grp_info.std_tbl_fix = 0;
9962                         grp_info.skip_scale = 0;
9963                         ret = mlx5_flow_group_to_table(dev, tunnel,
9964                                                        jump_group,
9965                                                        &table,
9966                                                        &grp_info, error);
9967                         if (ret)
9968                                 return ret;
9969                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
9970                                                        attr->transfer,
9971                                                        !!dev_flow->external,
9972                                                        tunnel, jump_group, 0,
9973                                                        error);
9974                         if (!tbl)
9975                                 return rte_flow_error_set
9976                                                 (error, errno,
9977                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9978                                                  NULL,
9979                                                  "cannot create jump action.");
9980                         if (flow_dv_jump_tbl_resource_register
9981                             (dev, tbl, dev_flow, error)) {
9982                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
9983                                 return rte_flow_error_set
9984                                                 (error, errno,
9985                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9986                                                  NULL,
9987                                                  "cannot create jump action.");
9988                         }
9989                         dev_flow->dv.actions[actions_n++] =
9990                                         dev_flow->dv.jump->action;
9991                         action_flags |= MLX5_FLOW_ACTION_JUMP;
9992                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
9993                         break;
9994                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
9995                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
9996                         if (flow_dv_convert_action_modify_mac
9997                                         (mhdr_res, actions, error))
9998                                 return -rte_errno;
9999                         action_flags |= actions->type ==
10000                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
10001                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
10002                                         MLX5_FLOW_ACTION_SET_MAC_DST;
10003                         break;
10004                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
10005                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
10006                         if (flow_dv_convert_action_modify_ipv4
10007                                         (mhdr_res, actions, error))
10008                                 return -rte_errno;
10009                         action_flags |= actions->type ==
10010                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
10011                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
10012                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
10013                         break;
10014                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
10015                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
10016                         if (flow_dv_convert_action_modify_ipv6
10017                                         (mhdr_res, actions, error))
10018                                 return -rte_errno;
10019                         action_flags |= actions->type ==
10020                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
10021                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
10022                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
10023                         break;
10024                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
10025                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
10026                         if (flow_dv_convert_action_modify_tp
10027                                         (mhdr_res, actions, items,
10028                                          &flow_attr, dev_flow, !!(action_flags &
10029                                          MLX5_FLOW_ACTION_DECAP), error))
10030                                 return -rte_errno;
10031                         action_flags |= actions->type ==
10032                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10033                                         MLX5_FLOW_ACTION_SET_TP_SRC :
10034                                         MLX5_FLOW_ACTION_SET_TP_DST;
10035                         break;
10036                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10037                         if (flow_dv_convert_action_modify_dec_ttl
10038                                         (mhdr_res, items, &flow_attr, dev_flow,
10039                                          !!(action_flags &
10040                                          MLX5_FLOW_ACTION_DECAP), error))
10041                                 return -rte_errno;
10042                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10043                         break;
10044                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
10045                         if (flow_dv_convert_action_modify_ttl
10046                                         (mhdr_res, actions, items, &flow_attr,
10047                                          dev_flow, !!(action_flags &
10048                                          MLX5_FLOW_ACTION_DECAP), error))
10049                                 return -rte_errno;
10050                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10051                         break;
10052                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10053                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10054                         if (flow_dv_convert_action_modify_tcp_seq
10055                                         (mhdr_res, actions, error))
10056                                 return -rte_errno;
10057                         action_flags |= actions->type ==
10058                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10059                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
10060                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10061                         break;
10062
10063                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10064                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10065                         if (flow_dv_convert_action_modify_tcp_ack
10066                                         (mhdr_res, actions, error))
10067                                 return -rte_errno;
10068                         action_flags |= actions->type ==
10069                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10070                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
10071                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
10072                         break;
10073                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10074                         if (flow_dv_convert_action_set_reg
10075                                         (mhdr_res, actions, error))
10076                                 return -rte_errno;
10077                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10078                         break;
10079                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10080                         if (flow_dv_convert_action_copy_mreg
10081                                         (dev, mhdr_res, actions, error))
10082                                 return -rte_errno;
10083                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10084                         break;
10085                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10086                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10087                         dev_flow->handle->fate_action =
10088                                         MLX5_FLOW_FATE_DEFAULT_MISS;
10089                         break;
10090                 case RTE_FLOW_ACTION_TYPE_METER:
10091                         mtr = actions->conf;
10092                         if (!flow->meter) {
10093                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10094                                                             attr, error);
10095                                 if (!fm)
10096                                         return rte_flow_error_set(error,
10097                                                 rte_errno,
10098                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10099                                                 NULL,
10100                                                 "meter not found "
10101                                                 "or invalid parameters");
10102                                 flow->meter = fm->idx;
10103                         }
10104                         /* Set the meter action. */
10105                         if (!fm) {
10106                                 fm = mlx5_ipool_get(priv->sh->ipool
10107                                                 [MLX5_IPOOL_MTR], flow->meter);
10108                                 if (!fm)
10109                                         return rte_flow_error_set(error,
10110                                                 rte_errno,
10111                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10112                                                 NULL,
10113                                                 "meter not found "
10114                                                 "or invalid parameters");
10115                         }
10116                         dev_flow->dv.actions[actions_n++] =
10117                                 fm->mfts->meter_action;
10118                         action_flags |= MLX5_FLOW_ACTION_METER;
10119                         break;
10120                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10121                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10122                                                               actions, error))
10123                                 return -rte_errno;
10124                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10125                         break;
10126                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10127                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10128                                                               actions, error))
10129                                 return -rte_errno;
10130                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10131                         break;
10132                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
10133                         sample_act_pos = actions_n;
10134                         ret = flow_dv_translate_action_sample(dev,
10135                                                               actions,
10136                                                               dev_flow, attr,
10137                                                               &num_of_dest,
10138                                                               sample_actions,
10139                                                               &sample_res,
10140                                                               error);
10141                         if (ret < 0)
10142                                 return ret;
10143                         actions_n++;
10144                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10145                         /* put encap action into group if work with port id */
10146                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10147                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10148                                 sample_act->action_flags |=
10149                                                         MLX5_FLOW_ACTION_ENCAP;
10150                         break;
10151                 case RTE_FLOW_ACTION_TYPE_END:
10152                         actions_end = true;
10153                         if (mhdr_res->actions_num) {
10154                                 /* create modify action if needed. */
10155                                 if (flow_dv_modify_hdr_resource_register
10156                                         (dev, mhdr_res, dev_flow, error))
10157                                         return -rte_errno;
10158                                 dev_flow->dv.actions[modify_action_position] =
10159                                         handle->dvh.modify_hdr->action;
10160                         }
10161                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10162                                 flow->counter =
10163                                         flow_dv_translate_create_counter(dev,
10164                                                 dev_flow, count, age);
10165
10166                                 if (!flow->counter)
10167                                         return rte_flow_error_set
10168                                                 (error, rte_errno,
10169                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10170                                                 NULL,
10171                                                 "cannot create counter"
10172                                                 " object.");
10173                                 dev_flow->dv.actions[actions_n] =
10174                                           (flow_dv_counter_get_by_idx(dev,
10175                                           flow->counter, NULL))->action;
10176                                 actions_n++;
10177                         }
10178                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
10179                                 ret = flow_dv_create_action_sample(dev,
10180                                                           dev_flow,
10181                                                           num_of_dest,
10182                                                           &sample_res,
10183                                                           &mdest_res,
10184                                                           sample_actions,
10185                                                           action_flags,
10186                                                           error);
10187                                 if (ret < 0)
10188                                         return rte_flow_error_set
10189                                                 (error, rte_errno,
10190                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10191                                                 NULL,
10192                                                 "cannot create sample action");
10193                                 if (num_of_dest > 1) {
10194                                         dev_flow->dv.actions[sample_act_pos] =
10195                                         dev_flow->dv.dest_array_res->action;
10196                                 } else {
10197                                         dev_flow->dv.actions[sample_act_pos] =
10198                                         dev_flow->dv.sample_res->verbs_action;
10199                                 }
10200                         }
10201                         break;
10202                 default:
10203                         break;
10204                 }
10205                 if (mhdr_res->actions_num &&
10206                     modify_action_position == UINT32_MAX)
10207                         modify_action_position = actions_n++;
10208         }
	/*
	 * For multiple destinations (sample action with ratio=1), the encap
	 * action and the port id action are combined into a group action.
	 * Therefore the original encap/port-id actions must be removed from
	 * the flow's action list and only the sample action used instead.
	 */
10215         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
10216                 int i;
10217                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10218
10219                 for (i = 0; i < actions_n; i++) {
10220                         if ((sample_act->dr_encap_action &&
10221                                 sample_act->dr_encap_action ==
10222                                 dev_flow->dv.actions[i]) ||
10223                                 (sample_act->dr_port_id_action &&
10224                                 sample_act->dr_port_id_action ==
10225                                 dev_flow->dv.actions[i]))
10226                                 continue;
10227                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
10228                 }
10229                 memcpy((void *)dev_flow->dv.actions,
10230                                 (void *)temp_actions,
10231                                 tmp_actions_n * sizeof(void *));
10232                 actions_n = tmp_actions_n;
10233         }
10234         dev_flow->dv.actions_n = actions_n;
10235         dev_flow->act_flags = action_flags;
10236         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10237                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10238                 int item_type = items->type;
10239
10240                 if (!mlx5_flow_os_item_supported(item_type))
10241                         return rte_flow_error_set(error, ENOTSUP,
10242                                                   RTE_FLOW_ERROR_TYPE_ITEM,
10243                                                   NULL, "item not supported");
10244                 switch (item_type) {
10245                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
10246                         flow_dv_translate_item_port_id
10247                                 (dev, match_mask, match_value, items, attr);
10248                         last_item = MLX5_FLOW_ITEM_PORT_ID;
10249                         break;
10250                 case RTE_FLOW_ITEM_TYPE_ETH:
10251                         flow_dv_translate_item_eth(match_mask, match_value,
10252                                                    items, tunnel,
10253                                                    dev_flow->dv.group);
10254                         matcher.priority = action_flags &
10255                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
10256                                         !dev_flow->external ?
10257                                         MLX5_PRIORITY_MAP_L3 :
10258                                         MLX5_PRIORITY_MAP_L2;
10259                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10260                                              MLX5_FLOW_LAYER_OUTER_L2;
10261                         break;
10262                 case RTE_FLOW_ITEM_TYPE_VLAN:
10263                         flow_dv_translate_item_vlan(dev_flow,
10264                                                     match_mask, match_value,
10265                                                     items, tunnel,
10266                                                     dev_flow->dv.group);
10267                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10268                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10269                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10270                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10271                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10272                         break;
10273                 case RTE_FLOW_ITEM_TYPE_IPV4:
10274                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10275                                                   &item_flags, &tunnel);
10276                         flow_dv_translate_item_ipv4(match_mask, match_value,
10277                                                     items, tunnel,
10278                                                     dev_flow->dv.group);
10279                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10280                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10281                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10282                         if (items->mask != NULL &&
10283                             ((const struct rte_flow_item_ipv4 *)
10284                              items->mask)->hdr.next_proto_id) {
10285                                 next_protocol =
10286                                         ((const struct rte_flow_item_ipv4 *)
10287                                          (items->spec))->hdr.next_proto_id;
10288                                 next_protocol &=
10289                                         ((const struct rte_flow_item_ipv4 *)
10290                                          (items->mask))->hdr.next_proto_id;
10291                         } else {
10292                                 /* Reset for inner layer. */
10293                                 next_protocol = 0xff;
10294                         }
10295                         break;
10296                 case RTE_FLOW_ITEM_TYPE_IPV6:
10297                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10298                                                   &item_flags, &tunnel);
10299                         flow_dv_translate_item_ipv6(match_mask, match_value,
10300                                                     items, tunnel,
10301                                                     dev_flow->dv.group);
10302                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10303                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10304                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10305                         if (items->mask != NULL &&
10306                             ((const struct rte_flow_item_ipv6 *)
10307                              items->mask)->hdr.proto) {
10308                                 next_protocol =
10309                                         ((const struct rte_flow_item_ipv6 *)
10310                                          items->spec)->hdr.proto;
10311                                 next_protocol &=
10312                                         ((const struct rte_flow_item_ipv6 *)
10313                                          items->mask)->hdr.proto;
10314                         } else {
10315                                 /* Reset for inner layer. */
10316                                 next_protocol = 0xff;
10317                         }
10318                         break;
10319                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10320                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10321                                                              match_value,
10322                                                              items, tunnel);
10323                         last_item = tunnel ?
10324                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10325                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10326                         if (items->mask != NULL &&
10327                             ((const struct rte_flow_item_ipv6_frag_ext *)
10328                              items->mask)->hdr.next_header) {
10329                                 next_protocol =
10330                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10331                                  items->spec)->hdr.next_header;
10332                                 next_protocol &=
10333                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10334                                  items->mask)->hdr.next_header;
10335                         } else {
10336                                 /* Reset for inner layer. */
10337                                 next_protocol = 0xff;
10338                         }
10339                         break;
10340                 case RTE_FLOW_ITEM_TYPE_TCP:
10341                         flow_dv_translate_item_tcp(match_mask, match_value,
10342                                                    items, tunnel);
10343                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10344                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10345                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10346                         break;
10347                 case RTE_FLOW_ITEM_TYPE_UDP:
10348                         flow_dv_translate_item_udp(match_mask, match_value,
10349                                                    items, tunnel);
10350                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10351                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10352                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10353                         break;
10354                 case RTE_FLOW_ITEM_TYPE_GRE:
10355                         flow_dv_translate_item_gre(match_mask, match_value,
10356                                                    items, tunnel);
10357                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10358                         last_item = MLX5_FLOW_LAYER_GRE;
10359                         break;
10360                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10361                         flow_dv_translate_item_gre_key(match_mask,
10362                                                        match_value, items);
10363                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10364                         break;
10365                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10366                         flow_dv_translate_item_nvgre(match_mask, match_value,
10367                                                      items, tunnel);
10368                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10369                         last_item = MLX5_FLOW_LAYER_GRE;
10370                         break;
10371                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10372                         flow_dv_translate_item_vxlan(match_mask, match_value,
10373                                                      items, tunnel);
10374                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10375                         last_item = MLX5_FLOW_LAYER_VXLAN;
10376                         break;
10377                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10378                         flow_dv_translate_item_vxlan_gpe(match_mask,
10379                                                          match_value, items,
10380                                                          tunnel);
10381                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10382                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10383                         break;
10384                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10385                         flow_dv_translate_item_geneve(match_mask, match_value,
10386                                                       items, tunnel);
10387                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10388                         last_item = MLX5_FLOW_LAYER_GENEVE;
10389                         break;
10390                 case RTE_FLOW_ITEM_TYPE_MPLS:
10391                         flow_dv_translate_item_mpls(match_mask, match_value,
10392                                                     items, last_item, tunnel);
10393                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10394                         last_item = MLX5_FLOW_LAYER_MPLS;
10395                         break;
10396                 case RTE_FLOW_ITEM_TYPE_MARK:
10397                         flow_dv_translate_item_mark(dev, match_mask,
10398                                                     match_value, items);
10399                         last_item = MLX5_FLOW_ITEM_MARK;
10400                         break;
10401                 case RTE_FLOW_ITEM_TYPE_META:
10402                         flow_dv_translate_item_meta(dev, match_mask,
10403                                                     match_value, attr, items);
10404                         last_item = MLX5_FLOW_ITEM_METADATA;
10405                         break;
10406                 case RTE_FLOW_ITEM_TYPE_ICMP:
10407                         flow_dv_translate_item_icmp(match_mask, match_value,
10408                                                     items, tunnel);
10409                         last_item = MLX5_FLOW_LAYER_ICMP;
10410                         break;
10411                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10412                         flow_dv_translate_item_icmp6(match_mask, match_value,
10413                                                       items, tunnel);
10414                         last_item = MLX5_FLOW_LAYER_ICMP6;
10415                         break;
10416                 case RTE_FLOW_ITEM_TYPE_TAG:
10417                         flow_dv_translate_item_tag(dev, match_mask,
10418                                                    match_value, items);
10419                         last_item = MLX5_FLOW_ITEM_TAG;
10420                         break;
10421                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10422                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10423                                                         match_value, items);
10424                         last_item = MLX5_FLOW_ITEM_TAG;
10425                         break;
10426                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10427                         flow_dv_translate_item_tx_queue(dev, match_mask,
10428                                                         match_value,
10429                                                         items);
10430                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10431                         break;
10432                 case RTE_FLOW_ITEM_TYPE_GTP:
10433                         flow_dv_translate_item_gtp(match_mask, match_value,
10434                                                    items, tunnel);
10435                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10436                         last_item = MLX5_FLOW_LAYER_GTP;
10437                         break;
10438                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10439                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10440                                 /* Create it only the first time to be used. */
10441                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10442                                 if (ret)
10443                                         return rte_flow_error_set
10444                                                 (error, -ret,
10445                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10446                                                 NULL,
10447                                                 "cannot create eCPRI parser");
10448                         }
10449                         /* Adjust the length matcher and device flow value. */
10450                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10451                         dev_flow->dv.value.size =
10452                                         MLX5_ST_SZ_BYTES(fte_match_param);
10453                         flow_dv_translate_item_ecpri(dev, match_mask,
10454                                                      match_value, items);
10455                         /* No other protocol should follow eCPRI layer. */
10456                         last_item = MLX5_FLOW_LAYER_ECPRI;
10457                         break;
10458                 default:
10459                         break;
10460                 }
10461                 item_flags |= last_item;
10462         }
10463         /*
10464          * When E-Switch mode is enabled, we have two cases where we need to
10465          * set the source port manually.
10466          * The first one, is in case of Nic steering rule, and the second is
10467          * E-Switch rule where no port_id item was found. In both cases
10468          * the source port is set according the current port in use.
10469          */
10470         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10471             (priv->representor || priv->master)) {
10472                 if (flow_dv_translate_item_port_id(dev, match_mask,
10473                                                    match_value, NULL, attr))
10474                         return -rte_errno;
10475         }
10476 #ifdef RTE_LIBRTE_MLX5_DEBUG
10477         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10478                                               dev_flow->dv.value.buf));
10479 #endif
10480         /*
10481          * Layers may be already initialized from prefix flow if this dev_flow
10482          * is the suffix flow.
10483          */
10484         handle->layers |= item_flags;
10485         if (action_flags & MLX5_FLOW_ACTION_RSS)
10486                 flow_dv_hashfields_set(dev_flow, rss_desc);
10487         /* Register matcher. */
10488         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10489                                     matcher.mask.size);
10490         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
10491                                                      matcher.priority);
10492         /* reserved field no needs to be set to 0 here. */
10493         tbl_key.domain = attr->transfer;
10494         tbl_key.direction = attr->egress;
10495         tbl_key.table_id = dev_flow->dv.group;
10496         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
10497                                      tunnel, attr->group, error))
10498                 return -rte_errno;
10499         return 0;
10500 }
10501
10502 /**
10503  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10504  * and tunnel.
10505  *
10506  * @param[in, out] action
10507  *   Shred RSS action holding hash RX queue objects.
10508  * @param[in] hash_fields
10509  *   Defines combination of packet fields to participate in RX hash.
10510  * @param[in] tunnel
10511  *   Tunnel type
10512  * @param[in] hrxq_idx
10513  *   Hash RX queue index to set.
10514  *
10515  * @return
10516  *   0 on success, otherwise negative errno value.
10517  */
10518 static int
10519 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10520                               const uint64_t hash_fields,
10521                               const int tunnel,
10522                               uint32_t hrxq_idx)
10523 {
10524         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10525
10526         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10527         case MLX5_RSS_HASH_IPV4:
10528                 hrxqs[0] = hrxq_idx;
10529                 return 0;
10530         case MLX5_RSS_HASH_IPV4_TCP:
10531                 hrxqs[1] = hrxq_idx;
10532                 return 0;
10533         case MLX5_RSS_HASH_IPV4_UDP:
10534                 hrxqs[2] = hrxq_idx;
10535                 return 0;
10536         case MLX5_RSS_HASH_IPV6:
10537                 hrxqs[3] = hrxq_idx;
10538                 return 0;
10539         case MLX5_RSS_HASH_IPV6_TCP:
10540                 hrxqs[4] = hrxq_idx;
10541                 return 0;
10542         case MLX5_RSS_HASH_IPV6_UDP:
10543                 hrxqs[5] = hrxq_idx;
10544                 return 0;
10545         case MLX5_RSS_HASH_NONE:
10546                 hrxqs[6] = hrxq_idx;
10547                 return 0;
10548         default:
10549                 return -1;
10550         }
10551 }
10552
10553 /**
10554  * Look up for hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10555  * and tunnel.
10556  *
10557  * @param[in] dev
10558  *   Pointer to the Ethernet device structure.
10559  * @param[in] idx
10560  *   Shared RSS action ID holding hash RX queue objects.
10561  * @param[in] hash_fields
10562  *   Defines combination of packet fields to participate in RX hash.
10563  * @param[in] tunnel
10564  *   Tunnel type
10565  *
10566  * @return
10567  *   Valid hash RX queue index, otherwise 0.
10568  */
10569 static uint32_t
10570 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
10571                                  const uint64_t hash_fields,
10572                                  const int tunnel)
10573 {
10574         struct mlx5_priv *priv = dev->data->dev_private;
10575         struct mlx5_shared_action_rss *shared_rss =
10576             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
10577         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
10578                                                         shared_rss->hrxq_tunnel;
10579
10580         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10581         case MLX5_RSS_HASH_IPV4:
10582                 return hrxqs[0];
10583         case MLX5_RSS_HASH_IPV4_TCP:
10584                 return hrxqs[1];
10585         case MLX5_RSS_HASH_IPV4_UDP:
10586                 return hrxqs[2];
10587         case MLX5_RSS_HASH_IPV6:
10588                 return hrxqs[3];
10589         case MLX5_RSS_HASH_IPV6_TCP:
10590                 return hrxqs[4];
10591         case MLX5_RSS_HASH_IPV6_UDP:
10592                 return hrxqs[5];
10593         case MLX5_RSS_HASH_NONE:
10594                 return hrxqs[6];
10595         default:
10596                 return 0;
10597         }
10598 }
10599
10600 /**
10601  * Retrieves hash RX queue suitable for the *flow*.
10602  * If shared action configured for *flow* suitable hash RX queue will be
10603  * retrieved from attached shared action.
10604  *
10605  * @param[in] dev
10606  *   Pointer to the Ethernet device structure.
10607  * @param[in] dev_flow
10608  *   Pointer to the sub flow.
10609  * @param[in] rss_desc
10610  *   Pointer to the RSS descriptor.
10611  * @param[out] hrxq
10612  *   Pointer to retrieved hash RX queue object.
10613  *
10614  * @return
10615  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10616  */
10617 static uint32_t
10618 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
10619                        struct mlx5_flow_rss_desc *rss_desc,
10620                        struct mlx5_hrxq **hrxq)
10621 {
10622         struct mlx5_priv *priv = dev->data->dev_private;
10623         uint32_t hrxq_idx;
10624
10625         if (rss_desc->shared_rss) {
10626                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10627                                 (dev, rss_desc->shared_rss,
10628                                  dev_flow->hash_fields,
10629                                  !!(dev_flow->handle->layers &
10630                                     MLX5_FLOW_LAYER_TUNNEL));
10631                 if (hrxq_idx)
10632                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10633                                                hrxq_idx);
10634         } else {
10635                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10636                                              &hrxq_idx);
10637         }
10638         return hrxq_idx;
10639 }
10640
/**
 * Apply the flow to the NIC, lock free,
 * (mutex should be acquired by caller).
 *
 * Walks the per-thread workspace flows in reverse creation order, appends
 * the fate action (drop / queue / shared RSS / default miss) to each
 * sub-flow's DV action list and creates the hardware flow rule.
 * On failure every partially-applied handle of @p flow is rolled back.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv_workspace *dv;
	struct mlx5_flow_handle *dh;
	struct mlx5_flow_handle_dv *dv_h;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	int n;
	int err;
	int idx;
	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
	struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;

	MLX5_ASSERT(wks);
	if (rss_desc->shared_rss) {
		/*
		 * Record the shared RSS index on the last workspace handle so
		 * the reference can be dropped on destroy (or on the error
		 * path below).
		 */
		dh = wks->flows[wks->flow_idx - 1].handle;
		MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
		dh->rix_srss = rss_desc->shared_rss;
	}
	for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
		dev_flow = &wks->flows[idx];
		dv = &dev_flow->dv;
		dh = dev_flow->handle;
		dv_h = &dh->dvh;
		n = dv->actions_n;
		/* Append the fate action matching this handle's fate kind. */
		if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
			if (dv->transfer) {
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				MLX5_ASSERT(priv->drop_queue.hrxq);
				dv->actions[n++] =
						priv->drop_queue.hrxq->action;
			}
		} else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
			   !dv_h->rix_sample && !dv_h->rix_dest_array) ||
			    (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
			struct mlx5_hrxq *hrxq = NULL;
			uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
						(dev, dev_flow, rss_desc, &hrxq);

			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			/*
			 * Only a plain queue fate owns the hrxq reference;
			 * shared RSS keeps its own references elsewhere.
			 */
			if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
				dh->rix_hrxq = hrxq_idx;
			dv->actions[n++] = hrxq->action;
		} else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
			if (!priv->sh->default_miss_action) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "default miss action not be created.");
				goto error;
			}
			dv->actions[n++] = priv->sh->default_miss_action;
		}
		err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
					       (void *)&dv->value, n,
					       dv->actions, &dh->drv_flow);
		if (err) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dh->vf_vlan.tag && !dh->vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern.
			 * For VF we are going to create VLAN
			 * interface to make hypervisor set correct
			 * e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back every handle already applied for this flow. */
	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dh, next) {
		/* hrxq is union, don't clear it if the flag is not set. */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
			mlx5_hrxq_release(dev, dh->rix_hrxq);
			dh->rix_hrxq = 0;
		}
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
	}
	/* Undo the shared RSS index recorded before the loop. */
	if (rss_desc->shared_rss)
		wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
10756
10757 void
10758 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10759                           struct mlx5_cache_entry *entry)
10760 {
10761         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10762                                                           entry);
10763
10764         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10765         mlx5_free(cache);
10766 }
10767
10768 /**
10769  * Release the flow matcher.
10770  *
10771  * @param dev
10772  *   Pointer to Ethernet device.
10773  * @param handle
10774  *   Pointer to mlx5_flow_handle.
10775  *
10776  * @return
10777  *   1 while a reference on it exists, 0 when freed.
10778  */
10779 static int
10780 flow_dv_matcher_release(struct rte_eth_dev *dev,
10781                         struct mlx5_flow_handle *handle)
10782 {
10783         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10784         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10785                                                             typeof(*tbl), tbl);
10786         int ret;
10787
10788         MLX5_ASSERT(matcher->matcher_object);
10789         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10790         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10791         return ret;
10792 }
10793
10794 /**
10795  * Release encap_decap resource.
10796  *
10797  * @param list
10798  *   Pointer to the hash list.
10799  * @param entry
10800  *   Pointer to exist resource entry object.
10801  */
10802 void
10803 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10804                               struct mlx5_hlist_entry *entry)
10805 {
10806         struct mlx5_dev_ctx_shared *sh = list->ctx;
10807         struct mlx5_flow_dv_encap_decap_resource *res =
10808                 container_of(entry, typeof(*res), entry);
10809
10810         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10811         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10812 }
10813
10814 /**
10815  * Release an encap/decap resource.
10816  *
10817  * @param dev
10818  *   Pointer to Ethernet device.
10819  * @param encap_decap_idx
10820  *   Index of encap decap resource.
10821  *
10822  * @return
10823  *   1 while a reference on it exists, 0 when freed.
10824  */
10825 static int
10826 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10827                                      uint32_t encap_decap_idx)
10828 {
10829         struct mlx5_priv *priv = dev->data->dev_private;
10830         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10831
10832         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10833                                         encap_decap_idx);
10834         if (!cache_resource)
10835                 return 0;
10836         MLX5_ASSERT(cache_resource->action);
10837         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10838                                      &cache_resource->entry);
10839 }
10840
10841 /**
10842  * Release an jump to table action resource.
10843  *
10844  * @param dev
10845  *   Pointer to Ethernet device.
10846  * @param handle
10847  *   Pointer to mlx5_flow_handle.
10848  *
10849  * @return
10850  *   1 while a reference on it exists, 0 when freed.
10851  */
10852 static int
10853 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10854                                   struct mlx5_flow_handle *handle)
10855 {
10856         struct mlx5_priv *priv = dev->data->dev_private;
10857         struct mlx5_flow_tbl_data_entry *tbl_data;
10858
10859         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10860                              handle->rix_jump);
10861         if (!tbl_data)
10862                 return 0;
10863         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10864 }
10865
10866 void
10867 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10868                          struct mlx5_hlist_entry *entry)
10869 {
10870         struct mlx5_flow_dv_modify_hdr_resource *res =
10871                 container_of(entry, typeof(*res), entry);
10872
10873         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10874         mlx5_free(entry);
10875 }
10876
10877 /**
10878  * Release a modify-header resource.
10879  *
10880  * @param dev
10881  *   Pointer to Ethernet device.
10882  * @param handle
10883  *   Pointer to mlx5_flow_handle.
10884  *
10885  * @return
10886  *   1 while a reference on it exists, 0 when freed.
10887  */
10888 static int
10889 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10890                                     struct mlx5_flow_handle *handle)
10891 {
10892         struct mlx5_priv *priv = dev->data->dev_private;
10893         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10894
10895         MLX5_ASSERT(entry->action);
10896         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10897 }
10898
10899 void
10900 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10901                           struct mlx5_cache_entry *entry)
10902 {
10903         struct mlx5_dev_ctx_shared *sh = list->ctx;
10904         struct mlx5_flow_dv_port_id_action_resource *cache =
10905                         container_of(entry, typeof(*cache), entry);
10906
10907         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10908         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10909 }
10910
10911 /**
10912  * Release port ID action resource.
10913  *
10914  * @param dev
10915  *   Pointer to Ethernet device.
10916  * @param handle
10917  *   Pointer to mlx5_flow_handle.
10918  *
10919  * @return
10920  *   1 while a reference on it exists, 0 when freed.
10921  */
10922 static int
10923 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
10924                                         uint32_t port_id)
10925 {
10926         struct mlx5_priv *priv = dev->data->dev_private;
10927         struct mlx5_flow_dv_port_id_action_resource *cache;
10928
10929         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
10930         if (!cache)
10931                 return 0;
10932         MLX5_ASSERT(cache->action);
10933         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
10934                                      &cache->entry);
10935 }
10936
10937 /**
10938  * Release shared RSS action resource.
10939  *
10940  * @param dev
10941  *   Pointer to Ethernet device.
10942  * @param srss
10943  *   Shared RSS action index.
10944  */
10945 static void
10946 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
10947 {
10948         struct mlx5_priv *priv = dev->data->dev_private;
10949         struct mlx5_shared_action_rss *shared_rss;
10950
10951         shared_rss = mlx5_ipool_get
10952                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
10953         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
10954 }
10955
10956 void
10957 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
10958                             struct mlx5_cache_entry *entry)
10959 {
10960         struct mlx5_dev_ctx_shared *sh = list->ctx;
10961         struct mlx5_flow_dv_push_vlan_action_resource *cache =
10962                         container_of(entry, typeof(*cache), entry);
10963
10964         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10965         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
10966 }
10967
10968 /**
10969  * Release push vlan action resource.
10970  *
10971  * @param dev
10972  *   Pointer to Ethernet device.
10973  * @param handle
10974  *   Pointer to mlx5_flow_handle.
10975  *
10976  * @return
10977  *   1 while a reference on it exists, 0 when freed.
10978  */
10979 static int
10980 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
10981                                           struct mlx5_flow_handle *handle)
10982 {
10983         struct mlx5_priv *priv = dev->data->dev_private;
10984         struct mlx5_flow_dv_push_vlan_action_resource *cache;
10985         uint32_t idx = handle->dvh.rix_push_vlan;
10986
10987         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
10988         if (!cache)
10989                 return 0;
10990         MLX5_ASSERT(cache->action);
10991         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
10992                                      &cache->entry);
10993 }
10994
/**
 * Release the fate resource.
 *
 * Dispatches on the handle's fate kind and releases the matching resource
 * (hash RX queue, jump table, port-ID action or shared RSS action).
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param handle
 *   Pointer to mlx5_flow_handle.
 */
static void
flow_dv_fate_resource_release(struct rte_eth_dev *dev,
			       struct mlx5_flow_handle *handle)
{
	/*
	 * rix_fate is a union of the per-fate indices used below
	 * (rix_hrxq / rix_jump / rix_port_id_action / rix_srss);
	 * zero means no fate resource is held.
	 */
	if (!handle->rix_fate)
		return;
	switch (handle->fate_action) {
	case MLX5_FLOW_FATE_QUEUE:
		mlx5_hrxq_release(dev, handle->rix_hrxq);
		break;
	case MLX5_FLOW_FATE_JUMP:
		flow_dv_jump_tbl_resource_release(dev, handle);
		break;
	case MLX5_FLOW_FATE_PORT_ID:
		flow_dv_port_id_action_resource_release(dev,
				handle->rix_port_id_action);
		break;
	case MLX5_FLOW_FATE_SHARED_RSS:
		flow_dv_shared_rss_action_release(dev, handle->rix_srss);
		break;
	default:
		DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
		break;
	}
	/* Mark the union as released whatever the fate kind was. */
	handle->rix_fate = 0;
}
11029
/**
 * Cache-list callback: tear down a sample action resource.
 *
 * Destroys the sample flow action and its optional FDB default-miss
 * action, releases the normal-path table and the sample sub-actions,
 * then returns the resource to its index pool.
 *
 * @param list
 *   Pointer to the cache list; its context is the Ethernet device.
 * @param entry
 *   Cache entry embedded in the sample resource.
 */
void
flow_dv_sample_remove_cb(struct mlx5_cache_list *list,
			 struct mlx5_cache_entry *entry)
{
	struct rte_eth_dev *dev = list->ctx;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_sample_resource *cache_resource =
			container_of(entry, typeof(*cache_resource), entry);

	if (cache_resource->verbs_action)
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->verbs_action));
	/* The default-miss action only exists for FDB (e-Switch) tables. */
	if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
		if (cache_resource->default_miss)
			claim_zero(mlx5_glue->destroy_flow_action
			  (cache_resource->default_miss));
	}
	if (cache_resource->normal_path_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
			cache_resource->normal_path_tbl);
	flow_dv_sample_sub_actions_release(dev,
				&cache_resource->sample_idx);
	mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
			cache_resource->idx);
	DRV_LOG(DEBUG, "sample resource %p: removed",
		(void *)cache_resource);
}
11057
11058 /**
11059  * Release an sample resource.
11060  *
11061  * @param dev
11062  *   Pointer to Ethernet device.
11063  * @param handle
11064  *   Pointer to mlx5_flow_handle.
11065  *
11066  * @return
11067  *   1 while a reference on it exists, 0 when freed.
11068  */
11069 static int
11070 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11071                                      struct mlx5_flow_handle *handle)
11072 {
11073         struct mlx5_priv *priv = dev->data->dev_private;
11074         struct mlx5_flow_dv_sample_resource *cache_resource;
11075
11076         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11077                          handle->dvh.rix_sample);
11078         if (!cache_resource)
11079                 return 0;
11080         MLX5_ASSERT(cache_resource->verbs_action);
11081         return mlx5_cache_unregister(&priv->sh->sample_action_list,
11082                                      &cache_resource->entry);
11083 }
11084
11085 void
11086 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list,
11087                              struct mlx5_cache_entry *entry)
11088 {
11089         struct rte_eth_dev *dev = list->ctx;
11090         struct mlx5_priv *priv = dev->data->dev_private;
11091         struct mlx5_flow_dv_dest_array_resource *cache_resource =
11092                         container_of(entry, typeof(*cache_resource), entry);
11093         uint32_t i = 0;
11094
11095         MLX5_ASSERT(cache_resource->action);
11096         if (cache_resource->action)
11097                 claim_zero(mlx5_glue->destroy_flow_action
11098                                         (cache_resource->action));
11099         for (; i < cache_resource->num_of_dest; i++)
11100                 flow_dv_sample_sub_actions_release(dev,
11101                                 &cache_resource->sample_idx[i]);
11102         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11103                         cache_resource->idx);
11104         DRV_LOG(DEBUG, "destination array resource %p: removed",
11105                 (void *)cache_resource);
11106 }
11107
11108 /**
11109  * Release an destination array resource.
11110  *
11111  * @param dev
11112  *   Pointer to Ethernet device.
11113  * @param handle
11114  *   Pointer to mlx5_flow_handle.
11115  *
11116  * @return
11117  *   1 while a reference on it exists, 0 when freed.
11118  */
11119 static int
11120 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11121                                     struct mlx5_flow_handle *handle)
11122 {
11123         struct mlx5_priv *priv = dev->data->dev_private;
11124         struct mlx5_flow_dv_dest_array_resource *cache;
11125
11126         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11127                                handle->dvh.rix_dest_array);
11128         if (!cache)
11129                 return 0;
11130         MLX5_ASSERT(cache->action);
11131         return mlx5_cache_unregister(&priv->sh->dest_array_list,
11132                                      &cache->entry);
11133 }
11134
/**
 * Remove the flow from the NIC but keeps it in memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * Walks the flow's device-handle list, destroys each hardware rule and
 * releases queue-fate and VF VLAN resources; handle memory itself is kept
 * so the flow can be re-applied later.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dh;
	uint32_t handle_idx;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow)
		return;
	/* Iterate the singly-linked handle list by pool index. */
	handle_idx = flow->dev_handles;
	while (handle_idx) {
		dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
				    handle_idx);
		if (!dh)
			return;
		if (dh->drv_flow) {
			claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
			dh->drv_flow = NULL;
		}
		/* Only the queue fate is released here; other fate kinds
		 * stay attached until flow_dv_destroy(). */
		if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
			flow_dv_fate_resource_release(dev, dh);
		if (dh->vf_vlan.tag && dh->vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
		handle_idx = dh->next.next;
	}
}
11170
/**
 * Remove the flow from the NIC and the memory.
 * Lock free, (mutex should be acquired by caller).
 *
 * First removes the hardware rules via flow_dv_remove(), then releases
 * every resource referenced by the flow (counter, meter, age action) and
 * finally frees each device handle and all resources it references.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_handle *dev_handle;
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!flow)
		return;
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_free(dev, flow->counter);
		flow->counter = 0;
	}
	if (flow->meter) {
		struct mlx5_flow_meter *fm;

		fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
				    flow->meter);
		if (fm)
			mlx5_flow_meter_detach(fm);
		flow->meter = 0;
	}
	if (flow->age)
		flow_dv_aso_age_release(dev, flow->age);
	/* Pop and free each device handle from the flow's list. */
	while (flow->dev_handles) {
		uint32_t tmp_idx = flow->dev_handles;

		dev_handle = mlx5_ipool_get(priv->sh->ipool
					    [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
		if (!dev_handle)
			return;
		flow->dev_handles = dev_handle->next.next;
		if (dev_handle->dvh.matcher)
			flow_dv_matcher_release(dev, dev_handle);
		if (dev_handle->dvh.rix_sample)
			flow_dv_sample_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_dest_array)
			flow_dv_dest_array_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_encap_decap)
			flow_dv_encap_decap_resource_release(dev,
				dev_handle->dvh.rix_encap_decap);
		if (dev_handle->dvh.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev, dev_handle);
		if (dev_handle->dvh.rix_push_vlan)
			flow_dv_push_vlan_action_resource_release(dev,
								  dev_handle);
		if (dev_handle->dvh.rix_tag)
			flow_dv_tag_release(dev,
					    dev_handle->dvh.rix_tag);
		flow_dv_fate_resource_release(dev, dev_handle);
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
			   tmp_idx);
	}
}
11234
11235 /**
11236  * Release array of hash RX queue objects.
11237  * Helper function.
11238  *
11239  * @param[in] dev
11240  *   Pointer to the Ethernet device structure.
11241  * @param[in, out] hrxqs
11242  *   Array of hash RX queue objects.
11243  *
11244  * @return
11245  *   Total number of references to hash RX queue objects in *hrxqs* array
11246  *   after this operation.
11247  */
11248 static int
11249 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11250                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11251 {
11252         size_t i;
11253         int remaining = 0;
11254
11255         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11256                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11257
11258                 if (!ret)
11259                         (*hrxqs)[i] = 0;
11260                 remaining += ret;
11261         }
11262         return remaining;
11263 }
11264
11265 /**
11266  * Release all hash RX queue objects representing shared RSS action.
11267  *
11268  * @param[in] dev
11269  *   Pointer to the Ethernet device structure.
11270  * @param[in, out] action
11271  *   Shared RSS action to remove hash RX queue objects from.
11272  *
11273  * @return
11274  *   Total number of references to hash RX queue objects stored in *action*
11275  *   after this operation.
11276  *   Expected to be 0 if no external references held.
11277  */
11278 static int
11279 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11280                                  struct mlx5_shared_action_rss *action)
11281 {
11282         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11283                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11284 }
11285
/**
 * Setup shared RSS action.
 * Prepare set of hash RX queue objects sufficient to handle all valid
 * hash_fields combinations (see enum ibv_rx_hash_fields).
 *
 * Creates one hash RX queue per (hash_fields, tunnel) pair and records its
 * index in the shared action; on any failure all queues created so far are
 * released.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action_idx
 *   Shared RSS action ipool index.
 * @param[in, out] action
 *   Partially initialized shared RSS action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   0 on success, otherwise negative errno value.
 */
static int
__flow_dv_action_rss_setup(struct rte_eth_dev *dev,
			uint32_t action_idx,
			struct mlx5_shared_action_rss *action,
			struct rte_flow_error *error)
{
	struct mlx5_flow_rss_desc rss_desc = { 0 };
	size_t i;
	int err;

	memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
	rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
	rss_desc.const_q = action->origin.queue;
	rss_desc.queue_num = action->origin.queue_num;
	/* Set non-zero value to indicate a shared RSS. */
	rss_desc.shared_rss = action_idx;
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel;

		/* One hrxq for the plain and one for the tunnel variant. */
		for (tunnel = 0; tunnel < 2; tunnel++) {
			rss_desc.tunnel = tunnel;
			rss_desc.hash_fields = hash_fields;
			hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
			if (!hrxq_idx) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error_hrxq_new;
			}
			err = __flow_dv_action_rss_hrxq_set
				(action, hash_fields, tunnel, hrxq_idx);
			MLX5_ASSERT(!err);
		}
	}
	return 0;
error_hrxq_new:
	/* Save rte_errno across the cleanup, which may overwrite it. */
	err = rte_errno;
	__flow_dv_action_rss_hrxqs_release(dev, action);
	rte_errno = err;
	return -rte_errno;
}
11348
/**
 * Create shared RSS action.
 *
 * Allocates the shared action from its index pool plus a private copy of
 * the queue array, snapshots the RSS configuration, builds all hash RX
 * queues via __flow_dv_action_rss_setup() and links the action into the
 * per-port shared-action list.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
 *   Shared action configuration.
 * @param[in] rss
 *   RSS action specification used to create shared action.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   A valid shared action ID in case of success, 0 otherwise and
 *   rte_errno is set.
 */
static uint32_t
__flow_dv_action_rss_create(struct rte_eth_dev *dev,
			    const struct rte_flow_shared_action_conf *conf,
			    const struct rte_flow_action_rss *rss,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_action = NULL;
	void *queue = NULL;
	struct rte_flow_action_rss *origin;
	const uint8_t *rss_key;
	uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
	uint32_t idx;

	RTE_SET_USED(conf);
	queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	shared_action = mlx5_ipool_zmalloc
			 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
	if (!shared_action || !queue) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "cannot allocate resource memory");
		goto error_rss_init;
	}
	/* The pool index must fit below the action-type tag bits. */
	if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
		rte_flow_error_set(error, E2BIG,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "rss action number out of range");
		goto error_rss_init;
	}
	shared_action->queue = queue;
	origin = &shared_action->origin;
	origin->func = rss->func;
	origin->level = rss->level;
	/* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
	origin->types = !rss->types ? ETH_RSS_IP : rss->types;
	/* NULL RSS key indicates default RSS key. */
	rss_key = !rss->key ? rss_hash_default_key : rss->key;
	memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	origin->key = &shared_action->key[0];
	origin->key_len = MLX5_RSS_HASH_KEY_LEN;
	memcpy(shared_action->queue, rss->queue, queue_size);
	origin->queue = shared_action->queue;
	origin->queue_num = rss->queue_num;
	if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
		goto error_rss_init;
	__atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
	rte_spinlock_lock(&priv->shared_act_sl);
	ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
		     &priv->rss_shared_actions, idx, shared_action, next);
	rte_spinlock_unlock(&priv->shared_act_sl);
	return idx;
error_rss_init:
	/*
	 * NOTE(review): `idx` is only passed to mlx5_ipool_free() when
	 * shared_action is non-NULL, i.e. when mlx5_ipool_zmalloc()
	 * succeeded and wrote it — presumably it is left unset on failure.
	 */
	if (shared_action)
		mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
				idx);
	if (queue)
		mlx5_free(queue);
	return 0;
}
11427
11428 /**
11429  * Destroy the shared RSS action.
11430  * Release related hash RX queue objects.
11431  *
11432  * @param[in] dev
11433  *   Pointer to the Ethernet device structure.
11434  * @param[in] idx
11435  *   The shared RSS action object ID to be removed.
11436  * @param[out] error
11437  *   Perform verbose error reporting if not NULL. Initialized in case of
11438  *   error only.
11439  *
11440  * @return
11441  *   0 on success, otherwise negative errno value.
11442  */
11443 static int
11444 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
11445                              struct rte_flow_error *error)
11446 {
11447         struct mlx5_priv *priv = dev->data->dev_private;
11448         struct mlx5_shared_action_rss *shared_rss =
11449             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11450         uint32_t old_refcnt = 1;
11451         int remaining;
11452
11453         if (!shared_rss)
11454                 return rte_flow_error_set(error, EINVAL,
11455                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11456                                           "invalid shared action");
11457         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11458         if (remaining)
11459                 return rte_flow_error_set(error, ETOOMANYREFS,
11460                                           RTE_FLOW_ERROR_TYPE_ACTION,
11461                                           NULL,
11462                                           "shared rss hrxq has references");
11463         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
11464                                          0, 0, __ATOMIC_ACQUIRE,
11465                                          __ATOMIC_RELAXED))
11466                 return rte_flow_error_set(error, ETOOMANYREFS,
11467                                           RTE_FLOW_ERROR_TYPE_ACTION,
11468                                           NULL,
11469                                           "shared rss has references");
11470         rte_free(shared_rss->queue);
11471         rte_spinlock_lock(&priv->shared_act_sl);
11472         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11473                      &priv->rss_shared_actions, idx, shared_rss, next);
11474         rte_spinlock_unlock(&priv->shared_act_sl);
11475         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11476                         idx);
11477         return 0;
11478 }
11479
11480 /**
11481  * Create shared action, lock free,
11482  * (mutex should be acquired by caller).
11483  * Dispatcher for action type specific call.
11484  *
11485  * @param[in] dev
11486  *   Pointer to the Ethernet device structure.
11487  * @param[in] conf
11488  *   Shared action configuration.
11489  * @param[in] action
11490  *   Action specification used to create shared action.
11491  * @param[out] error
11492  *   Perform verbose error reporting if not NULL. Initialized in case of
11493  *   error only.
11494  *
11495  * @return
11496  *   A valid shared action handle in case of success, NULL otherwise and
11497  *   rte_errno is set.
11498  */
11499 static struct rte_flow_shared_action *
11500 flow_dv_action_create(struct rte_eth_dev *dev,
11501                       const struct rte_flow_shared_action_conf *conf,
11502                       const struct rte_flow_action *action,
11503                       struct rte_flow_error *err)
11504 {
11505         uint32_t idx = 0;
11506         uint32_t ret = 0;
11507
11508         switch (action->type) {
11509         case RTE_FLOW_ACTION_TYPE_RSS:
11510                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
11511                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
11512                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11513                 break;
11514         case RTE_FLOW_ACTION_TYPE_AGE:
11515                 ret = flow_dv_translate_create_aso_age(dev, action->conf);
11516                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
11517                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11518                 if (ret) {
11519                         struct mlx5_aso_age_action *aso_age =
11520                                               flow_aso_age_get_by_idx(dev, ret);
11521
11522                         if (!aso_age->age_params.context)
11523                                 aso_age->age_params.context =
11524                                                          (void *)(uintptr_t)idx;
11525                 }
11526                 break;
11527         default:
11528                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11529                                    NULL, "action type not supported");
11530                 break;
11531         }
11532         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
11533 }
11534
11535 /**
11536  * Destroy the shared action.
11537  * Release action related resources on the NIC and the memory.
11538  * Lock free, (mutex should be acquired by caller).
11539  * Dispatcher for action type specific call.
11540  *
11541  * @param[in] dev
11542  *   Pointer to the Ethernet device structure.
11543  * @param[in] action
11544  *   The shared action object to be removed.
11545  * @param[out] error
11546  *   Perform verbose error reporting if not NULL. Initialized in case of
11547  *   error only.
11548  *
11549  * @return
11550  *   0 on success, otherwise negative errno value.
11551  */
11552 static int
11553 flow_dv_action_destroy(struct rte_eth_dev *dev,
11554                        struct rte_flow_shared_action *action,
11555                        struct rte_flow_error *error)
11556 {
11557         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11558         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11559         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11560         int ret;
11561
11562         switch (type) {
11563         case MLX5_SHARED_ACTION_TYPE_RSS:
11564                 return __flow_dv_action_rss_release(dev, idx, error);
11565         case MLX5_SHARED_ACTION_TYPE_AGE:
11566                 ret = flow_dv_aso_age_release(dev, idx);
11567                 if (ret)
11568                         /*
11569                          * In this case, the last flow has a reference will
11570                          * actually release the age action.
11571                          */
11572                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
11573                                 " released with references %d.", idx, ret);
11574                 return 0;
11575         default:
11576                 return rte_flow_error_set(error, ENOTSUP,
11577                                           RTE_FLOW_ERROR_TYPE_ACTION,
11578                                           NULL,
11579                                           "action type not supported");
11580         }
11581 }
11582
11583 /**
11584  * Updates in place shared RSS action configuration.
11585  *
11586  * @param[in] dev
11587  *   Pointer to the Ethernet device structure.
11588  * @param[in] idx
11589  *   The shared RSS action object ID to be updated.
11590  * @param[in] action_conf
11591  *   RSS action specification used to modify *shared_rss*.
11592  * @param[out] error
11593  *   Perform verbose error reporting if not NULL. Initialized in case of
11594  *   error only.
11595  *
11596  * @return
11597  *   0 on success, otherwise negative errno value.
11598  * @note: currently only support update of RSS queues.
11599  */
static int
__flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
			    const struct rte_flow_action_rss *action_conf,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_shared_action_rss *shared_rss =
	    mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
	size_t i;
	int ret;
	void *queue = NULL;
	const uint8_t *rss_key;
	uint32_t rss_key_len;
	uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);

	if (!shared_rss)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid shared action to update");
	/*
	 * Allocate the replacement queue array up front so the currently
	 * published one stays intact until every hash RX queue has been
	 * re-programmed successfully.
	 */
	queue = mlx5_malloc(MLX5_MEM_ZERO,
			    RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
			    0, SOCKET_ID_ANY);
	if (!queue)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot allocate resource memory");
	/* NULL key in the new configuration means "use the default key". */
	if (action_conf->key) {
		rss_key = action_conf->key;
		rss_key_len = action_conf->key_len;
	} else {
		rss_key = rss_hash_default_key;
		rss_key_len = MLX5_RSS_HASH_KEY_LEN;
	}
	/*
	 * Re-program every pre-created hash RX queue of the shared action:
	 * all hash-field combinations, tunneled and non-tunneled variants.
	 */
	for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
		uint32_t hrxq_idx;
		uint64_t hash_fields = mlx5_rss_hash_fields[i];
		int tunnel;

		for (tunnel = 0; tunnel < 2; tunnel++) {
			hrxq_idx = __flow_dv_action_rss_hrxq_lookup
					(dev, idx, hash_fields, tunnel);
			MLX5_ASSERT(hrxq_idx);
			ret = mlx5_hrxq_modify
				(dev, hrxq_idx,
				 rss_key, rss_key_len,
				 hash_fields,
				 action_conf->queue, action_conf->queue_num);
			if (ret) {
				/*
				 * NOTE(review): hrxqs already modified in
				 * earlier iterations are not rolled back on
				 * this failure - confirm callers tolerate a
				 * partially applied update.
				 */
				mlx5_free(queue);
				return rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "cannot update hash queue");
			}
		}
	}
	/* All hrxqs updated - publish the new queue list in the action. */
	mlx5_free(shared_rss->queue);
	shared_rss->queue = queue;
	memcpy(shared_rss->queue, action_conf->queue, queue_size);
	shared_rss->origin.queue = shared_rss->queue;
	shared_rss->origin.queue_num = action_conf->queue_num;
	return 0;
}
11664
11665 /**
11666  * Updates in place shared action configuration, lock free,
11667  * (mutex should be acquired by caller).
11668  *
11669  * @param[in] dev
11670  *   Pointer to the Ethernet device structure.
11671  * @param[in] action
11672  *   The shared action object to be updated.
11673  * @param[in] action_conf
11674  *   Action specification used to modify *action*.
11675  *   *action_conf* should be of type correlating with type of the *action*,
11676  *   otherwise considered as invalid.
11677  * @param[out] error
11678  *   Perform verbose error reporting if not NULL. Initialized in case of
11679  *   error only.
11680  *
11681  * @return
11682  *   0 on success, otherwise negative errno value.
11683  */
11684 static int
11685 flow_dv_action_update(struct rte_eth_dev *dev,
11686                         struct rte_flow_shared_action *action,
11687                         const void *action_conf,
11688                         struct rte_flow_error *err)
11689 {
11690         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11691         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11692         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11693
11694         switch (type) {
11695         case MLX5_SHARED_ACTION_TYPE_RSS:
11696                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
11697         default:
11698                 return rte_flow_error_set(err, ENOTSUP,
11699                                           RTE_FLOW_ERROR_TYPE_ACTION,
11700                                           NULL,
11701                                           "action type update not supported");
11702         }
11703 }
11704
11705 static int
11706 flow_dv_action_query(struct rte_eth_dev *dev,
11707                      const struct rte_flow_shared_action *action, void *data,
11708                      struct rte_flow_error *error)
11709 {
11710         struct mlx5_age_param *age_param;
11711         struct rte_flow_query_age *resp;
11712         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11713         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11714         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11715
11716         switch (type) {
11717         case MLX5_SHARED_ACTION_TYPE_AGE:
11718                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
11719                 resp = data;
11720                 resp->aged = __atomic_load_n(&age_param->state,
11721                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
11722                                                                           1 : 0;
11723                 resp->sec_since_last_hit_valid = !resp->aged;
11724                 if (resp->sec_since_last_hit_valid)
11725                         resp->sec_since_last_hit = __atomic_load_n
11726                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11727                 return 0;
11728         default:
11729                 return rte_flow_error_set(error, ENOTSUP,
11730                                           RTE_FLOW_ERROR_TYPE_ACTION,
11731                                           NULL,
11732                                           "action type query not supported");
11733         }
11734 }
11735
11736 /**
11737  * Query a dv flow  rule for its statistics via devx.
11738  *
11739  * @param[in] dev
11740  *   Pointer to Ethernet device.
11741  * @param[in] flow
11742  *   Pointer to the sub flow.
11743  * @param[out] data
11744  *   data retrieved by the query.
11745  * @param[out] error
11746  *   Perform verbose error reporting if not NULL.
11747  *
11748  * @return
11749  *   0 on success, a negative errno value otherwise and rte_errno is set.
11750  */
11751 static int
11752 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11753                     void *data, struct rte_flow_error *error)
11754 {
11755         struct mlx5_priv *priv = dev->data->dev_private;
11756         struct rte_flow_query_count *qc = data;
11757
11758         if (!priv->config.devx)
11759                 return rte_flow_error_set(error, ENOTSUP,
11760                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11761                                           NULL,
11762                                           "counters are not supported");
11763         if (flow->counter) {
11764                 uint64_t pkts, bytes;
11765                 struct mlx5_flow_counter *cnt;
11766
11767                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11768                                                  NULL);
11769                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11770                                                &bytes);
11771
11772                 if (err)
11773                         return rte_flow_error_set(error, -err,
11774                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11775                                         NULL, "cannot read counters");
11776                 qc->hits_set = 1;
11777                 qc->bytes_set = 1;
11778                 qc->hits = pkts - cnt->hits;
11779                 qc->bytes = bytes - cnt->bytes;
11780                 if (qc->reset) {
11781                         cnt->hits = pkts;
11782                         cnt->bytes = bytes;
11783                 }
11784                 return 0;
11785         }
11786         return rte_flow_error_set(error, EINVAL,
11787                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11788                                   NULL,
11789                                   "counters are not available");
11790 }
11791
11792 /**
11793  * Query a flow rule AGE action for aging information.
11794  *
11795  * @param[in] dev
11796  *   Pointer to Ethernet device.
11797  * @param[in] flow
11798  *   Pointer to the sub flow.
11799  * @param[out] data
11800  *   data retrieved by the query.
11801  * @param[out] error
11802  *   Perform verbose error reporting if not NULL.
11803  *
11804  * @return
11805  *   0 on success, a negative errno value otherwise and rte_errno is set.
11806  */
11807 static int
11808 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11809                   void *data, struct rte_flow_error *error)
11810 {
11811         struct rte_flow_query_age *resp = data;
11812         struct mlx5_age_param *age_param;
11813
11814         if (flow->age) {
11815                 struct mlx5_aso_age_action *act =
11816                                      flow_aso_age_get_by_idx(dev, flow->age);
11817
11818                 age_param = &act->age_params;
11819         } else if (flow->counter) {
11820                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
11821
11822                 if (!age_param || !age_param->timeout)
11823                         return rte_flow_error_set
11824                                         (error, EINVAL,
11825                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11826                                          NULL, "cannot read age data");
11827         } else {
11828                 return rte_flow_error_set(error, EINVAL,
11829                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11830                                           NULL, "age data not available");
11831         }
11832         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
11833                                      AGE_TMOUT ? 1 : 0;
11834         resp->sec_since_last_hit_valid = !resp->aged;
11835         if (resp->sec_since_last_hit_valid)
11836                 resp->sec_since_last_hit = __atomic_load_n
11837                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11838         return 0;
11839 }
11840
11841 /**
11842  * Query a flow.
11843  *
11844  * @see rte_flow_query()
11845  * @see rte_flow_ops
11846  */
11847 static int
11848 flow_dv_query(struct rte_eth_dev *dev,
11849               struct rte_flow *flow __rte_unused,
11850               const struct rte_flow_action *actions __rte_unused,
11851               void *data __rte_unused,
11852               struct rte_flow_error *error __rte_unused)
11853 {
11854         int ret = -EINVAL;
11855
11856         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11857                 switch (actions->type) {
11858                 case RTE_FLOW_ACTION_TYPE_VOID:
11859                         break;
11860                 case RTE_FLOW_ACTION_TYPE_COUNT:
11861                         ret = flow_dv_query_count(dev, flow, data, error);
11862                         break;
11863                 case RTE_FLOW_ACTION_TYPE_AGE:
11864                         ret = flow_dv_query_age(dev, flow, data, error);
11865                         break;
11866                 default:
11867                         return rte_flow_error_set(error, ENOTSUP,
11868                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11869                                                   actions,
11870                                                   "action not supported");
11871                 }
11872         }
11873         return ret;
11874 }
11875
11876 /**
11877  * Destroy the meter table set.
11878  * Lock free, (mutex should be acquired by caller).
11879  *
11880  * @param[in] dev
11881  *   Pointer to Ethernet device.
11882  * @param[in] tbl
11883  *   Pointer to the meter table set.
11884  *
11885  * @return
11886  *   Always 0.
11887  */
static int
flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
			struct mlx5_meter_domains_infos *tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtd =
				(struct mlx5_meter_domains_infos *)tbl;

	if (!mtd || !priv->config.dv_flow_en)
		return 0;
	/* Release the default drop rules of all three domains first. */
	if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_flow_os_destroy_flow
			   (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
	if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_flow_os_destroy_flow
			   (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
	if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
		claim_zero(mlx5_flow_os_destroy_flow
			   (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
	/* Egress domain: matchers, then the meter and suffix tables. */
	if (mtd->egress.color_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->egress.color_matcher));
	if (mtd->egress.any_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->egress.any_matcher));
	if (mtd->egress.tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
	if (mtd->egress.sfx_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
	/* Ingress domain. */
	if (mtd->ingress.color_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->ingress.color_matcher));
	if (mtd->ingress.any_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->ingress.any_matcher));
	if (mtd->ingress.tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
	if (mtd->ingress.sfx_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     mtd->ingress.sfx_tbl);
	/* Transfer (FDB) domain. */
	if (mtd->transfer.color_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->transfer.color_matcher));
	if (mtd->transfer.any_matcher)
		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (mtd->transfer.any_matcher));
	if (mtd->transfer.tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
	if (mtd->transfer.sfx_tbl)
		flow_dv_tbl_resource_release(MLX5_SH(dev),
					     mtd->transfer.sfx_tbl);
	/* Finally the drop action shared by all domains, then the set. */
	if (mtd->drop_actn)
		claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
	mlx5_free(mtd);
	return 0;
}
11944
11945 /* Number of meter flow actions, count and jump or count and drop. */
11946 #define METER_ACTIONS 2
11947
11948 /**
11949  * Create specify domain meter table and suffix table.
11950  *
11951  * @param[in] dev
11952  *   Pointer to Ethernet device.
11953  * @param[in,out] mtb
11954  *   Pointer to DV meter table set.
11955  * @param[in] egress
11956  *   Table attribute.
11957  * @param[in] transfer
11958  *   Table attribute.
11959  * @param[in] color_reg_c_idx
11960  *   Reg C index for color match.
11961  *
11962  * @return
11963  *   0 on success, -1 otherwise and rte_errno is set.
11964  */
static int
flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
			   struct mlx5_meter_domains_infos *mtb,
			   uint8_t egress, uint8_t transfer,
			   uint32_t color_reg_c_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_flow_dv_match_params mask = {
		.size = sizeof(mask.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.priority = 0,
		.match_criteria_enable = 0,
		.match_mask = (void *)&mask,
	};
	void *actions[METER_ACTIONS];
	struct mlx5_meter_domain_info *dtb;
	struct rte_flow_error error;
	int i = 0;
	int ret;

	/* Select the domain table: transfer takes precedence over egress. */
	if (transfer)
		dtb = &mtb->transfer;
	else if (egress)
		dtb = &mtb->egress;
	else
		dtb = &mtb->ingress;
	/* Create the meter table with METER level. */
	dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
					    egress, transfer, false, NULL, 0,
					    0, &error);
	if (!dtb->tbl) {
		DRV_LOG(ERR, "Failed to create meter policer table.");
		return -1;
	}
	/* Create the meter suffix table with SUFFIX level. */
	dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
					    MLX5_FLOW_TABLE_LEVEL_SUFFIX,
					    egress, transfer, false, NULL, 0,
					    0, &error);
	if (!dtb->sfx_tbl) {
		DRV_LOG(ERR, "Failed to create meter suffix table.");
		return -1;
	}
	/* Create matchers, Any and Color. */
	/* Catch-all matcher at the lowest priority (empty criteria). */
	dv_attr.priority = 3;
	dv_attr.match_criteria_enable = 0;
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->any_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter"
			     " policer default matcher.");
		goto error_exit;
	}
	/* Color matcher at priority 0, matching on the color REG_C bits. */
	dv_attr.priority = 0;
	dv_attr.match_criteria_enable =
				1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
	flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
			       rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
	ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
					       &dtb->color_matcher);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer color matcher.");
		goto error_exit;
	}
	/* Optional drop counter first, then the drop action itself. */
	if (mtb->count_actns[RTE_MTR_DROPPED])
		actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
	actions[i++] = mtb->drop_actn;
	/* Default rule: lowest priority, match any, actions: drop. */
	ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
				       actions,
				       &dtb->policer_rules[RTE_MTR_DROPPED]);
	if (ret) {
		DRV_LOG(ERR, "Failed to create meter policer drop rule.");
		goto error_exit;
	}
	return 0;
error_exit:
	/*
	 * Partially created resources stay in *mtb; the caller releases
	 * them via flow_dv_destroy_mtr_tbl() on the error path.
	 */
	return -1;
}
12050
12051 /**
12052  * Create the needed meter and suffix tables.
12053  * Lock free, (mutex should be acquired by caller).
12054  *
12055  * @param[in] dev
12056  *   Pointer to Ethernet device.
12057  * @param[in] fm
12058  *   Pointer to the flow meter.
12059  *
12060  * @return
12061  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12062  */
static struct mlx5_meter_domains_infos *
flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
		       const struct mlx5_flow_meter *fm)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb;
	int ret;
	int i;

	if (!priv->mtr_en) {
		rte_errno = ENOTSUP;
		return NULL;
	}
	mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
	if (!mtb) {
		DRV_LOG(ERR, "Failed to allocate memory for meter.");
		return NULL;
	}
	/* Create meter count actions */
	/* Cache a count action per color that has a policer counter. */
	for (i = 0; i <= RTE_MTR_DROPPED; i++) {
		struct mlx5_flow_counter *cnt;
		if (!fm->policer_stats.cnt[i])
			continue;
		cnt = flow_dv_counter_get_by_idx(dev,
		      fm->policer_stats.cnt[i], NULL);
		mtb->count_actns[i] = cnt->action;
	}
	/* Create drop action. */
	ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create drop action.");
		goto error_exit;
	}
	/* Egress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare egress meter table.");
		goto error_exit;
	}
	/* Ingress meter table. */
	ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
	if (ret) {
		DRV_LOG(ERR, "Failed to prepare ingress meter table.");
		goto error_exit;
	}
	/* FDB meter table. */
	/* Transfer domain only exists with E-Switch enabled. */
	if (priv->config.dv_esw_en) {
		ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
						 priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to prepare fdb meter table.");
			goto error_exit;
		}
	}
	return mtb;
error_exit:
	/* Releases everything created so far, including mtb itself. */
	flow_dv_destroy_mtr_tbl(dev, mtb);
	return NULL;
}
12122
12123 /**
12124  * Destroy domain policer rule.
12125  *
12126  * @param[in] dt
12127  *   Pointer to domain table.
12128  */
12129 static void
12130 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12131 {
12132         int i;
12133
12134         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12135                 if (dt->policer_rules[i]) {
12136                         claim_zero(mlx5_flow_os_destroy_flow
12137                                    (dt->policer_rules[i]));
12138                         dt->policer_rules[i] = NULL;
12139                 }
12140         }
12141         if (dt->jump_actn) {
12142                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12143                 dt->jump_actn = NULL;
12144         }
12145 }
12146
12147 /**
12148  * Destroy policer rules.
12149  *
12150  * @param[in] dev
12151  *   Pointer to Ethernet device.
12152  * @param[in] fm
12153  *   Pointer to flow meter structure.
12154  * @param[in] attr
12155  *   Pointer to flow attributes.
12156  *
12157  * @return
12158  *   Always 0.
12159  */
12160 static int
12161 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12162                               const struct mlx5_flow_meter *fm,
12163                               const struct rte_flow_attr *attr)
12164 {
12165         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12166
12167         if (!mtb)
12168                 return 0;
12169         if (attr->egress)
12170                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
12171         if (attr->ingress)
12172                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12173         if (attr->transfer)
12174                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12175         return 0;
12176 }
12177
/**
 * Create specify domain meter policer rule.
 *
 * Installs one flow rule per meter color (color index below
 * RTE_MTR_DROPPED) in the given domain: each rule matches the color
 * value written into the mtr_reg_c register and either drops the packet
 * or jumps to the domain suffix table, optionally counting first.
 *
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] dtb
 *   Pointer to the DV meter table set of one domain
 *   (egress/ingress/transfer).
 * @param[in] mtr_reg_c
 *   Color match REG_C.
 *
 * @return
 *   0 on success, -1 otherwise and rte_errno is set.
 */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;
	int ret = 0;

	/* Create jump action (reused if already created for this domain). */
	if (!dtb->jump_actn)
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(dtb->sfx_tbl->obj, &dtb->jump_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0;

		/* Match the color value of this iteration in mtr_reg_c. */
		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		/* Optional per-color counter action goes first. */
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		/* Terminal action: drop, or jump to the suffix table. */
		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
					       (void *)&value, j, actions,
					       &dtb->policer_rules[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	/*
	 * NOTE(review): assumes the failing OS-glue call left a meaningful
	 * errno — confirm against mlx5_flow_os_* implementations.
	 */
	rte_errno = errno;
	return -1;
}
12239
12240 /**
12241  * Create policer rules.
12242  *
12243  * @param[in] dev
12244  *   Pointer to Ethernet device.
12245  * @param[in] fm
12246  *   Pointer to flow meter structure.
12247  * @param[in] attr
12248  *   Pointer to flow attributes.
12249  *
12250  * @return
12251  *   0 on success, -1 otherwise.
12252  */
12253 static int
12254 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12255                              struct mlx5_flow_meter *fm,
12256                              const struct rte_flow_attr *attr)
12257 {
12258         struct mlx5_priv *priv = dev->data->dev_private;
12259         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12260         int ret;
12261
12262         if (attr->egress) {
12263                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12264                                                 priv->mtr_color_reg);
12265                 if (ret) {
12266                         DRV_LOG(ERR, "Failed to create egress policer.");
12267                         goto error;
12268                 }
12269         }
12270         if (attr->ingress) {
12271                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12272                                                 priv->mtr_color_reg);
12273                 if (ret) {
12274                         DRV_LOG(ERR, "Failed to create ingress policer.");
12275                         goto error;
12276                 }
12277         }
12278         if (attr->transfer) {
12279                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12280                                                 priv->mtr_color_reg);
12281                 if (ret) {
12282                         DRV_LOG(ERR, "Failed to create transfer policer.");
12283                         goto error;
12284                 }
12285         }
12286         return 0;
12287 error:
12288         flow_dv_destroy_policer_rules(dev, fm, attr);
12289         return -1;
12290 }
12291
12292 /**
12293  * Validate the batch counter support in root table.
12294  *
12295  * Create a simple flow with invalid counter and drop action on root table to
12296  * validate if batch counter with offset on root table is supported or not.
12297  *
12298  * @param[in] dev
12299  *   Pointer to rte_eth_dev structure.
12300  *
12301  * @return
12302  *   0 on success, a negative errno value otherwise and rte_errno is set.
12303  */
12304 int
12305 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12306 {
12307         struct mlx5_priv *priv = dev->data->dev_private;
12308         struct mlx5_dev_ctx_shared *sh = priv->sh;
12309         struct mlx5_flow_dv_match_params mask = {
12310                 .size = sizeof(mask.buf),
12311         };
12312         struct mlx5_flow_dv_match_params value = {
12313                 .size = sizeof(value.buf),
12314         };
12315         struct mlx5dv_flow_matcher_attr dv_attr = {
12316                 .type = IBV_FLOW_ATTR_NORMAL,
12317                 .priority = 0,
12318                 .match_criteria_enable = 0,
12319                 .match_mask = (void *)&mask,
12320         };
12321         void *actions[2] = { 0 };
12322         struct mlx5_flow_tbl_resource *tbl = NULL;
12323         struct mlx5_devx_obj *dcs = NULL;
12324         void *matcher = NULL;
12325         void *flow = NULL;
12326         int ret = -1;
12327
12328         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12329         if (!tbl)
12330                 goto err;
12331         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12332         if (!dcs)
12333                 goto err;
12334         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12335                                                     &actions[0]);
12336         if (ret)
12337                 goto err;
12338         actions[1] = priv->drop_queue.hrxq->action;
12339         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12340         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12341                                                &matcher);
12342         if (ret)
12343                 goto err;
12344         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12345                                        actions, &flow);
12346 err:
12347         /*
12348          * If batch counter with offset is not supported, the driver will not
12349          * validate the invalid offset value, flow create should success.
12350          * In this case, it means batch counter is not supported in root table.
12351          *
12352          * Otherwise, if flow create is failed, counter offset is supported.
12353          */
12354         if (flow) {
12355                 DRV_LOG(INFO, "Batch counter is not supported in root "
12356                               "table. Switch to fallback mode.");
12357                 rte_errno = ENOTSUP;
12358                 ret = -rte_errno;
12359                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12360         } else {
12361                 /* Check matcher to make sure validate fail at flow create. */
12362                 if (!matcher || (matcher && errno != EINVAL))
12363                         DRV_LOG(ERR, "Unexpected error in counter offset "
12364                                      "support detection");
12365                 ret = 0;
12366         }
12367         if (actions[0])
12368                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
12369         if (matcher)
12370                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12371         if (tbl)
12372                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12373         if (dcs)
12374                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12375         return ret;
12376 }
12377
12378 /**
12379  * Query a devx counter.
12380  *
12381  * @param[in] dev
12382  *   Pointer to the Ethernet device structure.
12383  * @param[in] cnt
12384  *   Index to the flow counter.
12385  * @param[in] clear
12386  *   Set to clear the counter statistics.
12387  * @param[out] pkts
12388  *   The statistics value of packets.
12389  * @param[out] bytes
12390  *   The statistics value of bytes.
12391  *
12392  * @return
12393  *   0 on success, otherwise return -1.
12394  */
12395 static int
12396 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12397                       uint64_t *pkts, uint64_t *bytes)
12398 {
12399         struct mlx5_priv *priv = dev->data->dev_private;
12400         struct mlx5_flow_counter *cnt;
12401         uint64_t inn_pkts, inn_bytes;
12402         int ret;
12403
12404         if (!priv->config.devx)
12405                 return -1;
12406
12407         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12408         if (ret)
12409                 return -1;
12410         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12411         *pkts = inn_pkts - cnt->hits;
12412         *bytes = inn_bytes - cnt->bytes;
12413         if (clear) {
12414                 cnt->hits = inn_pkts;
12415                 cnt->bytes = inn_bytes;
12416         }
12417         return 0;
12418 }
12419
/**
 * Get aged-out flows.
 *
 * Walks both ageing lists (ASO age actions and counter-based aged flows)
 * under the port ageing lock, optionally copying the flow contexts out.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of context array pointers.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   how many contexts get in success, otherwise negative errno value.
 *   if nb_contexts is 0, return the amount of all aged contexts.
 *   if nb_contexts is not 0 , return the amount of aged flows reported
 *   in the context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	struct mlx5_aso_age_action *act;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "empty context");
	age_info = GET_PORT_AGE_INFO(priv);
	/* Both lists are walked under the same ageing spinlock. */
	rte_spinlock_lock(&age_info->aged_sl);
	/* ASO-based aged flows. */
	LIST_FOREACH(act, &age_info->aged_aso, next) {
		nb_flows++;
		if (nb_contexts) {
			context[nb_flows - 1] =
						act->age_params.context;
			if (!(--nb_contexts))
				break;
		}
	}
	/* Counter-based aged flows; counting continues across both lists. */
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
	/* NOTE(review): presumably re-arms the ageing event — confirm. */
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}
12481
/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 *
 * Wrapper matching the driver-ops prototype; forwards with the second
 * argument fixed to 0 (presumably "no ageing" — confirm against
 * flow_dv_counter_alloc()).
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	return flow_dv_counter_alloc(dev, 0);
}
12490
12491 /**
12492  * Validate shared action.
12493  * Dispatcher for action type specific validation.
12494  *
12495  * @param[in] dev
12496  *   Pointer to the Ethernet device structure.
12497  * @param[in] conf
12498  *   Shared action configuration.
12499  * @param[in] action
12500  *   The shared action object to validate.
12501  * @param[out] error
12502  *   Perform verbose error reporting if not NULL. Initialized in case of
12503  *   error only.
12504  *
12505  * @return
12506  *   0 on success, otherwise negative errno value.
12507  */
12508 static int
12509 flow_dv_action_validate(struct rte_eth_dev *dev,
12510                         const struct rte_flow_shared_action_conf *conf,
12511                         const struct rte_flow_action *action,
12512                         struct rte_flow_error *err)
12513 {
12514         struct mlx5_priv *priv = dev->data->dev_private;
12515
12516         RTE_SET_USED(conf);
12517         switch (action->type) {
12518         case RTE_FLOW_ACTION_TYPE_RSS:
12519                 return mlx5_validate_action_rss(dev, action, err);
12520         case RTE_FLOW_ACTION_TYPE_AGE:
12521                 if (!priv->sh->aso_age_mng)
12522                         return rte_flow_error_set(err, ENOTSUP,
12523                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12524                                                 NULL,
12525                                              "shared age action not supported");
12526                 return flow_dv_validate_action_age(0, action, dev, err);
12527         default:
12528                 return rte_flow_error_set(err, ENOTSUP,
12529                                           RTE_FLOW_ERROR_TYPE_ACTION,
12530                                           NULL,
12531                                           "action type not supported");
12532         }
12533 }
12534
12535 static int
12536 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12537 {
12538         struct mlx5_priv *priv = dev->data->dev_private;
12539         int ret = 0;
12540
12541         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12542                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12543                                                 flags);
12544                 if (ret != 0)
12545                         return ret;
12546         }
12547         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12548                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12549                 if (ret != 0)
12550                         return ret;
12551         }
12552         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12553                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12554                 if (ret != 0)
12555                         return ret;
12556         }
12557         return 0;
12558 }
12559
/* DV flow engine callbacks registered with the generic mlx5 flow layer. */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
	.action_validate = flow_dv_action_validate,
	.action_create = flow_dv_action_create,
	.action_destroy = flow_dv_action_destroy,
	.action_update = flow_dv_action_update,
	.action_query = flow_dv_action_query,
	.sync_domain = flow_dv_sync_domain,
};
12583
12584 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12585