drivers/net/mlx5/mlx5_flow_dv.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};
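
/*
 * Note: the anonymous bit-field struct above shares storage with the
 * "attr" word, so a single assignment "attr->attr = 0" (used below when
 * a tunnel item resets the accumulated flags) clears all fields at once.
 */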

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
			     struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
				     uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
					uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

/**
 * Initialize flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() rejects multiple L3/L4 layer combinations except in
 * tunnel mode. In tunnel mode, the items to be modified are the outermost
 * ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
		  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
	uint64_t layers = dev_flow->handle->layers;

	/*
	 * If layers is already initialized, it means this dev_flow is the
	 * suffix flow and the layer flags were set by the prefix flow. Use
	 * the layer flags from the prefix flow, as the suffix flow may not
	 * contain the user-defined items after the flow is split.
	 */
	if (layers) {
		if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
			attr->ipv4 = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
			attr->ipv6 = 1;
		if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
			attr->tcp = 1;
		else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
			attr->udp = 1;
		attr->valid = 1;
		return;
	}
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		uint8_t next_protocol = 0xff;
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		case RTE_FLOW_ITEM_TYPE_GENEVE:
		case RTE_FLOW_ITEM_TYPE_MPLS:
			if (tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			if (!attr->ipv6)
				attr->ipv4 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			    item->mask)->hdr.next_proto_id)
				next_protocol =
				    ((const struct rte_flow_item_ipv4 *)
				      (item->spec))->hdr.next_proto_id &
				    ((const struct rte_flow_item_ipv4 *)
				      (item->mask))->hdr.next_proto_id;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			if (!attr->ipv4)
				attr->ipv6 = 1;
			if (item->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			    item->mask)->hdr.proto)
				next_protocol =
				    ((const struct rte_flow_item_ipv6 *)
				      (item->spec))->hdr.proto &
				    ((const struct rte_flow_item_ipv6 *)
				      (item->mask))->hdr.proto;
			if ((next_protocol == IPPROTO_IPIP ||
			    next_protocol == IPPROTO_IPV6) && tunnel_decap)
				attr->attr = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (!attr->tcp)
				attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (!attr->udp)
				attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
	switch (rcol) {
	case RTE_COLOR_GREEN:
		return MLX5_FLOW_COLOR_GREEN;
	case RTE_COLOR_YELLOW:
		return MLX5_FLOW_COLOR_YELLOW;
	case RTE_COLOR_RED:
		return MLX5_FLOW_COLOR_RED;
	default:
		break;
	}
	return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  1, MLX5_MODI_OUT_IP_DSCP},
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  0, MLX5_MODI_OUT_IP_DSCP},
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
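
/*
 * Illustrative note: each table above mirrors the wire layout of its
 * protocol header. E.g. in modify_ipv4 the TTL entry {1, 8, ...} is a
 * 1-byte field at byte offset 8 of the IPv4 header and the source
 * address {4, 12, ...} is the 4-byte word at offset 12. The {0, 0, 0}
 * entry terminates each list for flow_dv_convert_modify_action().
 */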

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;

	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}
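
/*
 * Worked example (illustrative): for OF_SET_VLAN_PCP with vlan_pcp = 5,
 * vlan_tci = 5 << 13 = 0xA000; the old PCP bits (15:13) are cleared and
 * replaced, while the DEI and VID bits of vlan->vlan_tci are preserved.
 */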

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   The converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		MLX5_ASSERT(false);
		ret = 0;
		break;
	}
	return ret;
}
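
/*
 * Example (illustrative): for data = {0x12, 0x34, 0x56},
 * flow_dv_fetch_field(data, 3) returns 0x123456 regardless of host
 * endianness, since the input array is defined to be big-endian.
 */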

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored, the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * The mask must always be present; it defines the actual field width.
	 */
	MLX5_ASSERT(item->mask);
	MLX5_ASSERT(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MAX_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		MLX5_ASSERT(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i] = (struct mlx5_modification_cmd) {
			.action_type = type,
			.field = field->id,
			.offset = off_b,
			.length = size_b,
		};
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			MLX5_ASSERT(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			MLX5_ASSERT(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Mask the field and shift it down to bit 0. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	if (resource->actions_num == i)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	resource->actions_num = i;
	return 0;
}
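
/*
 * Worked example (illustrative): a 32-bit mask of 0x00fff000 yields
 * off_b = rte_bsf32(mask) = 12 and size_b = 32 - 12 - __builtin_clz(mask)
 * = 32 - 12 - 8 = 12, i.e. a 12-bit modification starting at bit 12 of
 * the destination field.
 */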

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = field->id,
		.length = field->size,
		.offset = field->offset,
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}
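
/*
 * Note (assumption, inferred from the missing rte_cpu_to_be_32() on
 * data1 above): conf->vlan_vid is big-endian (rte_be16_t), so shifting
 * it into the upper 16 bits of data1 yields, on little-endian hosts,
 * the same byte image as converting the host-order VID, making an
 * explicit byte-swap unnecessary here.
 */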

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	} else {
		MLX5_ASSERT(attr->tcp);
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether the action is applied after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
			 bool tunnel_decap, struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	} else {
		MLX5_ASSERT(attr->ipv6);
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X using the increment
		 * operation, we need to add UINT32_MAX to Y X times.
		 * Each addition of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
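	/*
	 * Worked example (illustrative): to decrement by 3, value becomes
	 * 3 * UINT32_MAX, which truncates below to (uint32_t)-3, so the
	 * 32-bit hardware ADD wraps the sequence number around to Y - 3.
	 */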
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment.
		 * To simulate decrementing Y by X using the increment
		 * operation, we need to add UINT32_MAX to Y X times.
		 * Each addition of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
	[REG_NON] = MLX5_MODI_OUT_NONE,
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MAX_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	MLX5_ASSERT(conf->id != REG_NON);
	MLX5_ASSERT(conf->id < (enum modify_reg)RTE_DIM(reg_to_field));
	actions[i] = (struct mlx5_modification_cmd) {
		.action_type = MLX5_MODIFICATION_TYPE_SET,
		.field = reg_to_field[conf->id],
	};
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = rte_cpu_to_be_32(conf->data);
	++i;
	resource->actions_num = i;
	return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action_set_tag *conf,
			 struct rte_flow_error *error)
{
	rte_be32_t data = rte_cpu_to_be_32(conf->data);
	rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	enum mlx5_modification_field reg_type;
	int ret;

	ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
	if (ret < 0)
		return ret;
	MLX5_ASSERT(ret != REG_NON);
	MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
	reg_type = reg_to_field[ret];
	MLX5_ASSERT(reg_type > 0);
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
				 struct mlx5_flow_dv_modify_hdr_resource *res,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error)
{
	const struct mlx5_flow_action_copy_mreg *conf = action->conf;
	rte_be32_t mask = RTE_BE32(UINT32_MAX);
	struct rte_flow_item item = {
		.spec = NULL,
		.mask = &mask,
	};
	struct field_modify_info reg_src[] = {
		{4, 0, reg_to_field[conf->src]},
		{0, 0, 0},
	};
	struct field_modify_info reg_dst = {
		.offset = 0,
		.id = reg_to_field[conf->dst],
	};
	/* Adjust reg_c[0] usage according to reported mask. */
	if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t reg_c0 = priv->sh->dv_regc0_mask;

		MLX5_ASSERT(reg_c0);
		MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
		if (conf->dst == REG_C_0) {
			/* Copy to reg_c[0], within mask only. */
			reg_dst.offset = rte_bsf32(reg_c0);
			/*
			 * The mask ignores endianness because there
			 * is no conversion in the datapath.
			 */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from destination lower bits to reg_c[0]. */
			mask = reg_c0 >> reg_dst.offset;
#else
			/* Copy from destination upper bits to reg_c[0]. */
			mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
					  rte_fls_u32(reg_c0));
#endif
		} else {
			mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
			/* Copy from reg_c[0] to destination lower bits. */
			reg_dst.offset = 0;
#else
			/* Copy from reg_c[0] to destination upper bits. */
			reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
					 (rte_fls_u32(reg_c0) -
					  rte_bsf32(reg_c0));
#endif
		}
	}
	return flow_dv_convert_modify_action(&item,
					     reg_src, &reg_dst, res,
					     MLX5_MODIFICATION_TYPE_COPY,
					     error);
}
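
/*
 * Worked example (illustrative): with dv_regc0_mask = 0x0000ffff and
 * conf->dst == REG_C_0 on a little-endian host, reg_dst.offset =
 * rte_bsf32(0x0000ffff) = 0 and mask = 0x0000ffff << (32 -
 * rte_fls_u32(0x0000ffff)) = 0xffff0000, whose in-memory bytes
 * 00 00 ff ff are exactly the big-endian image of the 16 writable
 * low bits of reg_c[0].
 */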

/**
 * Convert MARK action to DV specification. This routine is used
 * only in extensive metadata mode and requires the metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
			    const struct rte_flow_action_mark *conf,
			    struct mlx5_flow_dv_modify_hdr_resource *resource,
			    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
					   priv->sh->dv_mark_mask);
	rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg;

	if (!mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "zero mark action mask");
	reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg > 0);
	if (reg == REG_C_0) {
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0 = rte_bsf32(msk_c0);

		data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
		mask = rte_cpu_to_be_32(mask) & msk_c0;
		mask = rte_cpu_to_be_32(mask << shl_c0);
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
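
/*
 * Worked example (illustrative): if the reported reg_c[0] mask is
 * 0xffff0000, then shl_c0 = rte_bsf32(0xffff0000) = 16 and the MARK
 * value and mask are shifted into the upper half of reg_c[0], leaving
 * the lower half untouched for other metadata users.
 */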

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 struct rte_flow_error *error)
{
	int reg =
		mlx5_flow_get_reg_id(dev, attr->transfer ?
					  MLX5_METADATA_FDB :
					    attr->egress ?
					    MLX5_METADATA_TX :
					    MLX5_METADATA_RX, 0, error);
	if (reg < 0)
		return rte_flow_error_set(error,
					  ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL, "unavailable "
					  "metadata register");
	return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_action_set_meta *conf,
			 struct rte_flow_error *error)
{
	uint32_t data = conf->data;
	uint32_t mask = conf->mask;
	struct rte_flow_item item = {
		.spec = &data,
		.mask = &mask,
	};
	struct field_modify_info reg_c_x[] = {
		[1] = {0, 0, 0},
	};
	int reg = flow_dv_get_metadata_reg(dev, attr, error);

	if (reg < 0)
		return reg;
	MLX5_ASSERT(reg != REG_NON);
	/*
	 * In the datapath code there are no endianness
	 * conversions for performance reasons; all
	 * pattern conversions are done in rte_flow.
	 */
	if (reg == REG_C_0) {
		struct mlx5_priv *priv = dev->data->dev_private;
		uint32_t msk_c0 = priv->sh->dv_regc0_mask;
		uint32_t shl_c0;

		MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
		shl_c0 = rte_bsf32(msk_c0);
#else
		shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
		mask <<= shl_c0;
		data <<= shl_c0;
		MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
	}
	reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
	/* The routine expects parameters in memory as big-endian ones. */
	return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4.hdr.type_of_service = conf->dscp;
	ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_dscp *conf =
		(const struct rte_flow_action_set_dscp *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	/*
	 * Even though the DSCP bit offset of IPv6 is not byte aligned,
	 * rdma-core only accepts the DSCP bits byte-aligned, starting at
	 * bit 0 through bit 5, to be compatible with IPv4. No need to
	 * shift the bits in the IPv6 case, as rdma-core requires a
	 * byte-aligned value.
	 */
	ipv6.hdr.vtc_flow = conf->dscp;
	ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
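
/*
 * Constant check (illustrative): RTE_IPV4_HDR_DSCP_MASK >> 2 and
 * RTE_IPV6_HDR_DSCP_MASK >> 22 both evaluate to 0x3f, i.e. the six
 * DSCP bits aligned to bit 0, which is the byte-aligned layout
 * rdma-core expects for both IP versions.
 */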
1322
1323 /**
1324  * Validate MARK item.
1325  *
1326  * @param[in] dev
1327  *   Pointer to the rte_eth_dev structure.
1328  * @param[in] item
1329  *   Item specification.
1330  * @param[in] attr
1331  *   Attributes of flow that includes this item.
1332  * @param[out] error
1333  *   Pointer to error structure.
1334  *
1335  * @return
1336  *   0 on success, a negative errno value otherwise and rte_errno is set.
1337  */
1338 static int
1339 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1340                            const struct rte_flow_item *item,
1341                            const struct rte_flow_attr *attr __rte_unused,
1342                            struct rte_flow_error *error)
1343 {
1344         struct mlx5_priv *priv = dev->data->dev_private;
1345         struct mlx5_dev_config *config = &priv->config;
1346         const struct rte_flow_item_mark *spec = item->spec;
1347         const struct rte_flow_item_mark *mask = item->mask;
1348         const struct rte_flow_item_mark nic_mask = {
1349                 .id = priv->sh->dv_mark_mask,
1350         };
1351         int ret;
1352
1353         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1354                 return rte_flow_error_set(error, ENOTSUP,
1355                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1356                                           "extended metadata feature"
1357                                           " isn't enabled");
1358         if (!mlx5_flow_ext_mreg_supported(dev))
1359                 return rte_flow_error_set(error, ENOTSUP,
1360                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1361                                           "extended metadata register"
1362                                           " isn't supported");
1363         if (!nic_mask.id)
1364                 return rte_flow_error_set(error, ENOTSUP,
1365                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1366                                           "extended metadata register"
1367                                           " isn't available");
1368         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1369         if (ret < 0)
1370                 return ret;
1371         if (!spec)
1372                 return rte_flow_error_set(error, EINVAL,
1373                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1374                                           item->spec,
1375                                           "data cannot be empty");
1376         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1377                 return rte_flow_error_set(error, EINVAL,
1378                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1379                                           &spec->id,
1380                                           "mark id exceeds the limit");
1381         if (!mask)
1382                 mask = &nic_mask;
1383         if (!mask->id)
1384                 return rte_flow_error_set(error, EINVAL,
1385                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1386                                         "mask cannot be zero");
1387
1388         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1389                                         (const uint8_t *)&nic_mask,
1390                                         sizeof(struct rte_flow_item_mark),
1391                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1392         if (ret < 0)
1393                 return ret;
1394         return 0;
1395 }
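
/*
 * Illustrative application-side sketch (not part of this driver) of a
 * pattern this validator accepts, assuming the PMD was started with
 * dv_xmeta_en=1 or 2; the usable id width depends on dv_mark_mask.
 *
 *     struct rte_flow_item_mark mark_spec = { .id = 42 };
 *     struct rte_flow_item_mark mark_mask = { .id = 0xffff };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_MARK,
 *               .spec = &mark_spec, .mask = &mark_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */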
1396
1397 /**
1398  * Validate META item.
1399  *
1400  * @param[in] dev
1401  *   Pointer to the rte_eth_dev structure.
1402  * @param[in] item
1403  *   Item specification.
1404  * @param[in] attr
1405  *   Attributes of flow that includes this item.
1406  * @param[out] error
1407  *   Pointer to error structure.
1408  *
1409  * @return
1410  *   0 on success, a negative errno value otherwise and rte_errno is set.
1411  */
1412 static int
1413 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1414                            const struct rte_flow_item *item,
1415                            const struct rte_flow_attr *attr,
1416                            struct rte_flow_error *error)
1417 {
1418         struct mlx5_priv *priv = dev->data->dev_private;
1419         struct mlx5_dev_config *config = &priv->config;
1420         const struct rte_flow_item_meta *spec = item->spec;
1421         const struct rte_flow_item_meta *mask = item->mask;
1422         struct rte_flow_item_meta nic_mask = {
1423                 .data = UINT32_MAX
1424         };
1425         int reg;
1426         int ret;
1427
1428         if (!spec)
1429                 return rte_flow_error_set(error, EINVAL,
1430                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1431                                           item->spec,
1432                                           "data cannot be empty");
1433         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1434                 if (!mlx5_flow_ext_mreg_supported(dev))
1435                         return rte_flow_error_set(error, ENOTSUP,
1436                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1437                                           "extended metadata register"
1438                                           " isn't supported");
1439                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1440                 if (reg < 0)
1441                         return reg;
1442                 if (reg == REG_NON)
1443                         return rte_flow_error_set(error, ENOTSUP,
1444                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1445                                         "unavailable extended metadata register");
1446                 if (reg == REG_B)
1447                         return rte_flow_error_set(error, ENOTSUP,
1448                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1449                                           "match on reg_b "
1450                                           "isn't supported");
1451                 if (reg != REG_A)
1452                         nic_mask.data = priv->sh->dv_meta_mask;
1453         } else if (attr->transfer) {
1454                 return rte_flow_error_set(error, ENOTSUP,
1455                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1456                                         "extended metadata feature "
1457                                         "should be enabled when "
1458                                         "meta item is requested "
1459                                         "with e-switch mode");
1460         }
1461         if (!mask)
1462                 mask = &rte_flow_item_meta_mask;
1463         if (!mask->data)
1464                 return rte_flow_error_set(error, EINVAL,
1465                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1466                                         "mask cannot be zero");
1467
1468         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1469                                         (const uint8_t *)&nic_mask,
1470                                         sizeof(struct rte_flow_item_meta),
1471                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1472         return ret;
1473 }
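
/*
 * Illustrative application-side sketch (not part of this driver): matching
 * the low 16 bits of the 32-bit metadata on ingress; the values are
 * arbitrary. In legacy mode (dv_xmeta_en=0) this item is rejected for
 * transfer rules, per the check above.
 *
 *     struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
 *     struct rte_flow_item_meta meta_mask = { .data = 0xffff };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_META,
 *               .spec = &meta_spec, .mask = &meta_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */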
1474
1475 /**
1476  * Validate TAG item.
1477  *
1478  * @param[in] dev
1479  *   Pointer to the rte_eth_dev structure.
1480  * @param[in] item
1481  *   Item specification.
1482  * @param[in] attr
1483  *   Attributes of flow that includes this item.
1484  * @param[out] error
1485  *   Pointer to error structure.
1486  *
1487  * @return
1488  *   0 on success, a negative errno value otherwise and rte_errno is set.
1489  */
1490 static int
1491 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1492                           const struct rte_flow_item *item,
1493                           const struct rte_flow_attr *attr __rte_unused,
1494                           struct rte_flow_error *error)
1495 {
1496         const struct rte_flow_item_tag *spec = item->spec;
1497         const struct rte_flow_item_tag *mask = item->mask;
1498         const struct rte_flow_item_tag nic_mask = {
1499                 .data = RTE_BE32(UINT32_MAX),
1500                 .index = 0xff,
1501         };
1502         int ret;
1503
1504         if (!mlx5_flow_ext_mreg_supported(dev))
1505                 return rte_flow_error_set(error, ENOTSUP,
1506                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1507                                           "extended metadata register"
1508                                           " isn't supported");
1509         if (!spec)
1510                 return rte_flow_error_set(error, EINVAL,
1511                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1512                                           item->spec,
1513                                           "data cannot be empty");
1514         if (!mask)
1515                 mask = &rte_flow_item_tag_mask;
1516         if (!mask->data)
1517                 return rte_flow_error_set(error, EINVAL,
1518                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1519                                         "mask cannot be zero");
1520
1521         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1522                                         (const uint8_t *)&nic_mask,
1523                                         sizeof(struct rte_flow_item_tag),
1524                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1525         if (ret < 0)
1526                 return ret;
1527         if (mask->index != 0xff)
1528                 return rte_flow_error_set(error, EINVAL,
1529                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1530                                           "partial mask for tag index"
1531                                           " is not supported");
1532         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1533         if (ret < 0)
1534                 return ret;
1535         MLX5_ASSERT(ret != REG_NON);
1536         return 0;
1537 }
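
/*
 * Illustrative application-side sketch (not part of this driver): the tag
 * index must be matched with a full mask, as enforced above; index 3 and
 * data 0x55 are arbitrary assumptions.
 *
 *     struct rte_flow_item_tag tag_spec = { .data = 0x55, .index = 3 };
 *     struct rte_flow_item_tag tag_mask = { .data = 0xff, .index = 0xff };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_TAG,
 *               .spec = &tag_spec, .mask = &tag_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */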
1538
1539 /**
1540  * Validate PORT_ID (vport) item.
1541  *
1542  * @param[in] dev
1543  *   Pointer to the rte_eth_dev structure.
1544  * @param[in] item
1545  *   Item specification.
1546  * @param[in] attr
1547  *   Attributes of flow that includes this item.
1548  * @param[in] item_flags
1549  *   Bit-fields that hold the items detected until now.
1550  * @param[out] error
1551  *   Pointer to error structure.
1552  *
1553  * @return
1554  *   0 on success, a negative errno value otherwise and rte_errno is set.
1555  */
1556 static int
1557 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1558                               const struct rte_flow_item *item,
1559                               const struct rte_flow_attr *attr,
1560                               uint64_t item_flags,
1561                               struct rte_flow_error *error)
1562 {
1563         const struct rte_flow_item_port_id *spec = item->spec;
1564         const struct rte_flow_item_port_id *mask = item->mask;
1565         const struct rte_flow_item_port_id switch_mask = {
1566                         .id = 0xffffffff,
1567         };
1568         struct mlx5_priv *esw_priv;
1569         struct mlx5_priv *dev_priv;
1570         int ret;
1571
1572         if (!attr->transfer)
1573                 return rte_flow_error_set(error, EINVAL,
1574                                           RTE_FLOW_ERROR_TYPE_ITEM,
1575                                           NULL,
1576                                           "match on port id is valid only"
1577                                           " when transfer flag is enabled");
1578         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1579                 return rte_flow_error_set(error, ENOTSUP,
1580                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1581                                           "multiple source ports are not"
1582                                           " supported");
1583         if (!mask)
1584                 mask = &switch_mask;
1585         if (mask->id != 0xffffffff)
1586                 return rte_flow_error_set(error, ENOTSUP,
1587                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1588                                            mask,
1589                                            "no support for partial mask on"
1590                                            " \"id\" field");
1591         ret = mlx5_flow_item_acceptable
1592                                 (item, (const uint8_t *)mask,
1593                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1594                                  sizeof(struct rte_flow_item_port_id),
1595                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1596         if (ret)
1597                 return ret;
1598         if (!spec)
1599                 return 0;
1600         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1601         if (!esw_priv)
1602                 return rte_flow_error_set(error, rte_errno,
1603                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1604                                           "failed to obtain E-Switch info for"
1605                                           " port");
1606         dev_priv = mlx5_dev_to_eswitch_info(dev);
1607         if (!dev_priv)
1608                 return rte_flow_error_set(error, rte_errno,
1609                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1610                                           NULL,
1611                                           "failed to obtain E-Switch info");
1612         if (esw_priv->domain_id != dev_priv->domain_id)
1613                 return rte_flow_error_set(error, EINVAL,
1614                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1615                                           "cannot match on a port from a"
1616                                           " different E-Switch");
1617         return 0;
1618 }
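
/*
 * Illustrative application-side sketch (not part of this driver): PORT_ID
 * is only valid in transfer rules and requires a full mask on the "id"
 * field; the attribute group and port id 1 are arbitrary assumptions.
 *
 *     struct rte_flow_attr attr = { .group = 1, .transfer = 1 };
 *     struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *               .spec = &pid_spec,
 *               .mask = &rte_flow_item_port_id_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */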
1619
1620 /**
1621  * Validate VLAN item.
1622  *
1623  * @param[in] item
1624  *   Item specification.
1625  * @param[in] item_flags
1626  *   Bit-fields that hold the items detected until now.
1627  * @param[in] dev
1628  *   Ethernet device flow is being created on.
1629  * @param[out] error
1630  *   Pointer to error structure.
1631  *
1632  * @return
1633  *   0 on success, a negative errno value otherwise and rte_errno is set.
1634  */
1635 static int
1636 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1637                            uint64_t item_flags,
1638                            struct rte_eth_dev *dev,
1639                            struct rte_flow_error *error)
1640 {
1641         const struct rte_flow_item_vlan *mask = item->mask;
1642         const struct rte_flow_item_vlan nic_mask = {
1643                 .tci = RTE_BE16(UINT16_MAX),
1644                 .inner_type = RTE_BE16(UINT16_MAX),
1645                 .has_more_vlan = 1,
1646         };
1647         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1648         int ret;
1649         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1650                                         MLX5_FLOW_LAYER_INNER_L4) :
1651                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1652                                         MLX5_FLOW_LAYER_OUTER_L4);
1653         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1654                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1655
1656         if (item_flags & vlanm)
1657                 return rte_flow_error_set(error, EINVAL,
1658                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1659                                           "multiple VLAN layers not supported");
1660         else if ((item_flags & l34m) != 0)
1661                 return rte_flow_error_set(error, EINVAL,
1662                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1663                                           "VLAN cannot follow L3/L4 layer");
1664         if (!mask)
1665                 mask = &rte_flow_item_vlan_mask;
1666         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1667                                         (const uint8_t *)&nic_mask,
1668                                         sizeof(struct rte_flow_item_vlan),
1669                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1670         if (ret)
1671                 return ret;
1672         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1673                 struct mlx5_priv *priv = dev->data->dev_private;
1674
1675                 if (priv->vmwa_context) {
1676                         /*
1677                          * Non-NULL context means we have a virtual machine
1678                          * and SR-IOV enabled, we have to create VLAN interface
1679                          * to make hypervisor to setup E-Switch vport
1680                          * context correctly. We avoid creating the multiple
1681                          * VLAN interfaces, so we cannot support VLAN tag mask.
1682                          */
1683                         return rte_flow_error_set(error, EINVAL,
1684                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1685                                                   item,
1686                                                   "VLAN tag mask is not"
1687                                                   " supported in virtual"
1688                                                   " environment");
1689                 }
1690         }
1691         return 0;
1692 }
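
/*
 * Illustrative application-side sketch (not part of this driver): matching
 * VID 100 only, with the 0x0fff TCI mask that remains acceptable on the
 * VM/SR-IOV workaround path above.
 *
 *     struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(100) };
 *     struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0fff) };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *               .spec = &vlan_spec, .mask = &vlan_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */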
1693
1694 /*
1695  * GTP flags are contained in 1 byte of the format:
1696  * -------------------------------------------
1697  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1698  * |-----------------------------------------|
1699  * | value | Version | PT | Res | E | S | PN |
1700  * -------------------------------------------
1701  *
1702  * Matching is supported only for GTP flags E, S, PN.
1703  */
1704 #define MLX5_GTP_FLAGS_MASK     0x07
1705
1706 /**
1707  * Validate GTP item.
1708  *
1709  * @param[in] dev
1710  *   Pointer to the rte_eth_dev structure.
1711  * @param[in] item
1712  *   Item specification.
1713  * @param[in] item_flags
1714  *   Bit-fields that hold the items detected until now.
1715  * @param[out] error
1716  *   Pointer to error structure.
1717  *
1718  * @return
1719  *   0 on success, a negative errno value otherwise and rte_errno is set.
1720  */
1721 static int
1722 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1723                           const struct rte_flow_item *item,
1724                           uint64_t item_flags,
1725                           struct rte_flow_error *error)
1726 {
1727         struct mlx5_priv *priv = dev->data->dev_private;
1728         const struct rte_flow_item_gtp *spec = item->spec;
1729         const struct rte_flow_item_gtp *mask = item->mask;
1730         const struct rte_flow_item_gtp nic_mask = {
1731                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1732                 .msg_type = 0xff,
1733                 .teid = RTE_BE32(0xffffffff),
1734         };
1735
1736         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1737                 return rte_flow_error_set(error, ENOTSUP,
1738                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1739                                           "GTP support is not enabled");
1740         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1741                 return rte_flow_error_set(error, ENOTSUP,
1742                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1743                                           "multiple tunnel layers not"
1744                                           " supported");
1745         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1746                 return rte_flow_error_set(error, EINVAL,
1747                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1748                                           "no outer UDP layer found");
1749         if (!mask)
1750                 mask = &rte_flow_item_gtp_mask;
1751         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1752                 return rte_flow_error_set(error, ENOTSUP,
1753                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1754                                           "matching is supported only on"
1755                                           " GTP flags E, S, PN");
1756         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1757                                          (const uint8_t *)&nic_mask,
1758                                          sizeof(struct rte_flow_item_gtp),
1759                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1760 }
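
/*
 * Illustrative application-side sketch (not part of this driver): GTP must
 * follow an outer UDP layer, as enforced above; the TEID value is an
 * arbitrary assumption.
 *
 *     struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(1234) };
 *     struct rte_flow_item_gtp gtp_mask = { .teid = RTE_BE32(0xffffffff) };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_GTP,
 *               .spec = &gtp_spec, .mask = &gtp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */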
1761
1762 /**
1763  * Validate GTP PSC item.
1764  *
1765  * @param[in] item
1766  *   Item specification.
1767  * @param[in] last_item
1768  *   Previous validated item in the pattern items.
1769  * @param[in] gtp_item
1770  *   Previous GTP item specification.
1771  * @param[in] attr
1772  *   Pointer to flow attributes.
1773  * @param[out] error
1774  *   Pointer to error structure.
1775  *
1776  * @return
1777  *   0 on success, a negative errno value otherwise and rte_errno is set.
1778  */
1779 static int
1780 flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
1781                               uint64_t last_item,
1782                               const struct rte_flow_item *gtp_item,
1783                               const struct rte_flow_attr *attr,
1784                               struct rte_flow_error *error)
1785 {
1786         const struct rte_flow_item_gtp *gtp_spec;
1787         const struct rte_flow_item_gtp *gtp_mask;
1788         const struct rte_flow_item_gtp_psc *spec;
1789         const struct rte_flow_item_gtp_psc *mask;
1790         const struct rte_flow_item_gtp_psc nic_mask = {
1791                 .pdu_type = 0xFF,
1792                 .qfi = 0xFF,
1793         };
1794
1795         if (!gtp_item || !(last_item & MLX5_FLOW_LAYER_GTP))
1796                 return rte_flow_error_set
1797                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1798                          "GTP PSC item must be preceded with GTP item");
1799         gtp_spec = gtp_item->spec;
1800         gtp_mask = gtp_item->mask ? gtp_item->mask : &rte_flow_item_gtp_mask;
1801         /* Reject if the GTP E flag is requested to match zero. */
1802         if (gtp_spec &&
1803                 (gtp_mask->v_pt_rsv_flags &
1804                 ~gtp_spec->v_pt_rsv_flags & MLX5_GTP_EXT_HEADER_FLAG))
1805                 return rte_flow_error_set
1806                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1807                          "GTP E flag must be 1 to match GTP PSC");
1808         /* Check the flow is not created in group zero. */
1809         if (!attr->transfer && !attr->group)
1810                 return rte_flow_error_set
1811                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1812                          "GTP PSC is not supported for group 0");
1813         /* Validate the GTP PSC spec fields, if present. */
1814         if (!item->spec)
1815                 return 0;
1816         spec = item->spec;
1817         mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
1818         if (spec->pdu_type > MLX5_GTP_EXT_MAX_PDU_TYPE)
1819                 return rte_flow_error_set
1820                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
1821                          "PDU type should be smaller than 16");
1822         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1823                                          (const uint8_t *)&nic_mask,
1824                                          sizeof(struct rte_flow_item_gtp_psc),
1825                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1826 }
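
/*
 * Illustrative application-side sketch (not part of this driver): the
 * preceding GTP item must request the E flag (bit 0x04 of v_pt_rsv_flags)
 * set, and the rule must live in a non-zero group or be a transfer rule;
 * QFI 9 is an arbitrary assumption. A complete pattern would be preceded
 * by ETH/IPV4/UDP items.
 *
 *     struct rte_flow_item_gtp gtp_spec = { .v_pt_rsv_flags = 0x04 };
 *     struct rte_flow_item_gtp gtp_mask = { .v_pt_rsv_flags = 0x04 };
 *     struct rte_flow_item_gtp_psc psc_spec = { .qfi = 9 };
 *     struct rte_flow_item_gtp_psc psc_mask = { .qfi = 0xff };
 *     struct rte_flow_item tail[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_GTP,
 *               .spec = &gtp_spec, .mask = &gtp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_GTP_PSC,
 *               .spec = &psc_spec, .mask = &psc_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */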
1827
1828 /**
1829  * Validate IPV4 item.
1830  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1831  * add specific validation of the fragment_offset field.
1832  *
1833  * @param[in] item
1834  *   Item specification.
1835  * @param[in] item_flags
1836  *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
1837  * @param[out] error
1838  *   Pointer to error structure.
1839  *
1840  * @return
1841  *   0 on success, a negative errno value otherwise and rte_errno is set.
1842  */
1843 static int
1844 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1845                            uint64_t item_flags,
1846                            uint64_t last_item,
1847                            uint16_t ether_type,
1848                            struct rte_flow_error *error)
1849 {
1850         int ret;
1851         const struct rte_flow_item_ipv4 *spec = item->spec;
1852         const struct rte_flow_item_ipv4 *last = item->last;
1853         const struct rte_flow_item_ipv4 *mask = item->mask;
1854         rte_be16_t fragment_offset_spec = 0;
1855         rte_be16_t fragment_offset_last = 0;
1856         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1857                 .hdr = {
1858                         .src_addr = RTE_BE32(0xffffffff),
1859                         .dst_addr = RTE_BE32(0xffffffff),
1860                         .type_of_service = 0xff,
1861                         .fragment_offset = RTE_BE16(0xffff),
1862                         .next_proto_id = 0xff,
1863                         .time_to_live = 0xff,
1864                 },
1865         };
1866
1867         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1868                                            ether_type, &nic_ipv4_mask,
1869                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1870         if (ret < 0)
1871                 return ret;
1872         if (spec && mask)
1873                 fragment_offset_spec = spec->hdr.fragment_offset &
1874                                        mask->hdr.fragment_offset;
1875         if (!fragment_offset_spec)
1876                 return 0;
1877         /*
1878          * spec and mask are valid, enforce using full mask to make sure the
1879          * complete value is used correctly.
1880          */
1881         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1882                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1883                 return rte_flow_error_set(error, EINVAL,
1884                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1885                                           item, "must use full mask for"
1886                                           " fragment_offset");
1887         /*
1888          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1889          * indicating this is the 1st fragment of a fragmented packet.
1890          * This is not yet supported in MLX5, return appropriate error message.
1891          */
1892         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1893                 return rte_flow_error_set(error, ENOTSUP,
1894                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1895                                           "match on first fragment not "
1896                                           "supported");
1897         if (fragment_offset_spec && !last)
1898                 return rte_flow_error_set(error, ENOTSUP,
1899                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1900                                           "specified value not supported");
1901         /* spec and last are valid, validate the specified range. */
1902         fragment_offset_last = last->hdr.fragment_offset &
1903                                mask->hdr.fragment_offset;
1904         /*
1905          * Match on fragment_offset spec 0x2001 and last 0x3fff
1906          * means MF is 1 and frag-offset is > 0.
1907          * Such a packet is a 2nd or later fragment, excluding the last.
1908          * This is not yet supported in MLX5, return appropriate
1909          * error message.
1910          */
1911         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1912             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1913                 return rte_flow_error_set(error, ENOTSUP,
1914                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1915                                           last, "match on following "
1916                                           "fragments not supported");
1917         /*
1918          * Match on fragment_offset spec 0x0001 and last 0x1fff
1919          * means MF is 0 and frag-offset is > 0.
1920          * Such a packet is the last fragment of a fragmented packet.
1921          * This is not yet supported in MLX5, return appropriate
1922          * error message.
1923          */
1924         if (fragment_offset_spec == RTE_BE16(1) &&
1925             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1926                 return rte_flow_error_set(error, ENOTSUP,
1927                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1928                                           last, "match on last "
1929                                           "fragment not supported");
1930         /*
1931          * Match on fragment_offset spec 0x0001 and last 0x3fff
1932          * means MF and/or frag-offset is not 0.
1933          * This is a fragmented packet.
1934          * Other range values are invalid and rejected.
1935          */
1936         if (!(fragment_offset_spec == RTE_BE16(1) &&
1937               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1938                 return rte_flow_error_set(error, ENOTSUP,
1939                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1940                                           "specified range not supported");
1941         return 0;
1942 }
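
/*
 * Illustrative application-side sketch (not part of this driver): the only
 * accepted spec/last range, matching any fragment (MF and/or frag-offset
 * non-zero), per the checks above.
 *
 *     struct rte_flow_item_ipv4 ip_spec = {
 *             .hdr.fragment_offset = RTE_BE16(1),
 *     };
 *     struct rte_flow_item_ipv4 ip_last = {
 *             .hdr.fragment_offset = RTE_BE16(0x3fff),
 *     };
 *     struct rte_flow_item_ipv4 ip_mask = {
 *             .hdr.fragment_offset = RTE_BE16(0x3fff),
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .last = &ip_last, .mask = &ip_mask,
 *     };
 */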
1943
1944 /**
1945  * Validate IPV6 fragment extension item.
1946  *
1947  * @param[in] item
1948  *   Item specification.
1949  * @param[in] item_flags
1950  *   Bit-fields that hold the items detected until now.
1951  * @param[out] error
1952  *   Pointer to error structure.
1953  *
1954  * @return
1955  *   0 on success, a negative errno value otherwise and rte_errno is set.
1956  */
1957 static int
1958 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1959                                     uint64_t item_flags,
1960                                     struct rte_flow_error *error)
1961 {
1962         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1963         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1964         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1965         rte_be16_t frag_data_spec = 0;
1966         rte_be16_t frag_data_last = 0;
1967         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1968         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1969                                       MLX5_FLOW_LAYER_OUTER_L4;
1970         int ret = 0;
1971         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1972                 .hdr = {
1973                         .next_header = 0xff,
1974                         .frag_data = RTE_BE16(0xffff),
1975                 },
1976         };
1977
1978         if (item_flags & l4m)
1979                 return rte_flow_error_set(error, EINVAL,
1980                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1981                                           "ipv6 fragment extension item cannot "
1982                                           "follow L4 item.");
1983         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1984             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1985                 return rte_flow_error_set(error, EINVAL,
1986                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1987                                           "ipv6 fragment extension item must "
1988                                           "follow ipv6 item");
1989         if (spec && mask)
1990                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1991         if (!frag_data_spec)
1992                 return 0;
1993         /*
1994          * spec and mask are valid, enforce using full mask to make sure the
1995          * complete value is used correctly.
1996          */
1997         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1998                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1999                 return rte_flow_error_set(error, EINVAL,
2000                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
2001                                           item, "must use full mask for"
2002                                           " frag_data");
2003         /*
2004          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
2005          * This is 1st fragment of fragmented packet.
2006          */
2007         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK))
2008                 return rte_flow_error_set(error, ENOTSUP,
2009                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2010                                           "match on first fragment not "
2011                                           "supported");
2012         if (frag_data_spec && !last)
2013                 return rte_flow_error_set(error, EINVAL,
2014                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2015                                           "specified value not supported");
2016         ret = mlx5_flow_item_acceptable
2017                                 (item, (const uint8_t *)mask,
2018                                  (const uint8_t *)&nic_mask,
2019                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
2020                                  MLX5_ITEM_RANGE_ACCEPTED, error);
2021         if (ret)
2022                 return ret;
2023         /* spec and last are valid, validate the specified range. */
2024         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
2025         /*
2026          * Match on frag_data spec 0x0009 and last 0xfff9
2027          * means M is 1 and frag-offset is > 0.
2028          * Such a packet is a 2nd or later fragment, excluding the last.
2029          * This is not yet supported in MLX5, return appropriate
2030          * error message.
2031          */
2032         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
2033                                        RTE_IPV6_EHDR_MF_MASK) &&
2034             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
2035                 return rte_flow_error_set(error, ENOTSUP,
2036                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2037                                           last, "match on following "
2038                                           "fragments not supported");
2039         /*
2040          * Match on frag_data spec 0x0008 and last 0xfff8
2041          * means M is 0 and frag-offset is > 0.
2042          * Such a packet is the last fragment of a fragmented packet.
2043          * This is not yet supported in MLX5, return appropriate
2044          * error message.
2045          */
2046         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
2047             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
2048                 return rte_flow_error_set(error, ENOTSUP,
2049                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
2050                                           last, "match on last "
2051                                           "fragment not supported");
        /*
         * Match on frag_data spec 0x0001 and last 0xfff9
         * means M and/or frag-offset is not 0.
         * This is a fragmented packet.
         * Other range values are invalid and rejected.
         */
        if (!(frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_MF_MASK) &&
              frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
                                          "specified range not supported");
        return 0;
2056 }
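
/*
 * Illustrative application-side sketch (not part of this driver): the only
 * accepted spec/last range for frag_data, matching any fragment, per the
 * checks above.
 *
 *     struct rte_flow_item_ipv6_frag_ext frag_spec = {
 *             .hdr.frag_data = RTE_BE16(1),
 *     };
 *     struct rte_flow_item_ipv6_frag_ext frag_last = {
 *             .hdr.frag_data = RTE_BE16(0xfff9),
 *     };
 *     struct rte_flow_item_ipv6_frag_ext frag_mask = {
 *             .hdr.frag_data = RTE_BE16(0xffff),
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
 *             .spec = &frag_spec, .last = &frag_last, .mask = &frag_mask,
 *     };
 */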
2057
2058 /**
2059  * Validate the pop VLAN action.
2060  *
2061  * @param[in] dev
2062  *   Pointer to the rte_eth_dev structure.
2063  * @param[in] action_flags
2064  *   Holds the actions detected until now.
2065  * @param[in] action
2066  *   Pointer to the pop vlan action.
2067  * @param[in] item_flags
2068  *   The items found in this flow rule.
2069  * @param[in] attr
2070  *   Pointer to flow attributes.
2071  * @param[out] error
2072  *   Pointer to error structure.
2073  *
2074  * @return
2075  *   0 on success, a negative errno value otherwise and rte_errno is set.
2076  */
2077 static int
2078 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2079                                  uint64_t action_flags,
2080                                  const struct rte_flow_action *action,
2081                                  uint64_t item_flags,
2082                                  const struct rte_flow_attr *attr,
2083                                  struct rte_flow_error *error)
2084 {
2085         const struct mlx5_priv *priv = dev->data->dev_private;
2086
2089         if (!priv->sh->pop_vlan_action)
2090                 return rte_flow_error_set(error, ENOTSUP,
2091                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2092                                           NULL,
2093                                           "pop vlan action is not supported");
2094         if (attr->egress)
2095                 return rte_flow_error_set(error, ENOTSUP,
2096                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2097                                           NULL,
2098                                           "pop vlan action not supported for "
2099                                           "egress");
2100         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2101                 return rte_flow_error_set(error, ENOTSUP,
2102                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2103                                           "no support for multiple VLAN "
2104                                           "actions");
2105         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2106         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2107             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2108                 return rte_flow_error_set(error, ENOTSUP,
2109                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2110                                           NULL,
2111                                           "cannot pop vlan after decap without "
2112                                           "match on inner vlan in the flow");
2113         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2114         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2115             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2116                 return rte_flow_error_set(error, ENOTSUP,
2117                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2118                                           NULL,
2119                                           "cannot pop vlan without a "
2120                                           "match on (outer) vlan in the flow");
2121         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2122                 return rte_flow_error_set(error, EINVAL,
2123                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2124                                           "wrong action order, port_id should "
2125                                           "be after pop VLAN action");
2126         if (!attr->transfer && priv->representor)
2127                 return rte_flow_error_set(error, ENOTSUP,
2128                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2129                                           "pop vlan action for VF representor "
2130                                           "not supported on NIC table");
2131         return 0;
2132 }
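
/*
 * Illustrative application-side sketch (not part of this driver): without
 * a preceding decap, pop VLAN requires an outer VLAN match in the same
 * rule, as enforced above; the queue index is an arbitrary assumption.
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */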
2133
2134 /**
2135  * Get VLAN default info from vlan match info.
2136  *
2137  * @param[in] items
2138  *   The list of item specifications.
2139  * @param[out] vlan
2140  *   Pointer to the VLAN info to fill.
2144  */
2145 static void
2146 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2147                                   struct rte_vlan_hdr *vlan)
2148 {
2149         const struct rte_flow_item_vlan nic_mask = {
2150                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2151                                 MLX5DV_FLOW_VLAN_VID_MASK),
2152                 .inner_type = RTE_BE16(0xffff),
2153         };
2154
2155         if (items == NULL)
2156                 return;
2157         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2158                 int type = items->type;
2159
2160                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2161                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2162                         break;
2163         }
2164         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2165                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2166                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2167
2168                 /* If VLAN item in pattern doesn't contain data, return here. */
2169                 if (!vlan_v)
2170                         return;
2171                 if (!vlan_m)
2172                         vlan_m = &nic_mask;
2173                 /* Only full match values are accepted */
2174                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2175                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2176                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2177                         vlan->vlan_tci |=
2178                                 rte_be_to_cpu_16(vlan_v->tci &
2179                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2180                 }
2181                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2182                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2183                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2184                         vlan->vlan_tci |=
2185                                 rte_be_to_cpu_16(vlan_v->tci &
2186                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2187                 }
2188                 if (vlan_m->inner_type == nic_mask.inner_type)
2189                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2190                                                            vlan_m->inner_type);
2191         }
2192 }
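
/*
 * Worked example for the extraction above (values are arbitrary): a fully
 * masked TCI of 0xE064 carries PCP (0xE064 & 0xE000) >> 13 = 7 and
 * VID 0xE064 & 0x0FFF = 100, using the PCP/VID masks defined at the top
 * of this file.
 */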
2193
2194 /**
2195  * Validate the push VLAN action.
2196  *
2197  * @param[in] dev
2198  *   Pointer to the rte_eth_dev structure.
2199  * @param[in] action_flags
2200  *   Holds the actions detected until now.
2201  * @param[in] vlan_m
2202  *   VLAN item mask from the flow pattern, or NULL if not present.
2203  * @param[in] action
2204  *   Pointer to the action structure.
2205  * @param[in] attr
2206  *   Pointer to flow attributes.
2207  * @param[out] error
2208  *   Pointer to error structure.
2209  *
2210  * @return
2211  *   0 on success, a negative errno value otherwise and rte_errno is set.
2212  */
2213 static int
2214 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2215                                   uint64_t action_flags,
2216                                   const struct rte_flow_item_vlan *vlan_m,
2217                                   const struct rte_flow_action *action,
2218                                   const struct rte_flow_attr *attr,
2219                                   struct rte_flow_error *error)
2220 {
2221         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2222         const struct mlx5_priv *priv = dev->data->dev_private;
2223
2224         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2225             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2226                 return rte_flow_error_set(error, EINVAL,
2227                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2228                                           "invalid vlan ethertype");
2229         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2230                 return rte_flow_error_set(error, EINVAL,
2231                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2232                                           "wrong action order, port_id should "
2233                                           "be after push VLAN");
2234         if (!attr->transfer && priv->representor)
2235                 return rte_flow_error_set(error, ENOTSUP,
2236                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2237                                           "push vlan action for VF representor "
2238                                           "not supported on NIC table");
2239         if (vlan_m &&
2240             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2241             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2242                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2243             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2244             !(mlx5_flow_find_action
2245                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2246                 return rte_flow_error_set(error, EINVAL,
2247                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2248                                           "not full match mask on VLAN PCP and "
2249                                           "there is no of_set_vlan_pcp action, "
2250                                           "push VLAN action cannot figure out "
2251                                           "PCP value");
2252         if (vlan_m &&
2253             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2254             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2255                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2256             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2257             !(mlx5_flow_find_action
2258                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2259                 return rte_flow_error_set(error, EINVAL,
2260                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2261                                           "not full match mask on VLAN VID and "
2262                                           "there is no of_set_vlan_vid action, "
2263                                           "push VLAN action cannot figure out "
2264                                           "VID value");
2266         return 0;
2267 }
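
/*
 * Illustrative application-side sketch (not part of this driver): when the
 * pattern provides no fully masked PCP/VID, the pushed header must take
 * its values from explicit set actions placed after the push, as enforced
 * above; VID 100 and PCP 3 are arbitrary assumptions.
 *
 *     const struct rte_flow_action_of_push_vlan push = {
 *             .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *     };
 *     const struct rte_flow_action_of_set_vlan_vid vid = {
 *             .vlan_vid = RTE_BE16(100),
 *     };
 *     const struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *             { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */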
2268
2269 /**
2270  * Validate the set VLAN PCP.
2271  *
2272  * @param[in] action_flags
2273  *   Holds the actions detected until now.
2274  * @param[in] actions
2275  *   Pointer to the list of actions remaining in the flow rule.
2276  * @param[out] error
2277  *   Pointer to error structure.
2278  *
2279  * @return
2280  *   0 on success, a negative errno value otherwise and rte_errno is set.
2281  */
2282 static int
2283 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2284                                      const struct rte_flow_action actions[],
2285                                      struct rte_flow_error *error)
2286 {
2287         const struct rte_flow_action *action = actions;
2288         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2289
2290         if (conf->vlan_pcp > 7)
2291                 return rte_flow_error_set(error, EINVAL,
2292                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2293                                           "VLAN PCP value is too big");
2294         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2295                 return rte_flow_error_set(error, ENOTSUP,
2296                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2297                                           "set VLAN PCP action must follow "
2298                                           "the push VLAN action");
2299         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2300                 return rte_flow_error_set(error, ENOTSUP,
2301                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2302                                           "Multiple VLAN PCP modifications are "
2303                                           "not supported");
2304         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2305                 return rte_flow_error_set(error, EINVAL,
2306                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2307                                           "wrong action order, port_id should "
2308                                           "be after set VLAN PCP");
2309         return 0;
2310 }
2311
2312 /**
2313  * Validate the set VLAN VID.
2314  *
2315  * @param[in] item_flags
2316  *   Holds the items detected in this rule.
2317  * @param[in] action_flags
2318  *   Holds the actions detected until now.
2319  * @param[in] actions
2320  *   Pointer to the list of actions remaining in the flow rule.
2321  * @param[out] error
2322  *   Pointer to error structure.
2323  *
2324  * @return
2325  *   0 on success, a negative errno value otherwise and rte_errno is set.
2326  */
2327 static int
2328 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2329                                      uint64_t action_flags,
2330                                      const struct rte_flow_action actions[],
2331                                      struct rte_flow_error *error)
2332 {
2333         const struct rte_flow_action *action = actions;
2334         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2335
2336         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2337                 return rte_flow_error_set(error, EINVAL,
2338                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2339                                           "VLAN VID value is too big");
2340         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2341             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2342                 return rte_flow_error_set(error, ENOTSUP,
2343                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2344                                           "set VLAN VID action must follow push"
2345                                           " VLAN action or match on VLAN item");
2346         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2347                 return rte_flow_error_set(error, ENOTSUP,
2348                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2349                                           "Multiple VLAN VID modifications are "
2350                                           "not supported");
2351         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2352                 return rte_flow_error_set(error, EINVAL,
2353                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2354                                           "wrong action order, port_id should "
2355                                           "be after set VLAN VID");
2356         return 0;
2357 }
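
/*
 * Illustrative application-side sketch (not part of this driver): without
 * a preceding push VLAN, set VLAN VID is only valid when the rule matches
 * on a VLAN item, as enforced above; VID 200 is an arbitrary assumption.
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     const struct rte_flow_action_of_set_vlan_vid vid = {
 *             .vlan_vid = RTE_BE16(200),
 *     };
 */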
2358
2359 /**
2360  * Validate the FLAG action.
2361  *
2362  * @param[in] dev
2363  *   Pointer to the rte_eth_dev structure.
2364  * @param[in] action_flags
2365  *   Holds the actions detected until now.
2366  * @param[in] attr
2367  *   Pointer to flow attributes.
2368  * @param[out] error
2369  *   Pointer to error structure.
2370  *
2371  * @return
2372  *   0 on success, a negative errno value otherwise and rte_errno is set.
2373  */
2374 static int
2375 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2376                              uint64_t action_flags,
2377                              const struct rte_flow_attr *attr,
2378                              struct rte_flow_error *error)
2379 {
2380         struct mlx5_priv *priv = dev->data->dev_private;
2381         struct mlx5_dev_config *config = &priv->config;
2382         int ret;
2383
2384         /* Fall back if no extended metadata register support. */
2385         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2386                 return mlx5_flow_validate_action_flag(action_flags, attr,
2387                                                       error);
2388         /* Extensive metadata mode requires registers. */
2389         if (!mlx5_flow_ext_mreg_supported(dev))
2390                 return rte_flow_error_set(error, ENOTSUP,
2391                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2392                                           "no metadata registers "
2393                                           "to support flag action");
2394         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2395                 return rte_flow_error_set(error, ENOTSUP,
2396                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2397                                           "extended metadata register"
2398                                           " isn't available");
2399         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2400         if (ret < 0)
2401                 return ret;
2402         MLX5_ASSERT(ret > 0);
2403         if (action_flags & MLX5_FLOW_ACTION_MARK)
2404                 return rte_flow_error_set(error, EINVAL,
2405                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2406                                           "can't mark and flag in same flow");
2407         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2408                 return rte_flow_error_set(error, EINVAL,
2409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2410                                           "can't have 2 flag"
2411                                           " actions in same flow");
2412         return 0;
2413 }
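
/*
 * Illustrative application-side sketch (not part of this driver): a rule
 * using the FLAG action; matching packets are then delivered with
 * PKT_RX_FDIR set in mbuf->ol_flags. The queue index is an arbitrary
 * assumption.
 *
 *     const struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_FLAG },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */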
2414
2415 /**
2416  * Validate MARK action.
2417  *
2418  * @param[in] dev
2419  *   Pointer to the rte_eth_dev structure.
2420  * @param[in] action
2421  *   Pointer to action.
2422  * @param[in] action_flags
2423  *   Holds the actions detected until now.
2424  * @param[in] attr
2425  *   Pointer to flow attributes.
2426  * @param[out] error
2427  *   Pointer to error structure.
2428  *
2429  * @return
2430  *   0 on success, a negative errno value otherwise and rte_errno is set.
2431  */
2432 static int
2433 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2434                              const struct rte_flow_action *action,
2435                              uint64_t action_flags,
2436                              const struct rte_flow_attr *attr,
2437                              struct rte_flow_error *error)
2438 {
2439         struct mlx5_priv *priv = dev->data->dev_private;
2440         struct mlx5_dev_config *config = &priv->config;
2441         const struct rte_flow_action_mark *mark = action->conf;
2442         int ret;
2443
2444         /* Fall back if no extended metadata register support. */
2445         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2446                 return mlx5_flow_validate_action_mark(action, action_flags,
2447                                                       attr, error);
2448         /* Extensive metadata mode requires registers. */
2449         if (!mlx5_flow_ext_mreg_supported(dev))
2450                 return rte_flow_error_set(error, ENOTSUP,
2451                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2452                                           "no metadata registers "
2453                                           "to support mark action");
2454         if (!priv->sh->dv_mark_mask)
2455                 return rte_flow_error_set(error, ENOTSUP,
2456                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2457                                           "extended metadata register"
2458                                           " isn't available");
2459         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2460         if (ret < 0)
2461                 return ret;
2462         MLX5_ASSERT(ret > 0);
2463         if (!mark)
2464                 return rte_flow_error_set(error, EINVAL,
2465                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2466                                           "configuration cannot be null");
2467         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2468                 return rte_flow_error_set(error, EINVAL,
2469                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2470                                           &mark->id,
2471                                           "mark id exceeds the limit");
2472         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2473                 return rte_flow_error_set(error, EINVAL,
2474                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2475                                           "can't flag and mark in same flow");
2476         if (action_flags & MLX5_FLOW_ACTION_MARK)
2477                 return rte_flow_error_set(error, EINVAL,
2478                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2479                                           "can't have 2 mark actions in same"
2480                                           " flow");
2481         return 0;
2482 }
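
/*
 * Illustrative application-side sketch (not part of this driver): MARK and
 * FLAG are mutually exclusive within one flow, as enforced above; matching
 * packets carry the id in mbuf->hash.fdir.hi with PKT_RX_FDIR_ID set. The
 * id and queue index are arbitrary assumptions.
 *
 *     const struct rte_flow_action_mark mark = { .id = 0xbeef };
 *     const struct rte_flow_action_queue queue = { .index = 0 };
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */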
2483
2484 /**
2485  * Validate SET_META action.
2486  *
2487  * @param[in] dev
2488  *   Pointer to the rte_eth_dev structure.
2489  * @param[in] action
2490  *   Pointer to the action structure.
2491  * @param[in] action_flags
2492  *   Holds the actions detected until now.
2493  * @param[in] attr
2494  *   Pointer to flow attributes.
2495  * @param[out] error
2496  *   Pointer to error structure.
2497  *
2498  * @return
2499  *   0 on success, a negative errno value otherwise and rte_errno is set.
2500  */
2501 static int
2502 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2503                                  const struct rte_flow_action *action,
2504                                  uint64_t action_flags __rte_unused,
2505                                  const struct rte_flow_attr *attr,
2506                                  struct rte_flow_error *error)
2507 {
2508         const struct rte_flow_action_set_meta *conf;
2509         uint32_t nic_mask = UINT32_MAX;
2510         int reg;
2511
2512         if (!mlx5_flow_ext_mreg_supported(dev))
2513                 return rte_flow_error_set(error, ENOTSUP,
2514                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2515                                           "extended metadata register"
2516                                           " isn't supported");
2517         reg = flow_dv_get_metadata_reg(dev, attr, error);
2518         if (reg < 0)
2519                 return reg;
2520         if (reg == REG_NON)
2521                 return rte_flow_error_set(error, ENOTSUP,
2522                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2523                                           "unavailable extended metadata register");
2524         if (reg != REG_A && reg != REG_B) {
2525                 struct mlx5_priv *priv = dev->data->dev_private;
2526
2527                 nic_mask = priv->sh->dv_meta_mask;
2528         }
2529         if (!(action->conf))
2530                 return rte_flow_error_set(error, EINVAL,
2531                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2532                                           "configuration cannot be null");
2533         conf = (const struct rte_flow_action_set_meta *)action->conf;
2534         if (!conf->mask)
2535                 return rte_flow_error_set(error, EINVAL,
2536                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2537                                           "zero mask doesn't have any effect");
2538         if (conf->mask & ~nic_mask)
2539                 return rte_flow_error_set(error, EINVAL,
2540                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2541                                           "metadata must be within reg C0");
2542         return 0;
2543 }
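
/*
 * Illustrative sketch (not part of the driver): a SET_META request that
 * passes the validation above; the values are assumptions. The mask must
 * be non-zero and fit the NIC metadata register mask (reg C0 when REG_A
 * and REG_B are not in use):
 *
 *     struct rte_flow_action_set_meta meta_conf = {
 *             .data = 0x1234,
 *             .mask = 0xffff,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *             .conf = &meta_conf,
 *     };
 */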
2544
2545 /**
2546  * Validate SET_TAG action.
2547  *
2548  * @param[in] dev
2549  *   Pointer to the rte_eth_dev structure.
2550  * @param[in] action
2551  *   Pointer to the action structure.
2552  * @param[in] action_flags
2553  *   Holds the actions detected until now.
2554  * @param[in] attr
2555  *   Pointer to flow attributes
2556  * @param[out] error
2557  *   Pointer to error structure.
2558  *
2559  * @return
2560  *   0 on success, a negative errno value otherwise and rte_errno is set.
2561  */
2562 static int
2563 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2564                                 const struct rte_flow_action *action,
2565                                 uint64_t action_flags,
2566                                 const struct rte_flow_attr *attr,
2567                                 struct rte_flow_error *error)
2568 {
2569         const struct rte_flow_action_set_tag *conf;
2570         const uint64_t terminal_action_flags =
2571                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2572                 MLX5_FLOW_ACTION_RSS;
2573         int ret;
2574
2575         if (!mlx5_flow_ext_mreg_supported(dev))
2576                 return rte_flow_error_set(error, ENOTSUP,
2577                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2578                                           "extended metadata register"
2579                                           " isn't supported");
2580         if (!(action->conf))
2581                 return rte_flow_error_set(error, EINVAL,
2582                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2583                                           "configuration cannot be null");
2584         conf = (const struct rte_flow_action_set_tag *)action->conf;
2585         if (!conf->mask)
2586                 return rte_flow_error_set(error, EINVAL,
2587                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2588                                           "zero mask doesn't have any effect");
2589         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2590         if (ret < 0)
2591                 return ret;
2592         if (!attr->transfer && attr->ingress &&
2593             (action_flags & terminal_action_flags))
2594                 return rte_flow_error_set(error, EINVAL,
2595                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2596                                           "set_tag has no effect"
2597                                           " with terminal actions");
2598         return 0;
2599 }
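
/*
 * Illustrative sketch (not part of the driver): a SET_TAG request that
 * passes the validation above; the values are assumptions. The index is
 * resolved to a hardware register by mlx5_flow_get_reg_id(), and a zero
 * mask would be rejected:
 *
 *     struct rte_flow_action_set_tag tag_conf = {
 *             .data = 0xbeef,
 *             .mask = 0xffff,
 *             .index = 0,
 *     };
 */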
2600
2601 /**
2602  * Validate count action.
2603  *
2604  * @param[in] dev
2605  *   Pointer to rte_eth_dev structure.
2606  * @param[out] error
2607  *   Pointer to error structure.
2608  *
2609  * @return
2610  *   0 on success, a negative errno value otherwise and rte_errno is set.
2611  */
2612 static int
2613 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2614                               struct rte_flow_error *error)
2615 {
2616         struct mlx5_priv *priv = dev->data->dev_private;
2617
2618         if (!priv->config.devx)
2619                 goto notsup_err;
2620 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2621         return 0;
2622 #endif
2623 notsup_err:
2624         return rte_flow_error_set
2625                       (error, ENOTSUP,
2626                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2627                        NULL,
2628                        "count action not supported");
2629 }
2630
2631 /**
2632  * Validate the L2 encap action.
2633  *
2634  * @param[in] dev
2635  *   Pointer to the rte_eth_dev structure.
2636  * @param[in] action_flags
2637  *   Holds the actions detected until now.
2638  * @param[in] action
2639  *   Pointer to the action structure.
2640  * @param[in] attr
2641  *   Pointer to flow attributes.
2642  * @param[out] error
2643  *   Pointer to error structure.
2644  *
2645  * @return
2646  *   0 on success, a negative errno value otherwise and rte_errno is set.
2647  */
2648 static int
2649 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2650                                  uint64_t action_flags,
2651                                  const struct rte_flow_action *action,
2652                                  const struct rte_flow_attr *attr,
2653                                  struct rte_flow_error *error)
2654 {
2655         const struct mlx5_priv *priv = dev->data->dev_private;
2656
2657         if (!(action->conf))
2658                 return rte_flow_error_set(error, EINVAL,
2659                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2660                                           "configuration cannot be null");
2661         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2662                 return rte_flow_error_set(error, EINVAL,
2663                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2664                                           "can only have a single encap action "
2665                                           "in a flow");
2666         if (!attr->transfer && priv->representor)
2667                 return rte_flow_error_set(error, ENOTSUP,
2668                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2669                                           "encap action for VF representor "
2670                                           "not supported on NIC table");
2671         return 0;
2672 }
2673
2674 /**
2675  * Validate a decap action.
2676  *
2677  * @param[in] dev
2678  *   Pointer to the rte_eth_dev structure.
2679  * @param[in] action_flags
2680  *   Holds the actions detected until now.
2681  * @param[in] action
2682  *   Pointer to the action structure.
2683  * @param[in] item_flags
2684  *   Holds the items detected.
2685  * @param[in] attr
2686  *   Pointer to flow attributes
2687  * @param[out] error
2688  *   Pointer to error structure.
2689  *
2690  * @return
2691  *   0 on success, a negative errno value otherwise and rte_errno is set.
2692  */
2693 static int
2694 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2695                               uint64_t action_flags,
2696                               const struct rte_flow_action *action,
2697                               const uint64_t item_flags,
2698                               const struct rte_flow_attr *attr,
2699                               struct rte_flow_error *error)
2700 {
2701         const struct mlx5_priv *priv = dev->data->dev_private;
2702
2703         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2704             !priv->config.decap_en)
2705                 return rte_flow_error_set(error, ENOTSUP,
2706                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2707                                           "decap is not enabled");
2708         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2709                 return rte_flow_error_set(error, ENOTSUP,
2710                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2711                                           action_flags &
2712                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2713                                           "have a single decap action" : "decap "
2714                                           "after encap is not supported");
2715         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2716                 return rte_flow_error_set(error, EINVAL,
2717                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2718                                           "can't have decap action after"
2719                                           " modify action");
2720         if (attr->egress)
2721                 return rte_flow_error_set(error, ENOTSUP,
2722                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2723                                           NULL,
2724                                           "decap action not supported for "
2725                                           "egress");
2726         if (!attr->transfer && priv->representor)
2727                 return rte_flow_error_set(error, ENOTSUP,
2728                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2729                                           "decap action for VF representor "
2730                                           "not supported on NIC table");
2731         if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP &&
2732             !(item_flags & MLX5_FLOW_LAYER_VXLAN))
2733                 return rte_flow_error_set(error, ENOTSUP,
2734                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2735                                 "VXLAN item should be present for VXLAN decap");
2736         return 0;
2737 }
2738
2739 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2740
2741 /**
2742  * Validate the raw encap and decap actions.
2743  *
2744  * @param[in] dev
2745  *   Pointer to the rte_eth_dev structure.
2746  * @param[in] decap
2747  *   Pointer to the decap action.
2748  * @param[in] encap
2749  *   Pointer to the encap action.
2750  * @param[in] attr
2751  *   Pointer to flow attributes
2752  * @param[in, out] action_flags
2753  *   Holds the actions detected until now.
2754  * @param[out] actions_n
2755  *   pointer to the number of actions counter.
2756  * @param[in] action
2757  *   Pointer to the action structure.
2758  * @param[in] item_flags
2759  *   Holds the items detected.
2760  * @param[out] error
2761  *   Pointer to error structure.
2762  *
2763  * @return
2764  *   0 on success, a negative errno value otherwise and rte_errno is set.
2765  */
2766 static int
2767 flow_dv_validate_action_raw_encap_decap
2768         (struct rte_eth_dev *dev,
2769          const struct rte_flow_action_raw_decap *decap,
2770          const struct rte_flow_action_raw_encap *encap,
2771          const struct rte_flow_attr *attr, uint64_t *action_flags,
2772          int *actions_n, const struct rte_flow_action *action,
2773          uint64_t item_flags, struct rte_flow_error *error)
2774 {
2775         const struct mlx5_priv *priv = dev->data->dev_private;
2776         int ret;
2777
2778         if (encap && (!encap->size || !encap->data))
2779                 return rte_flow_error_set(error, EINVAL,
2780                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2781                                           "raw encap data cannot be empty");
2782         if (decap && encap) {
2783                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2784                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2785                         /* L3 encap. */
2786                         decap = NULL;
2787                 else if (encap->size <=
2788                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2789                            decap->size >
2790                            MLX5_ENCAPSULATION_DECISION_SIZE)
2791                         /* L3 decap. */
2792                         encap = NULL;
2793                 else if (encap->size >
2794                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2795                            decap->size >
2796                            MLX5_ENCAPSULATION_DECISION_SIZE)
2797                         /* 2 L2 actions: encap and decap. */
2798                         ;
2799                 else
2800                         return rte_flow_error_set(error,
2801                                 ENOTSUP,
2802                                 RTE_FLOW_ERROR_TYPE_ACTION,
2803                                 NULL, "unsupported combination: both "
2804                                 "raw decap and raw encap sizes are "
2805                                 "too small");
2806         }
2807         if (decap) {
2808                 ret = flow_dv_validate_action_decap(dev, *action_flags, action,
2809                                                     item_flags, attr, error);
2810                 if (ret < 0)
2811                         return ret;
2812                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2813                 ++(*actions_n);
2814         }
2815         if (encap) {
2816                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2817                         return rte_flow_error_set(error, ENOTSUP,
2818                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2819                                                   NULL,
2820                                                   "too small raw encap size");
2821                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2822                         return rte_flow_error_set(error, EINVAL,
2823                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2824                                                   NULL,
2825                                                   "more than one encap action");
2826                 if (!attr->transfer && priv->representor)
2827                         return rte_flow_error_set
2828                                         (error, ENOTSUP,
2829                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2830                                          "encap action for VF representor "
2831                                          "not supported on NIC table");
2832                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2833                 ++(*actions_n);
2834         }
2835         return 0;
2836 }
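
/*
 * Illustrative sketch (not part of the driver): a raw decap/encap pair
 * for an L3 tunnel, as classified above by comparing the sizes against
 * MLX5_ENCAPSULATION_DECISION_SIZE. Buffers and sizes are assumptions;
 * the small decap strips the inner Ethernet header while the large encap
 * pushes the full tunnel header:
 *
 *     struct rte_flow_action_raw_decap decap_conf = {
 *             .data = inner_l2_hdr,  // hypothetical 14-byte Ethernet header
 *             .size = 14,            // <= decision size: the L3 encap case
 *     };
 *     struct rte_flow_action_raw_encap encap_conf = {
 *             .data = tunnel_hdr,    // hypothetical ETH/IP/UDP/tunnel buffer
 *             .size = tunnel_len,    // > decision size
 *     };
 */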
2837
2838 /**
2839  * Match encap_decap resource.
2840  *
2841  * @param list
2842  *   Pointer to the hash list.
2843  * @param entry
2844  *   Pointer to the existing resource entry object.
2845  * @param key
2846  *   Key of the new entry.
2847  * @param cb_ctx
2848  *   Pointer to the new encap_decap resource.
2849  *
2850  * @return
2851  *   0 on matching, non-zero otherwise.
2852  */
2853 int
2854 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2855                              struct mlx5_hlist_entry *entry,
2856                              uint64_t key __rte_unused, void *cb_ctx)
2857 {
2858         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2859         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2860         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2861
2862         cache_resource = container_of(entry,
2863                                       struct mlx5_flow_dv_encap_decap_resource,
2864                                       entry);
2865         if (resource->reformat_type == cache_resource->reformat_type &&
2866             resource->ft_type == cache_resource->ft_type &&
2867             resource->flags == cache_resource->flags &&
2868             resource->size == cache_resource->size &&
2869             !memcmp((const void *)resource->buf,
2870                     (const void *)cache_resource->buf,
2871                     resource->size))
2872                 return 0;
2873         return -1;
2874 }
2875
2876 /**
2877  * Allocate encap_decap resource.
2878  *
2879  * @param list
2880  *   Pointer to the hash list.
2881  * @param key
2882  *   Key of the new entry.
2883  * @param cb_ctx
2884  *   Pointer to the new encap_decap resource.
2885  *
2886  * @return
2887  *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
2888  */
2889 struct mlx5_hlist_entry *
2890 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2891                               uint64_t key __rte_unused,
2892                               void *cb_ctx)
2893 {
2894         struct mlx5_dev_ctx_shared *sh = list->ctx;
2895         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2896         struct mlx5dv_dr_domain *domain;
2897         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2898         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2899         uint32_t idx;
2900         int ret;
2901
2902         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2903                 domain = sh->fdb_domain;
2904         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2905                 domain = sh->rx_domain;
2906         else
2907                 domain = sh->tx_domain;
2908         /* Register new encap/decap resource. */
2909         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2910                                        &idx);
2911         if (!cache_resource) {
2912                 rte_flow_error_set(ctx->error, ENOMEM,
2913                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2914                                    "cannot allocate resource memory");
2915                 return NULL;
2916         }
2917         *cache_resource = *resource;
2918         cache_resource->idx = idx;
2919         ret = mlx5_flow_os_create_flow_action_packet_reformat
2920                                         (sh->ctx, domain, cache_resource,
2921                                          &cache_resource->action);
2922         if (ret) {
2923                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2924                 rte_flow_error_set(ctx->error, ENOMEM,
2925                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2926                                    NULL, "cannot create action");
2927                 return NULL;
2928         }
2929
2930         return &cache_resource->entry;
2931 }
2932
2933 /**
2934  * Find existing encap/decap resource or create and register a new one.
2935  *
2936  * @param[in, out] dev
2937  *   Pointer to rte_eth_dev structure.
2938  * @param[in, out] resource
2939  *   Pointer to encap/decap resource.
2940  * @param[in, out] dev_flow
2941  *   Pointer to the dev_flow.
2942  * @param[out] error
2943  *   pointer to error structure.
2944  *
2945  * @return
2946  *   0 on success, a negative errno value otherwise and rte_errno is set.
2947  */
2948 static int
2949 flow_dv_encap_decap_resource_register
2950                         (struct rte_eth_dev *dev,
2951                          struct mlx5_flow_dv_encap_decap_resource *resource,
2952                          struct mlx5_flow *dev_flow,
2953                          struct rte_flow_error *error)
2954 {
2955         struct mlx5_priv *priv = dev->data->dev_private;
2956         struct mlx5_dev_ctx_shared *sh = priv->sh;
2957         struct mlx5_hlist_entry *entry;
2958         union {
2959                 struct {
2960                         uint32_t ft_type:8;
2961                         uint32_t refmt_type:8;
2962                         /*
2963                          * Header reformat actions can be shared between
2964                          * non-root tables. One bit to indicate non-root
2965                          * table or not.
2966                          */
2967                         uint32_t is_root:1;
2968                         uint32_t reserve:15;
2969                 };
2970                 uint32_t v32;
2971         } encap_decap_key = {
2972                 {
2973                         .ft_type = resource->ft_type,
2974                         .refmt_type = resource->reformat_type,
2975                         .is_root = !!dev_flow->dv.group,
2976                         .reserve = 0,
2977                 }
2978         };
2979         struct mlx5_flow_cb_ctx ctx = {
2980                 .error = error,
2981                 .data = resource,
2982         };
2983         uint64_t key64;
2984
2985         resource->flags = dev_flow->dv.group ? 0 : 1;
2986         key64 = __rte_raw_cksum(&encap_decap_key.v32,
2987                                  sizeof(encap_decap_key.v32), 0);
2988         if (resource->reformat_type !=
2989             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
2990             resource->size)
2991                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
2992         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
2993         if (!entry)
2994                 return -rte_errno;
2995         resource = container_of(entry, typeof(*resource), entry);
2996         dev_flow->dv.encap_decap = resource;
2997         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2998         return 0;
2999 }
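
/*
 * Key derivation note: the 64-bit hash key is a __rte_raw_cksum() of the
 * packed 32-bit key (ft_type, refmt_type, is_root), folded with a
 * checksum of the reformat buffer for every type except L2_TUNNEL_TO_L2
 * (plain decap carries no data). Candidate entries that collide on the
 * key are then compared field by field in flow_dv_encap_decap_match_cb().
 */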
3000
3001 /**
3002  * Find existing table jump resource or create and register a new one.
3003  *
3004  * @param[in, out] dev
3005  *   Pointer to rte_eth_dev structure.
3006  * @param[in, out] tbl
3007  *   Pointer to flow table resource.
3008  * @param[in, out] dev_flow
3009  *   Pointer to the dev_flow.
3010  * @param[out] error
3011  *   pointer to error structure.
3012  *
3013  * @return
3014  *   0 on success, a negative errno value otherwise and rte_errno is set.
3015  */
3016 static int
3017 flow_dv_jump_tbl_resource_register
3018                         (struct rte_eth_dev *dev __rte_unused,
3019                          struct mlx5_flow_tbl_resource *tbl,
3020                          struct mlx5_flow *dev_flow,
3021                          struct rte_flow_error *error __rte_unused)
3022 {
3023         struct mlx5_flow_tbl_data_entry *tbl_data =
3024                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
3025
3026         MLX5_ASSERT(tbl);
3027         MLX5_ASSERT(tbl_data->jump.action);
3028         dev_flow->handle->rix_jump = tbl_data->idx;
3029         dev_flow->dv.jump = &tbl_data->jump;
3030         return 0;
3031 }
3032
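/**
 * Match port ID action resource.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Pointer to the existing resource entry object.
 * @param cb_ctx
 *   Pointer to the new port ID action resource reference.
 *
 * @return
 *   0 on matching, non-zero otherwise.
 */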
3033 int
3034 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
3035                          struct mlx5_cache_entry *entry, void *cb_ctx)
3036 {
3037         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3038         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3039         struct mlx5_flow_dv_port_id_action_resource *res =
3040                         container_of(entry, typeof(*res), entry);
3041
3042         return ref->port_id != res->port_id;
3043 }
3044
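/**
 * Allocate a port ID action resource and create the DR destination port
 * action.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Unused entry pointer.
 * @param cb_ctx
 *   Pointer to the new port ID action resource reference.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
 */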
3045 struct mlx5_cache_entry *
3046 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
3047                           struct mlx5_cache_entry *entry __rte_unused,
3048                           void *cb_ctx)
3049 {
3050         struct mlx5_dev_ctx_shared *sh = list->ctx;
3051         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3052         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
3053         struct mlx5_flow_dv_port_id_action_resource *cache;
3054         uint32_t idx;
3055         int ret;
3056
3057         /* Register new port id action resource. */
3058         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
3059         if (!cache) {
3060                 rte_flow_error_set(ctx->error, ENOMEM,
3061                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3062                                    "cannot allocate port_id action cache memory");
3063                 return NULL;
3064         }
3065         *cache = *ref;
3066         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
3067                                                         ref->port_id,
3068                                                         &cache->action);
3069         if (ret) {
3070                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
3071                 rte_flow_error_set(ctx->error, ENOMEM,
3072                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3073                                    "cannot create action");
3074                 return NULL;
3075         }
3076         return &cache->entry;
3077 }
3078
3079 /**
3080  * Find existing port ID action resource or create and register a new one.
3081  *
3082  * @param[in, out] dev
3083  *   Pointer to rte_eth_dev structure.
3084  * @param[in, out] resource
3085  *   Pointer to port ID action resource.
3086  * @param[in, out] dev_flow
3087  *   Pointer to the dev_flow.
3088  * @param[out] error
3089  *   pointer to error structure.
3090  *
3091  * @return
3092  *   0 on success, a negative errno value otherwise and rte_errno is set.
3093  */
3094 static int
3095 flow_dv_port_id_action_resource_register
3096                         (struct rte_eth_dev *dev,
3097                          struct mlx5_flow_dv_port_id_action_resource *resource,
3098                          struct mlx5_flow *dev_flow,
3099                          struct rte_flow_error *error)
3100 {
3101         struct mlx5_priv *priv = dev->data->dev_private;
3102         struct mlx5_cache_entry *entry;
3103         struct mlx5_flow_dv_port_id_action_resource *cache;
3104         struct mlx5_flow_cb_ctx ctx = {
3105                 .error = error,
3106                 .data = resource,
3107         };
3108
3109         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3110         if (!entry)
3111                 return -rte_errno;
3112         cache = container_of(entry, typeof(*cache), entry);
3113         dev_flow->dv.port_id_action = cache;
3114         dev_flow->handle->rix_port_id_action = cache->idx;
3115         return 0;
3116 }
3117
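/**
 * Match push VLAN action resource by VLAN tag and flow table type.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Pointer to the existing resource entry object.
 * @param cb_ctx
 *   Pointer to the new push VLAN action resource reference.
 *
 * @return
 *   0 on matching, non-zero otherwise.
 */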
3118 int
3119 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3120                          struct mlx5_cache_entry *entry, void *cb_ctx)
3121 {
3122         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3123         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3124         struct mlx5_flow_dv_push_vlan_action_resource *res =
3125                         container_of(entry, typeof(*res), entry);
3126
3127         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3128 }
3129
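/**
 * Allocate a push VLAN action resource and create the DR push VLAN action
 * on the domain selected by the flow table type.
 *
 * @param list
 *   Pointer to the cache list.
 * @param entry
 *   Unused entry pointer.
 * @param cb_ctx
 *   Pointer to the new push VLAN action resource reference.
 *
 * @return
 *   Pointer to the new entry on success, NULL otherwise and rte_errno is set.
 */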
3130 struct mlx5_cache_entry *
3131 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3132                           struct mlx5_cache_entry *entry __rte_unused,
3133                           void *cb_ctx)
3134 {
3135         struct mlx5_dev_ctx_shared *sh = list->ctx;
3136         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3137         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3138         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3139         struct mlx5dv_dr_domain *domain;
3140         uint32_t idx;
3141         int ret;
3142
3143         /* Register new push VLAN action resource. */
3144         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3145         if (!cache) {
3146                 rte_flow_error_set(ctx->error, ENOMEM,
3147                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3148                                    "cannot allocate push_vlan action cache memory");
3149                 return NULL;
3150         }
3151         *cache = *ref;
3152         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3153                 domain = sh->fdb_domain;
3154         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3155                 domain = sh->rx_domain;
3156         else
3157                 domain = sh->tx_domain;
3158         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3159                                                         &cache->action);
3160         if (ret) {
3161                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3162                 rte_flow_error_set(ctx->error, ENOMEM,
3163                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3164                                    "cannot create push vlan action");
3165                 return NULL;
3166         }
3167         return &cache->entry;
3168 }
3169
3170 /**
3171  * Find existing push vlan resource or create and register a new one.
3172  *
3173  * @param[in, out] dev
3174  *   Pointer to rte_eth_dev structure.
3175  * @param[in, out] resource
3176  *   Pointer to push VLAN action resource.
3177  * @param[in, out] dev_flow
3178  *   Pointer to the dev_flow.
3179  * @param[out] error
3180  *   pointer to error structure.
3181  *
3182  * @return
3183  *   0 on success, a negative errno value otherwise and rte_errno is set.
3184  */
3185 static int
3186 flow_dv_push_vlan_action_resource_register
3187                        (struct rte_eth_dev *dev,
3188                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3189                         struct mlx5_flow *dev_flow,
3190                         struct rte_flow_error *error)
3191 {
3192         struct mlx5_priv *priv = dev->data->dev_private;
3193         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3194         struct mlx5_cache_entry *entry;
3195         struct mlx5_flow_cb_ctx ctx = {
3196                 .error = error,
3197                 .data = resource,
3198         };
3199
3200         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3201         if (!entry)
3202                 return -rte_errno;
3203         cache = container_of(entry, typeof(*cache), entry);
3204
3205         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3206         dev_flow->dv.push_vlan_res = cache;
3207         return 0;
3208 }
3209
3210 /**
3211  * Get the header length of the specified rte_flow_item_type.
3212  *
3213  * @param[in] item_type
3214  *   Tested rte_flow_item_type.
3215  *
3216  * @return
3217  *   Size of the item header in bytes, 0 if void or irrelevant.
3218  */
3219 static size_t
3220 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3221 {
3222         size_t retval;
3223
3224         switch (item_type) {
3225         case RTE_FLOW_ITEM_TYPE_ETH:
3226                 retval = sizeof(struct rte_ether_hdr);
3227                 break;
3228         case RTE_FLOW_ITEM_TYPE_VLAN:
3229                 retval = sizeof(struct rte_vlan_hdr);
3230                 break;
3231         case RTE_FLOW_ITEM_TYPE_IPV4:
3232                 retval = sizeof(struct rte_ipv4_hdr);
3233                 break;
3234         case RTE_FLOW_ITEM_TYPE_IPV6:
3235                 retval = sizeof(struct rte_ipv6_hdr);
3236                 break;
3237         case RTE_FLOW_ITEM_TYPE_UDP:
3238                 retval = sizeof(struct rte_udp_hdr);
3239                 break;
3240         case RTE_FLOW_ITEM_TYPE_TCP:
3241                 retval = sizeof(struct rte_tcp_hdr);
3242                 break;
3243         case RTE_FLOW_ITEM_TYPE_VXLAN:
3244         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3245                 retval = sizeof(struct rte_vxlan_hdr);
3246                 break;
3247         case RTE_FLOW_ITEM_TYPE_GRE:
3248         case RTE_FLOW_ITEM_TYPE_NVGRE:
3249                 retval = sizeof(struct rte_gre_hdr);
3250                 break;
3251         case RTE_FLOW_ITEM_TYPE_MPLS:
3252                 retval = sizeof(struct rte_mpls_hdr);
3253                 break;
3254         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3255         default:
3256                 retval = 0;
3257                 break;
3258         }
3259         return retval;
3260 }
3261
3262 #define MLX5_ENCAP_IPV4_VERSION         0x40
3263 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3264 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3265 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3266 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3267 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3268 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
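
/*
 * Default values filled into encap headers when the application leaves a
 * field zeroed: version_ihl = 0x40 | 0x05 = 0x45 (IPv4, 20-byte header),
 * TTL 0x40 = 64, vtc_flow 0x60000000 (IPv6 version nibble), hop limit
 * 0xff = 255, VXLAN flags 0x08000000 (valid-VNI "I" bit) and VXLAN-GPE
 * flags 0x04 (next-protocol "P" bit).
 */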
3269
3270 /**
3271  * Convert the encap action data from list of rte_flow_item to raw buffer
3272  *
3273  * @param[in] items
3274  *   Pointer to rte_flow_item objects list.
3275  * @param[out] buf
3276  *   Pointer to the output buffer.
3277  * @param[out] size
3278  *   Pointer to the output buffer size.
3279  * @param[out] error
3280  *   Pointer to the error structure.
3281  *
3282  * @return
3283  *   0 on success, a negative errno value otherwise and rte_errno is set.
3284  */
3285 static int
3286 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3287                            size_t *size, struct rte_flow_error *error)
3288 {
3289         struct rte_ether_hdr *eth = NULL;
3290         struct rte_vlan_hdr *vlan = NULL;
3291         struct rte_ipv4_hdr *ipv4 = NULL;
3292         struct rte_ipv6_hdr *ipv6 = NULL;
3293         struct rte_udp_hdr *udp = NULL;
3294         struct rte_vxlan_hdr *vxlan = NULL;
3295         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3296         struct rte_gre_hdr *gre = NULL;
3297         size_t len;
3298         size_t temp_size = 0;
3299
3300         if (!items)
3301                 return rte_flow_error_set(error, EINVAL,
3302                                           RTE_FLOW_ERROR_TYPE_ACTION,
3303                                           NULL, "invalid empty data");
3304         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3305                 len = flow_dv_get_item_hdr_len(items->type);
3306                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3307                         return rte_flow_error_set(error, EINVAL,
3308                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3309                                                   (void *)items->type,
3310                                                   "items total size is too big"
3311                                                   " for encap action");
3312                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3313                 switch (items->type) {
3314                 case RTE_FLOW_ITEM_TYPE_ETH:
3315                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3316                         break;
3317                 case RTE_FLOW_ITEM_TYPE_VLAN:
3318                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3319                         if (!eth)
3320                                 return rte_flow_error_set(error, EINVAL,
3321                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3322                                                 (void *)items->type,
3323                                                 "eth header not found");
3324                         if (!eth->ether_type)
3325                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3326                         break;
3327                 case RTE_FLOW_ITEM_TYPE_IPV4:
3328                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3329                         if (!vlan && !eth)
3330                                 return rte_flow_error_set(error, EINVAL,
3331                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3332                                                 (void *)items->type,
3333                                                 "neither eth nor vlan"
3334                                                 " header found");
3335                         if (vlan && !vlan->eth_proto)
3336                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3337                         else if (eth && !eth->ether_type)
3338                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3339                         if (!ipv4->version_ihl)
3340                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3341                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3342                         if (!ipv4->time_to_live)
3343                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3344                         break;
3345                 case RTE_FLOW_ITEM_TYPE_IPV6:
3346                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3347                         if (!vlan && !eth)
3348                                 return rte_flow_error_set(error, EINVAL,
3349                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3350                                                 (void *)items->type,
3351                                                 "neither eth nor vlan"
3352                                                 " header found");
3353                         if (vlan && !vlan->eth_proto)
3354                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3355                         else if (eth && !eth->ether_type)
3356                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3357                         if (!ipv6->vtc_flow)
3358                                 ipv6->vtc_flow =
3359                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3360                         if (!ipv6->hop_limits)
3361                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3362                         break;
3363                 case RTE_FLOW_ITEM_TYPE_UDP:
3364                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3365                         if (!ipv4 && !ipv6)
3366                                 return rte_flow_error_set(error, EINVAL,
3367                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3368                                                 (void *)items->type,
3369                                                 "ip header not found");
3370                         if (ipv4 && !ipv4->next_proto_id)
3371                                 ipv4->next_proto_id = IPPROTO_UDP;
3372                         else if (ipv6 && !ipv6->proto)
3373                                 ipv6->proto = IPPROTO_UDP;
3374                         break;
3375                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3376                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3377                         if (!udp)
3378                                 return rte_flow_error_set(error, EINVAL,
3379                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3380                                                 (void *)items->type,
3381                                                 "udp header not found");
3382                         if (!udp->dst_port)
3383                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3384                         if (!vxlan->vx_flags)
3385                                 vxlan->vx_flags =
3386                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3387                         break;
3388                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3389                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3390                         if (!udp)
3391                                 return rte_flow_error_set(error, EINVAL,
3392                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3393                                                 (void *)items->type,
3394                                                 "udp header not found");
3395                         if (!vxlan_gpe->proto)
3396                                 return rte_flow_error_set(error, EINVAL,
3397                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3398                                                 (void *)items->type,
3399                                                 "next protocol not found");
3400                         if (!udp->dst_port)
3401                                 udp->dst_port =
3402                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3403                         if (!vxlan_gpe->vx_flags)
3404                                 vxlan_gpe->vx_flags =
3405                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3406                         break;
3407                 case RTE_FLOW_ITEM_TYPE_GRE:
3408                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3409                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3410                         if (!gre->proto)
3411                                 return rte_flow_error_set(error, EINVAL,
3412                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3413                                                 (void *)items->type,
3414                                                 "next protocol not found");
3415                         if (!ipv4 && !ipv6)
3416                                 return rte_flow_error_set(error, EINVAL,
3417                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3418                                                 (void *)items->type,
3419                                                 "ip header not found");
3420                         if (ipv4 && !ipv4->next_proto_id)
3421                                 ipv4->next_proto_id = IPPROTO_GRE;
3422                         else if (ipv6 && !ipv6->proto)
3423                                 ipv6->proto = IPPROTO_GRE;
3424                         break;
3425                 case RTE_FLOW_ITEM_TYPE_VOID:
3426                         break;
3427                 default:
3428                         return rte_flow_error_set(error, EINVAL,
3429                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3430                                                   (void *)items->type,
3431                                                   "unsupported item type");
3432                         break;
3433                 }
3434                 temp_size += len;
3435         }
3436         *size = temp_size;
3437         return 0;
3438 }
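
/*
 * Illustrative sketch (not part of the driver): a VXLAN encap definition
 * the converter above flattens into a raw buffer, filling defaults for
 * zeroed fields (ether_type, IPv4 TTL/protocol, UDP destination port,
 * VXLAN flags). The *_spec variables stand for application-provided item
 * specs and are assumptions:
 *
 *     const struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *     size_t size;
 *     int rc = flow_dv_convert_encap_data(items, buf, &size, &err);
 */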
3439
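/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header.
 *
 * Walks the raw encap buffer built above: IPv4 is skipped (the hardware
 * computes its checksum), non-IP headers are rejected, and for IPv6/UDP
 * the UDP checksum is cleared (a zero UDP checksum over IPv6 is permitted
 * for tunnel protocols, per RFC 6935).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */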
3440 static int
3441 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3442 {
3443         struct rte_ether_hdr *eth = NULL;
3444         struct rte_vlan_hdr *vlan = NULL;
3445         struct rte_ipv6_hdr *ipv6 = NULL;
3446         struct rte_udp_hdr *udp = NULL;
3447         char *next_hdr;
3448         uint16_t proto;
3449
3450         eth = (struct rte_ether_hdr *)data;
3451         next_hdr = (char *)(eth + 1);
3452         proto = RTE_BE16(eth->ether_type);
3453
3454         /* VLAN skipping */
3455         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3456                 vlan = (struct rte_vlan_hdr *)next_hdr;
3457                 proto = RTE_BE16(vlan->eth_proto);
3458                 next_hdr += sizeof(struct rte_vlan_hdr);
3459         }
3460
3461         /* HW calculates IPv4 checksum, no need to proceed. */
3462         if (proto == RTE_ETHER_TYPE_IPV4)
3463                 return 0;
3464
3465         /* Non-IPv4/IPv6 header, not supported. */
3466         if (proto != RTE_ETHER_TYPE_IPV6) {
3467                 return rte_flow_error_set(error, ENOTSUP,
3468                                           RTE_FLOW_ERROR_TYPE_ACTION,
3469                                           NULL, "Cannot offload non IPv4/IPv6");
3470         }
3471
3472         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3473
3474         /* ignore non UDP */
3475         if (ipv6->proto != IPPROTO_UDP)
3476                 return 0;
3477
3478         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3479         udp->dgram_cksum = 0;
3480
3481         return 0;
3482 }
3483
3484 /**
3485  * Convert L2 encap action to DV specification.
3486  *
3487  * @param[in] dev
3488  *   Pointer to rte_eth_dev structure.
3489  * @param[in] action
3490  *   Pointer to action structure.
3491  * @param[in, out] dev_flow
3492  *   Pointer to the mlx5_flow.
3493  * @param[in] transfer
3494  *   Mark if the flow is E-Switch flow.
3495  * @param[out] error
3496  *   Pointer to the error structure.
3497  *
3498  * @return
3499  *   0 on success, a negative errno value otherwise and rte_errno is set.
3500  */
3501 static int
3502 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3503                                const struct rte_flow_action *action,
3504                                struct mlx5_flow *dev_flow,
3505                                uint8_t transfer,
3506                                struct rte_flow_error *error)
3507 {
3508         const struct rte_flow_item *encap_data;
3509         const struct rte_flow_action_raw_encap *raw_encap_data;
3510         struct mlx5_flow_dv_encap_decap_resource res = {
3511                 .reformat_type =
3512                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3513                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3514                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3515         };
3516
3517         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3518                 raw_encap_data =
3519                         (const struct rte_flow_action_raw_encap *)action->conf;
3520                 res.size = raw_encap_data->size;
3521                 memcpy(res.buf, raw_encap_data->data, res.size);
3522         } else {
3523                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3524                         encap_data =
3525                                 ((const struct rte_flow_action_vxlan_encap *)
3526                                                 action->conf)->definition;
3527                 else
3528                         encap_data =
3529                                 ((const struct rte_flow_action_nvgre_encap *)
3530                                                 action->conf)->definition;
3531                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3532                                                &res.size, error))
3533                         return -rte_errno;
3534         }
3535         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3536                 return -rte_errno;
3537         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3538                 return rte_flow_error_set(error, EINVAL,
3539                                           RTE_FLOW_ERROR_TYPE_ACTION,
3540                                           NULL, "can't create L2 encap action");
3541         return 0;
3542 }
3543
3544 /**
3545  * Convert L2 decap action to DV specification.
3546  *
3547  * @param[in] dev
3548  *   Pointer to rte_eth_dev structure.
3549  * @param[in, out] dev_flow
3550  *   Pointer to the mlx5_flow.
3551  * @param[in] transfer
3552  *   Mark if the flow is E-Switch flow.
3553  * @param[out] error
3554  *   Pointer to the error structure.
3555  *
3556  * @return
3557  *   0 on success, a negative errno value otherwise and rte_errno is set.
3558  */
3559 static int
3560 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3561                                struct mlx5_flow *dev_flow,
3562                                uint8_t transfer,
3563                                struct rte_flow_error *error)
3564 {
3565         struct mlx5_flow_dv_encap_decap_resource res = {
3566                 .size = 0,
3567                 .reformat_type =
3568                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3569                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3570                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3571         };
3572
3573         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3574                 return rte_flow_error_set(error, EINVAL,
3575                                           RTE_FLOW_ERROR_TYPE_ACTION,
3576                                           NULL, "can't create L2 decap action");
3577         return 0;
3578 }
3579
3580 /**
3581  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3582  *
3583  * @param[in] dev
3584  *   Pointer to rte_eth_dev structure.
3585  * @param[in] action
3586  *   Pointer to action structure.
3587  * @param[in, out] dev_flow
3588  *   Pointer to the mlx5_flow.
3589  * @param[in] attr
3590  *   Pointer to the flow attributes.
3591  * @param[out] error
3592  *   Pointer to the error structure.
3593  *
3594  * @return
3595  *   0 on success, a negative errno value otherwise and rte_errno is set.
3596  */
3597 static int
3598 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3599                                 const struct rte_flow_action *action,
3600                                 struct mlx5_flow *dev_flow,
3601                                 const struct rte_flow_attr *attr,
3602                                 struct rte_flow_error *error)
3603 {
3604         const struct rte_flow_action_raw_encap *encap_data;
3605         struct mlx5_flow_dv_encap_decap_resource res;
3606
3607         memset(&res, 0, sizeof(res));
3608         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3609         res.size = encap_data->size;
3610         memcpy(res.buf, encap_data->data, res.size);
3611         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3612                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3613                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3614         if (attr->transfer)
3615                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3616         else
3617                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3618                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3619         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3620                 return rte_flow_error_set(error, EINVAL,
3621                                           RTE_FLOW_ERROR_TYPE_ACTION,
3622                                           NULL, "can't create encap action");
3623         return 0;
3624 }
3625
3626 /**
3627  * Create action push VLAN.
3628  *
3629  * @param[in] dev
3630  *   Pointer to rte_eth_dev structure.
3631  * @param[in] attr
3632  *   Pointer to the flow attributes.
3633  * @param[in] vlan
3634  *   Pointer to the vlan to push to the Ethernet header.
3635  * @param[in, out] dev_flow
3636  *   Pointer to the mlx5_flow.
3637  * @param[out] error
3638  *   Pointer to the error structure.
3639  *
3640  * @return
3641  *   0 on success, a negative errno value otherwise and rte_errno is set.
3642  */
3643 static int
3644 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3645                                 const struct rte_flow_attr *attr,
3646                                 const struct rte_vlan_hdr *vlan,
3647                                 struct mlx5_flow *dev_flow,
3648                                 struct rte_flow_error *error)
3649 {
3650         struct mlx5_flow_dv_push_vlan_action_resource res;
3651
3652         memset(&res, 0, sizeof(res));
3653         res.vlan_tag =
3654                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3655                                  vlan->vlan_tci);
3656         if (attr->transfer)
3657                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3658         else
3659                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3660                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3661         return flow_dv_push_vlan_action_resource_register
3662                                             (dev, &res, dev_flow, error);
3663 }
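
/*
 * vlan_tag packing note: the 32-bit value holds (eth_proto << 16) |
 * vlan_tci in network byte order. For example, TPID 0x8100 with PCP 3
 * and VID 100 gives vlan_tci = (3 << 13) | 100 = 0x6064 and a vlan_tag
 * of 0x81006064 in big endian.
 */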
3664
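/* Set when the flow being validated holds an E-Switch (FDB) mirror action. */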
3665 static int fdb_mirror;
3666
3667 /**
3668  * Validate the modify-header actions.
3669  *
3670  * @param[in] action_flags
3671  *   Holds the actions detected until now.
3672  * @param[in] action
3673  *   Pointer to the modify action.
3674  * @param[out] error
3675  *   Pointer to error structure.
3676  *
3677  * @return
3678  *   0 on success, a negative errno value otherwise and rte_errno is set.
3679  */
3680 static int
3681 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3682                                    const struct rte_flow_action *action,
3683                                    struct rte_flow_error *error)
3684 {
3685         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3686                 return rte_flow_error_set(error, EINVAL,
3687                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3688                                           NULL, "action configuration not set");
3689         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3690                 return rte_flow_error_set(error, EINVAL,
3691                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3692                                           "can't have encap action before"
3693                                           " modify action");
3694         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3695                 return rte_flow_error_set(error, EINVAL,
3696                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3697                                           "can't support sample action before"
3698                                           " modify action for E-Switch"
3699                                           " mirroring");
3700         return 0;
3701 }
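
/*
 * Note on the IP/L4 modify-header validators below: when the flow also
 * carries a decap action, the header being rewritten is the inner one,
 * so the item checks use the MLX5_FLOW_LAYER_INNER_* flags instead of
 * the outer ones.
 */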
3702
3703 /**
3704  * Validate the modify-header MAC address actions.
3705  *
3706  * @param[in] action_flags
3707  *   Holds the actions detected until now.
3708  * @param[in] action
3709  *   Pointer to the modify action.
3710  * @param[in] item_flags
3711  *   Holds the items detected.
3712  * @param[out] error
3713  *   Pointer to error structure.
3714  *
3715  * @return
3716  *   0 on success, a negative errno value otherwise and rte_errno is set.
3717  */
3718 static int
3719 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3720                                    const struct rte_flow_action *action,
3721                                    const uint64_t item_flags,
3722                                    struct rte_flow_error *error)
3723 {
3724         int ret = 0;
3725
3726         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3727         if (!ret) {
3728                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3729                         return rte_flow_error_set(error, EINVAL,
3730                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3731                                                   NULL,
3732                                                   "no L2 item in pattern");
3733         }
3734         return ret;
3735 }
3736
3737 /**
3738  * Validate the modify-header IPv4 address actions.
3739  *
3740  * @param[in] action_flags
3741  *   Holds the actions detected until now.
3742  * @param[in] action
3743  *   Pointer to the modify action.
3744  * @param[in] item_flags
3745  *   Holds the items detected.
3746  * @param[out] error
3747  *   Pointer to error structure.
3748  *
3749  * @return
3750  *   0 on success, a negative errno value otherwise and rte_errno is set.
3751  */
3752 static int
3753 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3754                                     const struct rte_flow_action *action,
3755                                     const uint64_t item_flags,
3756                                     struct rte_flow_error *error)
3757 {
3758         int ret = 0;
3759         uint64_t layer;
3760
3761         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3762         if (!ret) {
3763                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3764                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3765                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3766                 if (!(item_flags & layer))
3767                         return rte_flow_error_set(error, EINVAL,
3768                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3769                                                   NULL,
3770                                                   "no ipv4 item in pattern");
3771         }
3772         return ret;
3773 }
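
/*
 * Illustrative sketch in testpmd flow syntax (hypothetical rule): with a
 * decap action in the flow, the validator selects the *inner* IPv4 layer
 * bit, so a tunnel rule must match the inner IPv4 header for the modify
 * action to be accepted:
 *
 *     pattern eth / ipv4 / udp / vxlan / eth / ipv4 / end
 *     actions vxlan_decap / set_ipv4_src ipv4_addr 10.0.0.1 /
 *             queue index 0 / end
 */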
3774
3775 /**
3776  * Validate the modify-header IPv6 address actions.
3777  *
3778  * @param[in] action_flags
3779  *   Holds the actions detected until now.
3780  * @param[in] action
3781  *   Pointer to the modify action.
3782  * @param[in] item_flags
3783  *   Holds the items detected.
3784  * @param[out] error
3785  *   Pointer to error structure.
3786  *
3787  * @return
3788  *   0 on success, a negative errno value otherwise and rte_errno is set.
3789  */
3790 static int
3791 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3792                                     const struct rte_flow_action *action,
3793                                     const uint64_t item_flags,
3794                                     struct rte_flow_error *error)
3795 {
3796         int ret = 0;
3797         uint64_t layer;
3798
3799         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3800         if (!ret) {
3801                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3802                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3803                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3804                 if (!(item_flags & layer))
3805                         return rte_flow_error_set(error, EINVAL,
3806                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3807                                                   NULL,
3808                                                   "no ipv6 item in pattern");
3809         }
3810         return ret;
3811 }
3812
3813 /**
3814  * Validate the modify-header TP actions.
3815  *
3816  * @param[in] action_flags
3817  *   Holds the actions detected until now.
3818  * @param[in] action
3819  *   Pointer to the modify action.
3820  * @param[in] item_flags
3821  *   Holds the items detected.
3822  * @param[out] error
3823  *   Pointer to error structure.
3824  *
3825  * @return
3826  *   0 on success, a negative errno value otherwise and rte_errno is set.
3827  */
3828 static int
3829 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3830                                   const struct rte_flow_action *action,
3831                                   const uint64_t item_flags,
3832                                   struct rte_flow_error *error)
3833 {
3834         int ret = 0;
3835         uint64_t layer;
3836
3837         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3838         if (!ret) {
3839                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3840                                  MLX5_FLOW_LAYER_INNER_L4 :
3841                                  MLX5_FLOW_LAYER_OUTER_L4;
3842                 if (!(item_flags & layer))
3843                         return rte_flow_error_set(error, EINVAL,
3844                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3845                                                   NULL, "no transport layer "
3846                                                   "in pattern");
3847         }
3848         return ret;
3849 }
3850
3851 /**
3852  * Validate the modify-header actions that increment/decrement the
3853  * TCP sequence number.
3854  *
3855  * @param[in] action_flags
3856  *   Holds the actions detected until now.
3857  * @param[in] action
3858  *   Pointer to the modify action.
3859  * @param[in] item_flags
3860  *   Holds the items detected.
3861  * @param[out] error
3862  *   Pointer to error structure.
3863  *
3864  * @return
3865  *   0 on success, a negative errno value otherwise and rte_errno is set.
3866  */
3867 static int
3868 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3869                                        const struct rte_flow_action *action,
3870                                        const uint64_t item_flags,
3871                                        struct rte_flow_error *error)
3872 {
3873         int ret = 0;
3874         uint64_t layer;
3875
3876         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3877         if (!ret) {
3878                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3879                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3880                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3881                 if (!(item_flags & layer))
3882                         return rte_flow_error_set(error, EINVAL,
3883                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3884                                                   NULL, "no TCP item in"
3885                                                   " pattern");
3886                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3887                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3888                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3889                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3890                         return rte_flow_error_set(error, EINVAL,
3891                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3892                                                   NULL,
3893                                                   "cannot decrease and increase"
3894                                                   " TCP sequence number"
3895                                                   " at the same time");
3896         }
3897         return ret;
3898 }
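
/*
 * Illustrative sketch in testpmd flow syntax (hypothetical rule): both
 * directions of sequence adjustment in one flow trip the conflict check
 * above:
 *
 *     pattern eth / ipv4 / tcp / end
 *     actions inc_tcp_seq value 100 / dec_tcp_seq value 50 /
 *             queue index 0 / end
 */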
3899
3900 /**
3901  * Validate the modify-header actions that increment/decrement the
3902  * TCP acknowledgment number.
3903  *
3904  * @param[in] action_flags
3905  *   Holds the actions detected until now.
3906  * @param[in] action
3907  *   Pointer to the modify action.
3908  * @param[in] item_flags
3909  *   Holds the items detected.
3910  * @param[out] error
3911  *   Pointer to error structure.
3912  *
3913  * @return
3914  *   0 on success, a negative errno value otherwise and rte_errno is set.
3915  */
3916 static int
3917 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3918                                        const struct rte_flow_action *action,
3919                                        const uint64_t item_flags,
3920                                        struct rte_flow_error *error)
3921 {
3922         int ret = 0;
3923         uint64_t layer;
3924
3925         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3926         if (!ret) {
3927                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3928                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3929                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3930                 if (!(item_flags & layer))
3931                         return rte_flow_error_set(error, EINVAL,
3932                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3933                                                   NULL, "no TCP item in"
3934                                                   " pattern");
3935                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3936                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3937                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3938                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3939                         return rte_flow_error_set(error, EINVAL,
3940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3941                                                   NULL,
3942                                                   "cannot decrease and increase"
3943                                                   " TCP acknowledgment number"
3944                                                   " at the same time");
3945         }
3946         return ret;
3947 }
3948
3949 /**
3950  * Validate the modify-header TTL actions.
3951  *
3952  * @param[in] action_flags
3953  *   Holds the actions detected until now.
3954  * @param[in] action
3955  *   Pointer to the modify action.
3956  * @param[in] item_flags
3957  *   Holds the items detected.
3958  * @param[out] error
3959  *   Pointer to error structure.
3960  *
3961  * @return
3962  *   0 on success, a negative errno value otherwise and rte_errno is set.
3963  */
3964 static int
3965 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3966                                    const struct rte_flow_action *action,
3967                                    const uint64_t item_flags,
3968                                    struct rte_flow_error *error)
3969 {
3970         int ret = 0;
3971         uint64_t layer;
3972
3973         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3974         if (!ret) {
3975                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3976                                  MLX5_FLOW_LAYER_INNER_L3 :
3977                                  MLX5_FLOW_LAYER_OUTER_L3;
3978                 if (!(item_flags & layer))
3979                         return rte_flow_error_set(error, EINVAL,
3980                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3981                                                   NULL,
3982                                                   "no IP protocol in pattern");
3983         }
3984         return ret;
3985 }
3986
3987 /**
3988  * Validate jump action.
3989  *
     * @param[in] dev
     *   Pointer to rte_eth_dev structure.
     * @param[in] tunnel
     *   Pointer to the tunnel offload info, NULL when the rule has no
     *   tunnel context.
3990  * @param[in] action
3991  *   Pointer to the jump action.
3992  * @param[in] action_flags
3993  *   Holds the actions detected until now.
3994  * @param[in] attributes
3995  *   Pointer to flow attributes
3996  * @param[in] external
3997  *   Action belongs to flow rule created by request external to PMD.
3998  * @param[out] error
3999  *   Pointer to error structure.
4000  *
4001  * @return
4002  *   0 on success, a negative errno value otherwise and rte_errno is set.
4003  */
4004 static int
4005 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
4006                              const struct mlx5_flow_tunnel *tunnel,
4007                              const struct rte_flow_action *action,
4008                              uint64_t action_flags,
4009                              const struct rte_flow_attr *attributes,
4010                              bool external, struct rte_flow_error *error)
4011 {
4012         uint32_t target_group, table;
4013         int ret = 0;
4014         struct flow_grp_info grp_info = {
4015                 .external = !!external,
4016                 .transfer = !!attributes->transfer,
4017                 .fdb_def_rule = 1,
4018                 .std_tbl_fix = 0
4019         };
4020         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4021                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4022                 return rte_flow_error_set(error, EINVAL,
4023                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4024                                           "can't have 2 fate actions in"
4025                                           " the same flow");
4026         if (action_flags & MLX5_FLOW_ACTION_METER)
4027                 return rte_flow_error_set(error, ENOTSUP,
4028                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4029                                           "jump with meter not supported");
4030         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
4031                 return rte_flow_error_set(error, EINVAL,
4032                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4033                                           "E-Switch mirroring can't support"
4034                                           " sample and jump actions in the"
4035                                           " same flow");
4036         if (!action->conf)
4037                 return rte_flow_error_set(error, EINVAL,
4038                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4039                                           NULL, "action configuration not set");
4040         target_group =
4041                 ((const struct rte_flow_action_jump *)action->conf)->group;
4042         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
4043                                        &grp_info, error);
4044         if (ret)
4045                 return ret;
4046         if (attributes->group == target_group &&
4047             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
4048                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
4049                 return rte_flow_error_set(error, EINVAL,
4050                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4051                                           "target group must be other than"
4052                                           " the current flow group");
4053         return 0;
4054 }
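
/*
 * Illustrative sketch (hypothetical group numbers): a jump that passes
 * the checks above, since the target group differs from the group the
 * rule itself lives in:
 *
 *     struct rte_flow_action_jump jump = { .group = 2 };
 *     struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */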
4055
4056 /**
4057  * Validate the port_id action.
4058  *
4059  * @param[in] dev
4060  *   Pointer to rte_eth_dev structure.
4061  * @param[in] action_flags
4062  *   Bit-fields that holds the actions detected until now.
4063  * @param[in] action
4064  *   Port_id RTE action structure.
4065  * @param[in] attr
4066  *   Attributes of flow that includes this action.
4067  * @param[out] error
4068  *   Pointer to error structure.
4069  *
4070  * @return
4071  *   0 on success, a negative errno value otherwise and rte_errno is set.
4072  */
4073 static int
4074 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
4075                                 uint64_t action_flags,
4076                                 const struct rte_flow_action *action,
4077                                 const struct rte_flow_attr *attr,
4078                                 struct rte_flow_error *error)
4079 {
4080         const struct rte_flow_action_port_id *port_id;
4081         struct mlx5_priv *act_priv;
4082         struct mlx5_priv *dev_priv;
4083         uint16_t port;
4084
4085         if (!attr->transfer)
4086                 return rte_flow_error_set(error, ENOTSUP,
4087                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4088                                           NULL,
4089                                           "port id action is valid in transfer"
4090                                           " mode only");
4091         if (!action || !action->conf)
4092                 return rte_flow_error_set(error, ENOTSUP,
4093                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4094                                           NULL,
4095                                           "port id action parameters must be"
4096                                           " specified");
4097         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4098                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4099                 return rte_flow_error_set(error, EINVAL,
4100                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4101                                           "can have only one fate action in"
4102                                           " a flow");
4103         dev_priv = mlx5_dev_to_eswitch_info(dev);
4104         if (!dev_priv)
4105                 return rte_flow_error_set(error, rte_errno,
4106                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4107                                           NULL,
4108                                           "failed to obtain E-Switch info");
4109         port_id = action->conf;
4110         port = port_id->original ? dev->data->port_id : port_id->id;
4111         act_priv = mlx5_port_to_eswitch_info(port, false);
4112         if (!act_priv)
4113                 return rte_flow_error_set
4114                                 (error, rte_errno,
4115                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4116                                  "failed to obtain E-Switch port id for port");
4117         if (act_priv->domain_id != dev_priv->domain_id)
4118                 return rte_flow_error_set
4119                                 (error, EINVAL,
4120                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4121                                  "port does not belong to"
4122                                  " E-Switch being configured");
4123         return 0;
4124 }
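
/*
 * Illustrative sketch ("peer_port_id" is a placeholder for a representor
 * port on the same E-Switch domain): port_id is a fate action and is
 * only valid on transfer rules, so attr.transfer must be set:
 *
 *     struct rte_flow_action_port_id pid = {
 *             .id = peer_port_id,
 *             .original = 0,
 *     };
 *     struct rte_flow_attr attr = { .transfer = 1, .ingress = 1 };
 */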
4125
4126 /**
4127  * Get the maximum number of modify header actions.
4128  *
4129  * @param dev
4130  *   Pointer to rte_eth_dev structure.
4131  * @param flags
4132  *   Flags bits to check if root level.
4133  *
4134  * @return
4135  *   Max number of modify header actions device can support.
4136  */
4137 static inline unsigned int
4138 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4139                               uint64_t flags)
4140 {
4141         /*
4142          * There's no way to directly query the max capacity from FW.
4143          * The maximal value on root table should be assumed to be supported.
4144          */
4145         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4146                 return MLX5_MAX_MODIFY_NUM;
4147         else
4148                 return MLX5_ROOT_TBL_MODIFY_NUM;
4149 }
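
/*
 * Usage sketch: a rule targeting the root table (flags carry
 * MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL) is limited to the conservative
 * MLX5_ROOT_TBL_MODIFY_NUM, while non-root tables allow up to
 * MLX5_MAX_MODIFY_NUM modify-header actions:
 *
 *     unsigned int max_root = flow_dv_modify_hdr_action_max
 *                             (dev, MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL);
 *     unsigned int max_tbl = flow_dv_modify_hdr_action_max(dev, 0);
 */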
4150
4151 /**
4152  * Validate the meter action.
4153  *
4154  * @param[in] dev
4155  *   Pointer to rte_eth_dev structure.
4156  * @param[in] action_flags
4157  *   Bit-fields that holds the actions detected until now.
4158  * @param[in] action
4159  *   Pointer to the meter action.
4160  * @param[in] attr
4161  *   Attributes of flow that includes this action.
4162  * @param[out] error
4163  *   Pointer to error structure.
4164  *
4165  * @return
4166  *   0 on success, a negative errno value otherwise and rte_errno is set.
4167  */
4168 static int
4169 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4170                                 uint64_t action_flags,
4171                                 const struct rte_flow_action *action,
4172                                 const struct rte_flow_attr *attr,
4173                                 struct rte_flow_error *error)
4174 {
4175         struct mlx5_priv *priv = dev->data->dev_private;
4176         const struct rte_flow_action_meter *am = action->conf;
4177         struct mlx5_flow_meter *fm;
4178
4179         if (!am)
4180                 return rte_flow_error_set(error, EINVAL,
4181                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4182                                           "meter action conf is NULL");
4183
4184         if (action_flags & MLX5_FLOW_ACTION_METER)
4185                 return rte_flow_error_set(error, ENOTSUP,
4186                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4187                                           "meter chaining not supported");
4188         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4189                 return rte_flow_error_set(error, ENOTSUP,
4190                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4191                                           "meter with jump not supported");
4192         if (!priv->mtr_en)
4193                 return rte_flow_error_set(error, ENOTSUP,
4194                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4195                                           NULL,
4196                                           "meter action not supported");
4197         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4198         if (!fm)
4199                 return rte_flow_error_set(error, EINVAL,
4200                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4201                                           "Meter not found");
4202         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4203               (!fm->ingress && !attr->ingress && attr->egress) ||
4204               (!fm->egress && !attr->egress && attr->ingress))))
4205                 return rte_flow_error_set(error, EINVAL,
4206                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4207                                           "Flow attributes are either invalid "
4208                                           "or have a conflict with current "
4209                                           "meter attributes");
4210         return 0;
4211 }
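
/*
 * Illustrative sketch (assumes meter ID 1 was created beforehand via
 * rte_mtr_create(), and "queue_conf" is a placeholder fate action): the
 * validator resolves the ID with mlx5_flow_meter_find() and cross-checks
 * the flow attributes against the meter's ones:
 *
 *     struct rte_flow_action_meter meter = { .mtr_id = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */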
4212
4213 /**
4214  * Validate the age action.
4215  *
4216  * @param[in] action_flags
4217  *   Holds the actions detected until now.
4218  * @param[in] action
4219  *   Pointer to the age action.
4220  * @param[in] dev
4221  *   Pointer to the Ethernet device structure.
4222  * @param[out] error
4223  *   Pointer to error structure.
4224  *
4225  * @return
4226  *   0 on success, a negative errno value otherwise and rte_errno is set.
4227  */
4228 static int
4229 flow_dv_validate_action_age(uint64_t action_flags,
4230                             const struct rte_flow_action *action,
4231                             struct rte_eth_dev *dev,
4232                             struct rte_flow_error *error)
4233 {
4234         struct mlx5_priv *priv = dev->data->dev_private;
4235         const struct rte_flow_action_age *age = action->conf;
4236
4237         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4238             !priv->sh->aso_age_mng))
4239                 return rte_flow_error_set(error, ENOTSUP,
4240                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4241                                           NULL,
4242                                           "age action not supported");
4243         if (!(action->conf))
4244                 return rte_flow_error_set(error, EINVAL,
4245                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4246                                           "configuration cannot be null");
4247         if (!(age->timeout))
4248                 return rte_flow_error_set(error, EINVAL,
4249                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4250                                           "invalid timeout value 0");
4251         if (action_flags & MLX5_FLOW_ACTION_AGE)
4252                 return rte_flow_error_set(error, EINVAL,
4253                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4254                                           "duplicate age actions set");
4255         return 0;
4256 }
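
/*
 * Illustrative sketch ("my_flow_ctx" is a placeholder): an age action
 * with a non-zero timeout in seconds; a zero timeout or a duplicate AGE
 * action in the same flow is rejected above. The context pointer is
 * handed back to the application when the flow is reported aged-out.
 *
 *     struct rte_flow_action_age age = {
 *             .timeout = 30,
 *             .context = my_flow_ctx,
 *     };
 */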
4257
4258 /**
4259  * Validate the modify-header IPv4 DSCP actions.
4260  *
4261  * @param[in] action_flags
4262  *   Holds the actions detected until now.
4263  * @param[in] action
4264  *   Pointer to the modify action.
4265  * @param[in] item_flags
4266  *   Holds the items detected.
4267  * @param[out] error
4268  *   Pointer to error structure.
4269  *
4270  * @return
4271  *   0 on success, a negative errno value otherwise and rte_errno is set.
4272  */
4273 static int
4274 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4275                                          const struct rte_flow_action *action,
4276                                          const uint64_t item_flags,
4277                                          struct rte_flow_error *error)
4278 {
4279         int ret = 0;
4280
4281         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4282         if (!ret) {
4283                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4284                         return rte_flow_error_set(error, EINVAL,
4285                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4286                                                   NULL,
4287                                                   "no ipv4 item in pattern");
4288         }
4289         return ret;
4290 }
4291
4292 /**
4293  * Validate the modify-header IPv6 DSCP actions.
4294  *
4295  * @param[in] action_flags
4296  *   Holds the actions detected until now.
4297  * @param[in] action
4298  *   Pointer to the modify action.
4299  * @param[in] item_flags
4300  *   Holds the items detected.
4301  * @param[out] error
4302  *   Pointer to error structure.
4303  *
4304  * @return
4305  *   0 on success, a negative errno value otherwise and rte_errno is set.
4306  */
4307 static int
4308 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4309                                          const struct rte_flow_action *action,
4310                                          const uint64_t item_flags,
4311                                          struct rte_flow_error *error)
4312 {
4313         int ret = 0;
4314
4315         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4316         if (!ret) {
4317                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4318                         return rte_flow_error_set(error, EINVAL,
4319                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4320                                                   NULL,
4321                                                   "no ipv6 item in pattern");
4322         }
4323         return ret;
4324 }
4325
4326 /**
4327  * Match modify-header resource.
4328  *
4329  * @param list
4330  *   Pointer to the hash list.
4331  * @param entry
4332  *   Pointer to exist resource entry object.
4333  * @param key
4334  *   Key of the new entry.
4335  * @param ctx
4336  *   Pointer to new modify-header resource.
4337  *
4338  * @return
4339  *   0 on matching, non-zero otherwise.
4340  */
4341 int
4342 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4343                         struct mlx5_hlist_entry *entry,
4344                         uint64_t key __rte_unused, void *cb_ctx)
4345 {
4346         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4347         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4348         struct mlx5_flow_dv_modify_hdr_resource *resource =
4349                         container_of(entry, typeof(*resource), entry);
4350         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4351
4352         key_len += ref->actions_num * sizeof(ref->actions[0]);
4353         return ref->actions_num != resource->actions_num ||
4354                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4355 }
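
/*
 * Key layout sketch (assuming the field order implied by the offsetof()
 * arithmetic above): matching covers the fixed fields from ft_type to
 * the end of the structure plus the actions_num trailing entries of the
 * flexible actions[] array; the hash-list entry and the action object
 * pointer that precede ft_type are excluded:
 *
 *     mlx5_flow_dv_modify_hdr_resource
 *         entry, action          - not compared
 *         ft_type ... fields     - compared, fixed part of the key
 *         actions[actions_num]   - compared, variable part of the key
 */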
4356
4357 struct mlx5_hlist_entry *
4358 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4359                          void *cb_ctx)
4360 {
4361         struct mlx5_dev_ctx_shared *sh = list->ctx;
4362         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4363         struct mlx5dv_dr_domain *ns;
4364         struct mlx5_flow_dv_modify_hdr_resource *entry;
4365         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4366         int ret;
4367         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4368         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4369
4370         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4371                             SOCKET_ID_ANY);
4372         if (!entry) {
4373                 rte_flow_error_set(ctx->error, ENOMEM,
4374                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4375                                    "cannot allocate resource memory");
4376                 return NULL;
4377         }
4378         rte_memcpy(&entry->ft_type,
4379                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4380                    key_len + data_len);
4381         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4382                 ns = sh->fdb_domain;
4383         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4384                 ns = sh->tx_domain;
4385         else
4386                 ns = sh->rx_domain;
4387         ret = mlx5_flow_os_create_flow_action_modify_header
4388                                         (sh->ctx, ns, entry,
4389                                          data_len, &entry->action);
4390         if (ret) {
4391                 mlx5_free(entry);
4392                 rte_flow_error_set(ctx->error, ENOMEM,
4393                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4394                                    NULL, "cannot create modification action");
4395                 return NULL;
4396         }
4397         return &entry->entry;
4398 }
4399
4400 /**
4401  * Validate the sample action.
4402  *
4403  * @param[in] action_flags
4404  *   Holds the actions detected until now.
4405  * @param[in] action
4406  *   Pointer to the sample action.
4407  * @param[in] dev
4408  *   Pointer to the Ethernet device structure.
4409  * @param[in] attr
4410  *   Attributes of flow that includes this action.
4411  * @param[in] item_flags
4412  *   Holds the items detected.
4413  * @param[in] rss
4414  *   Pointer to the RSS action.
4415  * @param[out] sample_rss
4416  *   Pointer to the RSS action in sample action list.
4417  * @param[out] error
4418  *   Pointer to error structure.
4419  *
4420  * @return
4421  *   0 on success, a negative errno value otherwise and rte_errno is set.
4422  */
4423 static int
4424 flow_dv_validate_action_sample(uint64_t action_flags,
4425                                const struct rte_flow_action *action,
4426                                struct rte_eth_dev *dev,
4427                                const struct rte_flow_attr *attr,
4428                                uint64_t item_flags,
4429                                const struct rte_flow_action_rss *rss,
4430                                const struct rte_flow_action_rss **sample_rss,
4431                                struct rte_flow_error *error)
4432 {
4433         struct mlx5_priv *priv = dev->data->dev_private;
4434         struct mlx5_dev_config *dev_conf = &priv->config;
4435         const struct rte_flow_action_sample *sample = action->conf;
4436         const struct rte_flow_action *act;
4437         uint64_t sub_action_flags = 0;
4438         uint16_t queue_index = 0xFFFF;
4439         int actions_n = 0;
4440         int ret;
4441         fdb_mirror = 0;
4442
4443         if (!sample)
4444                 return rte_flow_error_set(error, EINVAL,
4445                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4446                                           "configuration cannot be NULL");
4447         if (sample->ratio == 0)
4448                 return rte_flow_error_set(error, EINVAL,
4449                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4450                                           "ratio value starts from 1");
4451         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4452                 return rte_flow_error_set(error, ENOTSUP,
4453                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4454                                           NULL,
4455                                           "sample action not supported");
4456         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4457                 return rte_flow_error_set(error, EINVAL,
4458                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4459                                           "Multiple sample actions not "
4460                                           "supported");
4461         if (action_flags & MLX5_FLOW_ACTION_METER)
4462                 return rte_flow_error_set(error, EINVAL,
4463                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4464                                           "wrong action order, meter should "
4465                                           "be after sample action");
4466         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4467                 return rte_flow_error_set(error, EINVAL,
4468                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4469                                           "wrong action order, jump should "
4470                                           "be after sample action");
4471         act = sample->actions;
4472         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4473                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4474                         return rte_flow_error_set(error, ENOTSUP,
4475                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4476                                                   act, "too many actions");
4477                 switch (act->type) {
4478                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4479                         ret = mlx5_flow_validate_action_queue(act,
4480                                                               sub_action_flags,
4481                                                               dev,
4482                                                               attr, error);
4483                         if (ret < 0)
4484                                 return ret;
4485                         queue_index = ((const struct rte_flow_action_queue *)
4486                                                         (act->conf))->index;
4487                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4488                         ++actions_n;
4489                         break;
4490                 case RTE_FLOW_ACTION_TYPE_RSS:
4491                         *sample_rss = act->conf;
4492                         ret = mlx5_flow_validate_action_rss(act,
4493                                                             sub_action_flags,
4494                                                             dev, attr,
4495                                                             item_flags,
4496                                                             error);
4497                         if (ret < 0)
4498                                 return ret;
4499                         if (rss && *sample_rss &&
4500                             ((*sample_rss)->level != rss->level ||
4501                             (*sample_rss)->types != rss->types))
4502                                 return rte_flow_error_set(error, ENOTSUP,
4503                                         RTE_FLOW_ERROR_TYPE_ACTION,
4504                                         NULL,
4505                                         "Can't use different RSS types "
4506                                         "or levels in the same flow");
4507                         if (*sample_rss != NULL && (*sample_rss)->queue_num)
4508                                 queue_index = (*sample_rss)->queue[0];
4509                         sub_action_flags |= MLX5_FLOW_ACTION_RSS;
4510                         ++actions_n;
4511                         break;
4512                 case RTE_FLOW_ACTION_TYPE_MARK:
4513                         ret = flow_dv_validate_action_mark(dev, act,
4514                                                            sub_action_flags,
4515                                                            attr, error);
4516                         if (ret < 0)
4517                                 return ret;
4518                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4519                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4520                                                 MLX5_FLOW_ACTION_MARK_EXT;
4521                         else
4522                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4523                         ++actions_n;
4524                         break;
4525                 case RTE_FLOW_ACTION_TYPE_COUNT:
4526                         ret = flow_dv_validate_action_count(dev, error);
4527                         if (ret < 0)
4528                                 return ret;
4529                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4530                         ++actions_n;
4531                         break;
4532                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4533                         ret = flow_dv_validate_action_port_id(dev,
4534                                                               sub_action_flags,
4535                                                               act,
4536                                                               attr,
4537                                                               error);
4538                         if (ret)
4539                                 return ret;
4540                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4541                         ++actions_n;
4542                         break;
4543                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4544                         ret = flow_dv_validate_action_raw_encap_decap
4545                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4546                                  &actions_n, action, item_flags, error);
4547                         if (ret < 0)
4548                                 return ret;
4549                         ++actions_n;
4550                         break;
4551                 default:
4552                         return rte_flow_error_set(error, ENOTSUP,
4553                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4554                                                   NULL,
4555                                                   "Doesn't support optional "
4556                                                   "action");
4557                 }
4558         }
4559         if (attr->ingress && !attr->transfer) {
4560                 if (!(sub_action_flags & (MLX5_FLOW_ACTION_QUEUE |
4561                                           MLX5_FLOW_ACTION_RSS)))
4562                         return rte_flow_error_set(error, EINVAL,
4563                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4564                                                   NULL,
4565                                                   "Ingress must have a dest "
4566                                                   "QUEUE for Sample");
4567         } else if (attr->egress && !attr->transfer) {
4568                 return rte_flow_error_set(error, ENOTSUP,
4569                                           RTE_FLOW_ERROR_TYPE_ACTION,
4570                                           NULL,
4571                                           "Sample only supports Ingress "
4572                                           "or E-Switch");
4573         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4574                 MLX5_ASSERT(attr->transfer);
4575                 if (sample->ratio > 1)
4576                         return rte_flow_error_set(error, ENOTSUP,
4577                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4578                                                   NULL,
4579                                                   "E-Switch doesn't support "
4580                                                   "any optional action "
4581                                                   "for sampling");
4582                 fdb_mirror = 1;
4583                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4584                         return rte_flow_error_set(error, ENOTSUP,
4585                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4586                                                   NULL,
4587                                                   "unsupported action QUEUE");
4588                 if (sub_action_flags & MLX5_FLOW_ACTION_RSS)
4589                         return rte_flow_error_set(error, ENOTSUP,
4590                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4591                                                   NULL,
4592                                                   "unsupported action RSS");
4593                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4594                         return rte_flow_error_set(error, EINVAL,
4595                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4596                                                   NULL,
4597                                                   "E-Switch must have a dest "
4598                                                   "port for mirroring");
4599         }
4600         /* Continue validation for Xcap actions. */
4601         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4602             (queue_index == 0xFFFF ||
4603              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4604                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4605                      MLX5_FLOW_XCAP_ACTIONS)
4606                         return rte_flow_error_set(error, ENOTSUP,
4607                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4608                                                   NULL, "encap and decap "
4609                                                   "combination isn't "
4610                                                   "supported");
4611                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4612                                                         MLX5_FLOW_ACTION_ENCAP))
4613                         return rte_flow_error_set(error, ENOTSUP,
4614                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4615                                                   NULL, "encap is not supported"
4616                                                   " for ingress traffic");
4617         }
4618         return 0;
4619 }
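
/*
 * Illustrative sketch: a NIC ingress sample rule that satisfies the
 * checks above. Ingress sampling must end in a QUEUE or RSS fate, so a
 * queue is used here; every 2nd packet (ratio 2) is duplicated to the
 * sample path:
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action sub_acts[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,
 *             .actions = sub_acts,
 *     };
 */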
4620
4621 /**
4622  * Find existing modify-header resource or create and register a new one.
4623  *
4624  * @param[in, out] dev
4625  *   Pointer to rte_eth_dev structure.
4626  * @param[in, out] resource
4627  *   Pointer to modify-header resource.
4628  * @param[in, out] dev_flow
4629  *   Pointer to the dev_flow.
4630  * @param[out] error
4631  *   Pointer to error structure.
4632  *
4633  * @return
4634  *   0 on success, a negative errno value otherwise and rte_errno is set.
4635  */
4636 static int
4637 flow_dv_modify_hdr_resource_register
4638                         (struct rte_eth_dev *dev,
4639                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4640                          struct mlx5_flow *dev_flow,
4641                          struct rte_flow_error *error)
4642 {
4643         struct mlx5_priv *priv = dev->data->dev_private;
4644         struct mlx5_dev_ctx_shared *sh = priv->sh;
4645         uint32_t key_len = sizeof(*resource) -
4646                            offsetof(typeof(*resource), ft_type) +
4647                            resource->actions_num * sizeof(resource->actions[0]);
4648         struct mlx5_hlist_entry *entry;
4649         struct mlx5_flow_cb_ctx ctx = {
4650                 .error = error,
4651                 .data = resource,
4652         };
4653         uint64_t key64;
4654
4655         resource->flags = dev_flow->dv.group ? 0 :
4656                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4657         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4658                                     resource->flags))
4659                 return rte_flow_error_set(error, EOVERFLOW,
4660                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4661                                           "too many modify header items");
4662         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4663         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
4664         if (!entry)
4665                 return -rte_errno;
4666         resource = container_of(entry, typeof(*resource), entry);
4667         dev_flow->handle->dvh.modify_hdr = resource;
4668         return 0;
4669 }
4670
4671 /**
4672  * Get DV flow counter by index.
4673  *
4674  * @param[in] dev
4675  *   Pointer to the Ethernet device structure.
4676  * @param[in] idx
4677  *   mlx5 flow counter index in the container.
4678  * @param[out] ppool
4679  *   mlx5 flow counter pool in the container.
4680  *
4681  * @return
4682  *   Pointer to the counter, NULL otherwise.
4683  */
4684 static struct mlx5_flow_counter *
4685 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4686                            uint32_t idx,
4687                            struct mlx5_flow_counter_pool **ppool)
4688 {
4689         struct mlx5_priv *priv = dev->data->dev_private;
4690         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4691         struct mlx5_flow_counter_pool *pool;
4692
4693         /* Decrease to original index and clear shared bit. */
4694         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4695         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4696         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4697         MLX5_ASSERT(pool);
4698         if (ppool)
4699                 *ppool = pool;
4700         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4701 }
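
/*
 * Index arithmetic sketch (MLX5_COUNTERS_PER_POOL == 512 is assumed here
 * purely for illustration): indices are 1-based and a shared counter
 * carries the MLX5_CNT_SHARED_OFFSET flag bit, so idx 1537 decodes as:
 *
 *     idx  = (1537 - 1) & (MLX5_CNT_SHARED_OFFSET - 1);    == 1536
 *     pool = cmng->pools[1536 / 512];                      == pools[3]
 *     cnt  = MLX5_POOL_GET_CNT(pool, 1536 % 512);          == slot 0
 */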
4702
4703 /**
4704  * Check the devx counter belongs to the pool.
4705  *
4706  * @param[in] pool
4707  *   Pointer to the counter pool.
4708  * @param[in] id
4709  *   The counter devx ID.
4710  *
4711  * @return
4712  *   True if counter belongs to the pool, false otherwise.
4713  */
4714 static bool
4715 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4716 {
4717         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4718                    MLX5_COUNTERS_PER_POOL;
4719
4720         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4721                 return true;
4722         return false;
4723 }
4724
4725 /**
4726  * Get a pool by devx counter ID.
4727  *
4728  * @param[in] cmng
4729  *   Pointer to the counter management.
4730  * @param[in] id
4731  *   The counter devx ID.
4732  *
4733  * @return
4734  *   The counter pool pointer if it exists, NULL otherwise.
4735  */
4736 static struct mlx5_flow_counter_pool *
4737 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4738 {
4739         uint32_t i;
4740         struct mlx5_flow_counter_pool *pool = NULL;
4741
4742         rte_spinlock_lock(&cmng->pool_update_sl);
4743         /* Check last used pool. */
4744         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4745             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4746                 pool = cmng->pools[cmng->last_pool_idx];
4747                 goto out;
4748         }
4749         /* ID out of range means no suitable pool in the container. */
4750         if (id > cmng->max_id || id < cmng->min_id)
4751                 goto out;
4752         /*
4753          * Find the pool from the end of the container, since mostly counter
4754          * ID is sequence increasing, and the last pool should be the needed
4755          * one.
4756          */
4757         i = cmng->n_valid;
4758         while (i--) {
4759                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4760
4761                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4762                         pool = pool_tmp;
4763                         break;
4764                 }
4765         }
4766 out:
4767         rte_spinlock_unlock(&cmng->pool_update_sl);
4768         return pool;
4769 }
4770
4771 /**
4772  * Resize a counter container.
4773  *
4774  * @param[in] dev
4775  *   Pointer to the Ethernet device structure.
4776  *
4777  * @return
4778  *   0 on success, otherwise negative errno value and rte_errno is set.
4779  */
4780 static int
4781 flow_dv_container_resize(struct rte_eth_dev *dev)
4782 {
4783         struct mlx5_priv *priv = dev->data->dev_private;
4784         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4785         void *old_pools = cmng->pools;
4786         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4787         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4788         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4789
4790         if (!pools) {
4791                 rte_errno = ENOMEM;
4792                 return -ENOMEM;
4793         }
4794         if (old_pools)
4795                 memcpy(pools, old_pools, cmng->n *
4796                                        sizeof(struct mlx5_flow_counter_pool *));
4797         cmng->n = resize;
4798         cmng->pools = pools;
4799         if (old_pools)
4800                 mlx5_free(old_pools);
4801         return 0;
4802 }
4803
4804 /**
4805  * Query a devx flow counter.
4806  *
4807  * @param[in] dev
4808  *   Pointer to the Ethernet device structure.
4809  * @param[in] cnt
4810  *   Index to the flow counter.
4811  * @param[out] pkts
4812  *   The statistics value of packets.
4813  * @param[out] bytes
4814  *   The statistics value of bytes.
4815  *
4816  * @return
4817  *   0 on success, otherwise a negative errno value and rte_errno is set.
4818  */
4819 static inline int
4820 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4821                      uint64_t *bytes)
4822 {
4823         struct mlx5_priv *priv = dev->data->dev_private;
4824         struct mlx5_flow_counter_pool *pool = NULL;
4825         struct mlx5_flow_counter *cnt;
4826         int offset;
4827
4828         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4829         MLX5_ASSERT(pool);
4830         if (priv->sh->cmng.counter_fallback)
4831                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4832                                         0, pkts, bytes, 0, NULL, NULL, 0);
4833         rte_spinlock_lock(&pool->sl);
4834         if (!pool->raw) {
4835                 *pkts = 0;
4836                 *bytes = 0;
4837         } else {
4838                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4839                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4840                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4841         }
4842         rte_spinlock_unlock(&pool->sl);
4843         return 0;
4844 }
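
/*
 * Usage sketch (cnt_idx is assumed to come from flow_dv_counter_alloc()
 * below): both the fallback path (per-counter DevX query) and the batch
 * path (cached raw data) return the same pkts/bytes pair:
 *
 *     uint64_t pkts, bytes;
 *
 *     if (!_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes))
 *             DRV_LOG(DEBUG, "hits=%" PRIu64 " bytes=%" PRIu64,
 *                     pkts, bytes);
 */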
4845
4846 /**
4847  * Create and initialize a new counter pool.
4848  *
4849  * @param[in] dev
4850  *   Pointer to the Ethernet device structure.
4851  * @param[out] dcs
4852  *   The devX counter handle.
4853  * @param[in] age
4854  *   Whether the pool is for a counter that was allocated for aging.
4857  *
4858  * @return
4859  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4860  */
4861 static struct mlx5_flow_counter_pool *
4862 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4863                     uint32_t age)
4864 {
4865         struct mlx5_priv *priv = dev->data->dev_private;
4866         struct mlx5_flow_counter_pool *pool;
4867         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4868         bool fallback = priv->sh->cmng.counter_fallback;
4869         uint32_t size = sizeof(*pool);
4870
4871         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4872         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4873         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4874         if (!pool) {
4875                 rte_errno = ENOMEM;
4876                 return NULL;
4877         }
4878         pool->raw = NULL;
4879         pool->is_aged = !!age;
4880         pool->query_gen = 0;
4881         pool->min_dcs = dcs;
4882         rte_spinlock_init(&pool->sl);
4883         rte_spinlock_init(&pool->csl);
4884         TAILQ_INIT(&pool->counters[0]);
4885         TAILQ_INIT(&pool->counters[1]);
4886         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4887         rte_spinlock_lock(&cmng->pool_update_sl);
4888         pool->index = cmng->n_valid;
4889         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4890                 mlx5_free(pool);
4891                 rte_spinlock_unlock(&cmng->pool_update_sl);
4892                 return NULL;
4893         }
4894         cmng->pools[pool->index] = pool;
4895         cmng->n_valid++;
4896         if (unlikely(fallback)) {
4897                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4898
4899                 if (base < cmng->min_id)
4900                         cmng->min_id = base;
4901                 if (base > cmng->max_id)
4902                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4903                 cmng->last_pool_idx = pool->index;
4904         }
4905         rte_spinlock_unlock(&cmng->pool_update_sl);
4906         return pool;
4907 }
4908
4909 /**
4910  * Prepare a new counter and/or a new counter pool.
4911  *
4912  * @param[in] dev
4913  *   Pointer to the Ethernet device structure.
4914  * @param[out] cnt_free
4915  *   Where to put the pointer of a new counter.
4916  * @param[in] age
4917  *   Whether the pool is for a counter that was allocated for aging.
4918  *
4919  * @return
4920  *   The counter pool pointer and @p cnt_free is set on success,
4921  *   NULL otherwise and rte_errno is set.
4922  */
4923 static struct mlx5_flow_counter_pool *
4924 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4925                              struct mlx5_flow_counter **cnt_free,
4926                              uint32_t age)
4927 {
4928         struct mlx5_priv *priv = dev->data->dev_private;
4929         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4930         struct mlx5_flow_counter_pool *pool;
4931         struct mlx5_counters tmp_tq;
4932         struct mlx5_devx_obj *dcs = NULL;
4933         struct mlx5_flow_counter *cnt;
4934         enum mlx5_counter_type cnt_type =
4935                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4936         bool fallback = priv->sh->cmng.counter_fallback;
4937         uint32_t i;
4938
4939         if (fallback) {
4940                 /* bulk_bitmap must be 0 for single counter allocation. */
4941                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4942                 if (!dcs)
4943                         return NULL;
4944                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4945                 if (!pool) {
4946                         pool = flow_dv_pool_create(dev, dcs, age);
4947                         if (!pool) {
4948                                 mlx5_devx_cmd_destroy(dcs);
4949                                 return NULL;
4950                         }
4951                 }
4952                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4953                 cnt = MLX5_POOL_GET_CNT(pool, i);
4954                 cnt->pool = pool;
4955                 cnt->dcs_when_free = dcs;
4956                 *cnt_free = cnt;
4957                 return pool;
4958         }
4959         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4960         if (!dcs) {
4961                 rte_errno = ENODATA;
4962                 return NULL;
4963         }
4964         pool = flow_dv_pool_create(dev, dcs, age);
4965         if (!pool) {
4966                 mlx5_devx_cmd_destroy(dcs);
4967                 return NULL;
4968         }
4969         TAILQ_INIT(&tmp_tq);
4970         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4971                 cnt = MLX5_POOL_GET_CNT(pool, i);
4972                 cnt->pool = pool;
4973                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4974         }
4975         rte_spinlock_lock(&cmng->csl[cnt_type]);
4976         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4977         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4978         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4979         (*cnt_free)->pool = pool;
4980         return pool;
4981 }
4982
4983 /**
4984  * Allocate a flow counter.
4985  *
4986  * @param[in] dev
4987  *   Pointer to the Ethernet device structure.
4988  * @param[in] age
4989  *   Whether the counter was allocated for aging.
4990  *
4991  * @return
4992  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4993  */
4994 static uint32_t
4995 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4996 {
4997         struct mlx5_priv *priv = dev->data->dev_private;
4998         struct mlx5_flow_counter_pool *pool = NULL;
4999         struct mlx5_flow_counter *cnt_free = NULL;
5000         bool fallback = priv->sh->cmng.counter_fallback;
5001         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
5002         enum mlx5_counter_type cnt_type =
5003                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
5004         uint32_t cnt_idx;
5005
5006         if (!priv->config.devx) {
5007                 rte_errno = ENOTSUP;
5008                 return 0;
5009         }
5010         /* Get free counters from container. */
5011         rte_spinlock_lock(&cmng->csl[cnt_type]);
5012         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
5013         if (cnt_free)
5014                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
5015         rte_spinlock_unlock(&cmng->csl[cnt_type]);
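        /* No free counter in the container: prepare a new pool and take one. */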
5016         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
5017                 goto err;
5018         pool = cnt_free->pool;
5019         if (fallback)
5020                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
5021         /* Create a DV counter action only on first-time usage. */
5022         if (!cnt_free->action) {
5023                 uint16_t offset;
5024                 struct mlx5_devx_obj *dcs;
5025                 int ret;
5026
5027                 if (!fallback) {
5028                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
5029                         dcs = pool->min_dcs;
5030                 } else {
5031                         offset = 0;
5032                         dcs = cnt_free->dcs_when_free;
5033                 }
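                /*
                 * Bulk counters share the pool's min_dcs devx object at
                 * their array offset; a fallback counter owns a dedicated
                 * single-counter object, hence offset 0.
                 */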
5034                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
5035                                                             &cnt_free->action);
5036                 if (ret) {
5037                         rte_errno = errno;
5038                         goto err;
5039                 }
5040         }
5041         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
5042                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
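        /*
         * The counter index encodes the pool index plus the in-pool offset
         * and is 1-based, so that 0 can serve as the invalid index (note
         * the "counter - 1" decode in flow_dv_counter_idx_get_age()).
         * E.g., assuming MLX5_MAKE_CNT_IDX(pi, off) is
         * pi * MLX5_COUNTERS_PER_POOL + off + 1, pool 2 / offset 5 maps to
         * 2 * MLX5_COUNTERS_PER_POOL + 6.
         */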
5043         /* Update the counter reset values. */
5044         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
5045                                  &cnt_free->bytes))
5046                 goto err;
5047         if (!fallback && !priv->sh->cmng.query_thread_on)
5048                 /* Start the asynchronous batch query by the host thread. */
5049                 mlx5_set_query_alarm(priv->sh);
5050         return cnt_idx;
5051 err:
5052         if (cnt_free) {
5053                 cnt_free->pool = pool;
5054                 if (fallback)
5055                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
5056                 rte_spinlock_lock(&cmng->csl[cnt_type]);
5057                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
5058                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
5059         }
5060         return 0;
5061 }
5062
5063 /**
5064  * Allocate a shared flow counter.
5065  *
5066  * @param[in] ctx
5067  *   Pointer to the shared counter configuration.
5068  * @param[in] data
5069  *   Pointer to save the allocated counter index.
5070  *
5071  * @return
5072  *   0 on success and the allocated counter index (ORed with
5073  *   MLX5_CNT_SHARED_OFFSET) is stored in @p data, a negative value otherwise.
5074  */
5075 static int32_t
5076 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
5077 {
5078         struct mlx5_shared_counter_conf *conf = ctx;
5079         struct rte_eth_dev *dev = conf->dev;
5080         struct mlx5_flow_counter *cnt;
5081
5082         data->dword = flow_dv_counter_alloc(dev, 0);
             /* 0 is the invalid counter index: propagate allocation failure. */
             if (!data->dword)
                     return -1;
5083         data->dword |= MLX5_CNT_SHARED_OFFSET;
5084         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
5085         cnt->shared_info.id = conf->id;
5086         return 0;
5087 }
5088
5089 /**
5090  * Get a shared flow counter.
5091  *
5092  * @param[in] dev
5093  *   Pointer to the Ethernet device structure.
5094  * @param[in] id
5095  *   Counter identifier.
5096  *
5097  * @return
5098  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
5099  */
5100 static uint32_t
5101 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
5102 {
5103         struct mlx5_priv *priv = dev->data->dev_private;
5104         struct mlx5_shared_counter_conf conf = {
5105                 .dev = dev,
5106                 .id = id,
5107         };
5108         union mlx5_l3t_data data = {
5109                 .dword = 0,
5110         };
5111
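        /*
         * mlx5_l3t_prepare_entry() either returns an existing entry for the
         * shared ID or invokes flow_dv_counter_alloc_shared_cb() to create
         * one; on failure data.dword stays 0, the invalid counter index.
         */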
5112         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
5113                                flow_dv_counter_alloc_shared_cb, &conf);
5114         return data.dword;
5115 }
5116
5117 /**
5118  * Get age param from counter index.
5119  *
5120  * @param[in] dev
5121  *   Pointer to the Ethernet device structure.
5122  * @param[in] counter
5123  *   Index to the counter handler.
5124  *
5125  * @return
5126  *   The aging parameter specified for the counter index.
5127  */
5128 static struct mlx5_age_param*
5129 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5130                                 uint32_t counter)
5131 {
5132         struct mlx5_flow_counter *cnt;
5133         struct mlx5_flow_counter_pool *pool = NULL;
5134
5135         flow_dv_counter_get_by_idx(dev, counter, &pool);
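        /* Counter indices are 1-based; convert to the 0-based pool offset. */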
5136         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5137         cnt = MLX5_POOL_GET_CNT(pool, counter);
5138         return MLX5_CNT_TO_AGE(cnt);
5139 }
5140
5141 /**
5142  * Remove a flow counter from the aged counter list.
5143  *
5144  * @param[in] dev
5145  *   Pointer to the Ethernet device structure.
5146  * @param[in] counter
5147  *   Index to the counter handler.
5148  * @param[in] cnt
5149  *   Pointer to the counter handler.
5150  */
5151 static void
5152 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5153                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5154 {
5155         struct mlx5_age_info *age_info;
5156         struct mlx5_age_param *age_param;
5157         struct mlx5_priv *priv = dev->data->dev_private;
5158         uint16_t expected = AGE_CANDIDATE;
5159
5160         age_info = GET_PORT_AGE_INFO(priv);
5161         age_param = flow_dv_counter_idx_get_age(dev, counter);
5162         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5163                                          AGE_FREE, false, __ATOMIC_RELAXED,
5164                                          __ATOMIC_RELAXED)) {
5165                 /*
5166                  * We need the lock even on age timeout, since the counter
5167                  * may still be handled by the aging process.
5168                  */
5169                 rte_spinlock_lock(&age_info->aged_sl);
5170                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5171                 rte_spinlock_unlock(&age_info->aged_sl);
5172                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5173         }
5174 }
5175
5176 /**
5177  * Release a flow counter.
5178  *
5179  * @param[in] dev
5180  *   Pointer to the Ethernet device structure.
5181  * @param[in] counter
5182  *   Index to the counter handler.
5183  */
5184 static void
5185 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5186 {
5187         struct mlx5_priv *priv = dev->data->dev_private;
5188         struct mlx5_flow_counter_pool *pool = NULL;
5189         struct mlx5_flow_counter *cnt;
5190         enum mlx5_counter_type cnt_type;
5191
5192         if (!counter)
5193                 return;
5194         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5195         MLX5_ASSERT(pool);
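        /*
         * A shared counter is reference-counted through its L3 table entry;
         * mlx5_l3t_clear_entry() presumably returns non-zero while other
         * references remain, in which case keep the counter allocated.
         */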
5196         if (IS_SHARED_CNT(counter) &&
5197             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5198                 return;
5199         if (pool->is_aged)
5200                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5201         cnt->pool = pool;
5202         /*
5203          * Put the counter back on a list to be updated, in non-fallback mode.
5204          * Two lists are used alternately: while one list is being queried,
5205          * freed counters are added to the other one, selected by the pool
5206          * query_gen value. After a query finishes, its list is appended to
5207          * the global container counter list; the lists are swapped when a
5208          * query starts. The query callback and this release function thus
5209          * always operate on different lists, so no common lock is needed
5210          * (pool->csl below only guards the per-list tail insertion).
5211          */
5212         if (!priv->sh->cmng.counter_fallback) {
5213                 rte_spinlock_lock(&pool->csl);
5214                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5215                 rte_spinlock_unlock(&pool->csl);
5216         } else {
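                /*
                 * Fallback mode: hand the dedicated devx object back with
                 * the counter so the next allocation can reuse it.
                 */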
5217                 cnt->dcs_when_free = cnt->dcs_when_active;
5218                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5219                                            MLX5_COUNTER_TYPE_ORIGIN;
5220                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5221                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5222                                   cnt, next);
5223                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5224         }
5225 }
5226
5227 /**
5228  * Verify that the @p attributes will be correctly understood by the NIC
5229  * and store them in the @p flow if everything is correct.
5230  *
5231  * @param[in] dev
5232  *   Pointer to dev struct.
5233  * @param[in] tunnel
5234  *   Pointer to the tunnel offload context, or NULL.
5235  * @param[in] attributes
5236  *   Pointer to flow attributes.
5237  * @param[in] grp_info
5238  *   Pointer to the flow group translation info.
5239  * @param[out] error
5240  *   Pointer to error structure.
5241  *
5242  * @return 0 on success and non-root table, 1 on success and root table,
5243  *   a negative errno value otherwise and rte_errno is set.
5244  */
5245 static int
5246 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5247                             const struct mlx5_flow_tunnel *tunnel,
5248                             const struct rte_flow_attr *attributes,
5249                             const struct flow_grp_info *grp_info,
5250                             struct rte_flow_error *error)
5251 {
5252         struct mlx5_priv *priv = dev->data->dev_private;
5253         uint32_t priority_max = priv->config.flow_prio - 1;
5254         int ret = 0;
5255
5256 #ifndef HAVE_MLX5DV_DR
5257         RTE_SET_USED(tunnel);
5258         RTE_SET_USED(grp_info);
5259         if (attributes->group)
5260                 return rte_flow_error_set(error, ENOTSUP,
5261                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5262                                           NULL,
5263                                           "groups are not supported");
5264 #else
5265         uint32_t table = 0;
5266
5267         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5268                                        grp_info, error);
5269         if (ret)
5270                 return ret;
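        /* Group 0 maps to the root table; report it via the return value. */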
5271         if (!table)
5272                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5273 #endif
5274         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5275             attributes->priority >= priority_max)
5276                 return rte_flow_error_set(error, ENOTSUP,
5277                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5278                                           NULL,
5279                                           "priority out of range");
5280         if (attributes->transfer) {
5281                 if (!priv->config.dv_esw_en)
5282                         return rte_flow_error_set
5283                                 (error, ENOTSUP,
5284                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5285                                  "E-Switch dr is not supported");
5286                 if (!(priv->representor || priv->master))
5287                         return rte_flow_error_set
5288                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5289                                  NULL, "E-Switch configuration can only be"
5290                                  " done by a master or a representor device");
5291                 if (attributes->egress)
5292                         return rte_flow_error_set
5293                                 (error, ENOTSUP,
5294                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5295                                  "egress is not supported");
5296         }
5297         if (!(attributes->egress ^ attributes->ingress))
5298                 return rte_flow_error_set(error, ENOTSUP,
5299                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5300                                           "must specify exactly one of "
5301                                           "ingress or egress");
5302         return ret;
5303 }
5304
5305 /**
5306  * Internal validation function. For validating both actions and items.
5307  *
5308  * @param[in] dev
5309  *   Pointer to the rte_eth_dev structure.
5310  * @param[in] attr
5311  *   Pointer to the flow attributes.
5312  * @param[in] items
5313  *   Pointer to the list of items.
5314  * @param[in] actions
5315  *   Pointer to the list of actions.
5316  * @param[in] external
5317  *   Set when this flow rule is created by a request external to the PMD.
5318  * @param[in] hairpin
5319  *   Number of hairpin TX actions, 0 means classic flow.
5320  * @param[out] error
5321  *   Pointer to the error structure.
5322  *
5323  * @return
5324  *   0 on success, a negative errno value otherwise and rte_errno is set.
5325  */
5326 static int
5327 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5328                  const struct rte_flow_item items[],
5329                  const struct rte_flow_action actions[],
5330                  bool external, int hairpin, struct rte_flow_error *error)
5331 {
5332         int ret;
5333         uint64_t action_flags = 0;
5334         uint64_t item_flags = 0;
5335         uint64_t last_item = 0;
5336         uint8_t next_protocol = 0xff;
5337         uint16_t ether_type = 0;
5338         int actions_n = 0;
5339         uint8_t item_ipv6_proto = 0;
5340         const struct rte_flow_item *geneve_item = NULL;
5341         const struct rte_flow_item *gre_item = NULL;
5342         const struct rte_flow_item *gtp_item = NULL;
5343         const struct rte_flow_action_raw_decap *decap;
5344         const struct rte_flow_action_raw_encap *encap;
5345         const struct rte_flow_action_rss *rss = NULL;
5346         const struct rte_flow_action_rss *sample_rss = NULL;
5347         const struct rte_flow_item_tcp nic_tcp_mask = {
5348                 .hdr = {
5349                         .tcp_flags = 0xFF,
5350                         .src_port = RTE_BE16(UINT16_MAX),
5351                         .dst_port = RTE_BE16(UINT16_MAX),
5352                 }
5353         };
5354         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5355                 .hdr = {
5356                         .src_addr =
5357                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5358                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5359                         .dst_addr =
5360                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5361                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5362                         .vtc_flow = RTE_BE32(0xffffffff),
5363                         .proto = 0xff,
5364                         .hop_limits = 0xff,
5365                 },
5366                 .has_frag_ext = 1,
5367         };
5368         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5369                 .hdr = {
5370                         .common = {
5371                                 .u32 =
5372                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5373                                         .type = 0xFF,
5374                                         }).u32),
5375                         },
5376                         .dummy[0] = 0xffffffff,
5377                 },
5378         };
5379         struct mlx5_priv *priv = dev->data->dev_private;
5380         struct mlx5_dev_config *dev_conf = &priv->config;
5381         uint16_t queue_index = 0xFFFF;
5382         const struct rte_flow_item_vlan *vlan_m = NULL;
5383         int16_t rw_act_num = 0;
5384         uint64_t is_root;
5385         const struct mlx5_flow_tunnel *tunnel;
5386         struct flow_grp_info grp_info = {
5387                 .external = !!external,
5388                 .transfer = !!attr->transfer,
5389                 .fdb_def_rule = !!priv->fdb_def_rule,
5390         };
5391         const struct rte_eth_hairpin_conf *conf;
5392
5393         if (items == NULL)
5394                 return -1;
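        /*
         * Tunnel offload: a tunnel-match rule carries the tunnel in its
         * items and implies a decap; a tunnel-steer rule carries the tunnel
         * in its actions.
         */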
5395         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5396                 tunnel = flow_items_to_tunnel(items);
5397                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5398                                 MLX5_FLOW_ACTION_DECAP;
5399         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5400                 tunnel = flow_actions_to_tunnel(actions);
5401                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5402         } else {
5403                 tunnel = NULL;
5404         }
5405         if (tunnel && priv->representor)
5406                 return rte_flow_error_set(error, ENOTSUP,
5407                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5408                                           "decap not supported "
5409                                           "for VF representor");
5410         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5411                                 (dev, tunnel, attr, items, actions);
5412         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
5413         if (ret < 0)
5414                 return ret;
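        /* flow_dv_validate_attributes() returns 1 for the root table. */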
5415         is_root = (uint64_t)ret;
5416         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5417                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5418                 int type = items->type;
5419
5420                 if (!mlx5_flow_os_item_supported(type))
5421                         return rte_flow_error_set(error, ENOTSUP,
5422                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5423                                                   NULL, "item not supported");
5424                 switch (type) {
5425                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5426                         if (items[0].type != (typeof(items[0].type))
5427                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5428                                 return rte_flow_error_set
5429                                                 (error, EINVAL,
5430                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5431                                                 NULL, "MLX5 private items "
5432                                                 "must be the first");
5433                         break;
5434                 case RTE_FLOW_ITEM_TYPE_VOID:
5435                         break;
5436                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5437                         ret = flow_dv_validate_item_port_id
5438                                         (dev, items, attr, item_flags, error);
5439                         if (ret < 0)
5440                                 return ret;
5441                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5442                         break;
5443                 case RTE_FLOW_ITEM_TYPE_ETH:
5444                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5445                                                           true, error);
5446                         if (ret < 0)
5447                                 return ret;
5448                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5449                                              MLX5_FLOW_LAYER_OUTER_L2;
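                        /*
                         * Record the masked EtherType so the following L3
                         * item can be validated against it.
                         */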
5450                         if (items->mask != NULL && items->spec != NULL) {
5451                                 ether_type =
5452                                         ((const struct rte_flow_item_eth *)
5453                                          items->spec)->type;
5454                                 ether_type &=
5455                                         ((const struct rte_flow_item_eth *)
5456                                          items->mask)->type;
5457                                 ether_type = rte_be_to_cpu_16(ether_type);
5458                         } else {
5459                                 ether_type = 0;
5460                         }
5461                         break;
5462                 case RTE_FLOW_ITEM_TYPE_VLAN:
5463                         ret = flow_dv_validate_item_vlan(items, item_flags,
5464                                                          dev, error);
5465                         if (ret < 0)
5466                                 return ret;
5467                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5468                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5469                         if (items->mask != NULL && items->spec != NULL) {
5470                                 ether_type =
5471                                         ((const struct rte_flow_item_vlan *)
5472                                          items->spec)->inner_type;
5473                                 ether_type &=
5474                                         ((const struct rte_flow_item_vlan *)
5475                                          items->mask)->inner_type;
5476                                 ether_type = rte_be_to_cpu_16(ether_type);
5477                         } else {
5478                                 ether_type = 0;
5479                         }
5480                         /* Store outer VLAN mask for of_push_vlan action. */
5481                         if (!tunnel)
5482                                 vlan_m = items->mask;
5483                         break;
5484                 case RTE_FLOW_ITEM_TYPE_IPV4:
5485                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5486                                                   &item_flags, &tunnel);
5487                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5488                                                          last_item, ether_type,
5489                                                          error);
5490                         if (ret < 0)
5491                                 return ret;
5492                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5493                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5494                         if (items->mask != NULL &&
5495                             ((const struct rte_flow_item_ipv4 *)
5496                              items->mask)->hdr.next_proto_id) {
5497                                 next_protocol =
5498                                         ((const struct rte_flow_item_ipv4 *)
5499                                          (items->spec))->hdr.next_proto_id;
5500                                 next_protocol &=
5501                                         ((const struct rte_flow_item_ipv4 *)
5502                                          (items->mask))->hdr.next_proto_id;
5503                         } else {
5504                                 /* Reset for inner layer. */
5505                                 next_protocol = 0xff;
5506                         }
5507                         break;
5508                 case RTE_FLOW_ITEM_TYPE_IPV6:
5509                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5510                                                   &item_flags, &tunnel);
5511                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5512                                                            last_item,
5513                                                            ether_type,
5514                                                            &nic_ipv6_mask,
5515                                                            error);
5516                         if (ret < 0)
5517                                 return ret;
5518                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5519                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5520                         if (items->mask != NULL &&
5521                             ((const struct rte_flow_item_ipv6 *)
5522                              items->mask)->hdr.proto) {
5523                                 item_ipv6_proto =
5524                                         ((const struct rte_flow_item_ipv6 *)
5525                                          items->spec)->hdr.proto;
5526                                 next_protocol =
5527                                         ((const struct rte_flow_item_ipv6 *)
5528                                          items->spec)->hdr.proto;
5529                                 next_protocol &=
5530                                         ((const struct rte_flow_item_ipv6 *)
5531                                          items->mask)->hdr.proto;
5532                         } else {
5533                                 /* Reset for inner layer. */
5534                                 next_protocol = 0xff;
5535                         }
5536                         break;
5537                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5538                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5539                                                                   item_flags,
5540                                                                   error);
5541                         if (ret < 0)
5542                                 return ret;
5543                         last_item = tunnel ?
5544                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5545                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5546                         if (items->mask != NULL &&
5547                             ((const struct rte_flow_item_ipv6_frag_ext *)
5548                              items->mask)->hdr.next_header) {
5549                                 next_protocol =
5550                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5551                                  items->spec)->hdr.next_header;
5552                                 next_protocol &=
5553                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5554                                  items->mask)->hdr.next_header;
5555                         } else {
5556                                 /* Reset for inner layer. */
5557                                 next_protocol = 0xff;
5558                         }
5559                         break;
5560                 case RTE_FLOW_ITEM_TYPE_TCP:
5561                         ret = mlx5_flow_validate_item_tcp
5562                                                 (items, item_flags,
5563                                                  next_protocol,
5564                                                  &nic_tcp_mask,
5565                                                  error);
5566                         if (ret < 0)
5567                                 return ret;
5568                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5569                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5570                         break;
5571                 case RTE_FLOW_ITEM_TYPE_UDP:
5572                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5573                                                           next_protocol,
5574                                                           error);
5575                         if (ret < 0)
5576                                 return ret;
5577                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5578                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5579                         break;
5580                 case RTE_FLOW_ITEM_TYPE_GRE:
5581                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5582                                                           next_protocol, error);
5583                         if (ret < 0)
5584                                 return ret;
5585                         gre_item = items;
5586                         last_item = MLX5_FLOW_LAYER_GRE;
5587                         break;
5588                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5589                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5590                                                             next_protocol,
5591                                                             error);
5592                         if (ret < 0)
5593                                 return ret;
5594                         last_item = MLX5_FLOW_LAYER_NVGRE;
5595                         break;
5596                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5597                         ret = mlx5_flow_validate_item_gre_key
5598                                 (items, item_flags, gre_item, error);
5599                         if (ret < 0)
5600                                 return ret;
5601                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5602                         break;
5603                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5604                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5605                                                             error);
5606                         if (ret < 0)
5607                                 return ret;
5608                         last_item = MLX5_FLOW_LAYER_VXLAN;
5609                         break;
5610                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5611                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5612                                                                 item_flags, dev,
5613                                                                 error);
5614                         if (ret < 0)
5615                                 return ret;
5616                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5617                         break;
5618                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5619                         ret = mlx5_flow_validate_item_geneve(items,
5620                                                              item_flags, dev,
5621                                                              error);
5622                         if (ret < 0)
5623                                 return ret;
5624                         geneve_item = items;
5625                         last_item = MLX5_FLOW_LAYER_GENEVE;
5626                         break;
5627                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
5628                         ret = mlx5_flow_validate_item_geneve_opt(items,
5629                                                                  last_item,
5630                                                                  geneve_item,
5631                                                                  dev,
5632                                                                  error);
5633                         if (ret < 0)
5634                                 return ret;
5635                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
5636                         break;
5637                 case RTE_FLOW_ITEM_TYPE_MPLS:
5638                         ret = mlx5_flow_validate_item_mpls(dev, items,
5639                                                            item_flags,
5640                                                            last_item, error);
5641                         if (ret < 0)
5642                                 return ret;
5643                         last_item = MLX5_FLOW_LAYER_MPLS;
5644                         break;
5646                 case RTE_FLOW_ITEM_TYPE_MARK:
5647                         ret = flow_dv_validate_item_mark(dev, items, attr,
5648                                                          error);
5649                         if (ret < 0)
5650                                 return ret;
5651                         last_item = MLX5_FLOW_ITEM_MARK;
5652                         break;
5653                 case RTE_FLOW_ITEM_TYPE_META:
5654                         ret = flow_dv_validate_item_meta(dev, items, attr,
5655                                                          error);
5656                         if (ret < 0)
5657                                 return ret;
5658                         last_item = MLX5_FLOW_ITEM_METADATA;
5659                         break;
5660                 case RTE_FLOW_ITEM_TYPE_ICMP:
5661                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5662                                                            next_protocol,
5663                                                            error);
5664                         if (ret < 0)
5665                                 return ret;
5666                         last_item = MLX5_FLOW_LAYER_ICMP;
5667                         break;
5668                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5669                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5670                                                             next_protocol,
5671                                                             error);
5672                         if (ret < 0)
5673                                 return ret;
5674                         item_ipv6_proto = IPPROTO_ICMPV6;
5675                         last_item = MLX5_FLOW_LAYER_ICMP6;
5676                         break;
5677                 case RTE_FLOW_ITEM_TYPE_TAG:
5678                         ret = flow_dv_validate_item_tag(dev, items,
5679                                                         attr, error);
5680                         if (ret < 0)
5681                                 return ret;
5682                         last_item = MLX5_FLOW_ITEM_TAG;
5683                         break;
5684                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5685                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5686                         break;
5687                 case RTE_FLOW_ITEM_TYPE_GTP:
5688                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5689                                                         error);
5690                         if (ret < 0)
5691                                 return ret;
5692                         gtp_item = items;
5693                         last_item = MLX5_FLOW_LAYER_GTP;
5694                         break;
5695                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
5696                         ret = flow_dv_validate_item_gtp_psc(items, last_item,
5697                                                             gtp_item, attr,
5698                                                             error);
5699                         if (ret < 0)
5700                                 return ret;
5701                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
5702                         break;
5703                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5704                         /* Capacity will be checked in the translate stage. */
5705                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5706                                                             last_item,
5707                                                             ether_type,
5708                                                             &nic_ecpri_mask,
5709                                                             error);
5710                         if (ret < 0)
5711                                 return ret;
5712                         last_item = MLX5_FLOW_LAYER_ECPRI;
5713                         break;
5714                 default:
5715                         return rte_flow_error_set(error, ENOTSUP,
5716                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5717                                                   NULL, "item not supported");
5718                 }
5719                 item_flags |= last_item;
5720         }
5721         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5722                 int type = actions->type;
5723
5724                 if (!mlx5_flow_os_action_supported(type))
5725                         return rte_flow_error_set(error, ENOTSUP,
5726                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5727                                                   actions,
5728                                                   "action not supported");
5729                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5730                         return rte_flow_error_set(error, ENOTSUP,
5731                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5732                                                   actions, "too many actions");
5733                 switch (type) {
5734                 case RTE_FLOW_ACTION_TYPE_VOID:
5735                         break;
5736                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5737                         ret = flow_dv_validate_action_port_id(dev,
5738                                                               action_flags,
5739                                                               actions,
5740                                                               attr,
5741                                                               error);
5742                         if (ret)
5743                                 return ret;
5744                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5745                         ++actions_n;
5746                         break;
5747                 case RTE_FLOW_ACTION_TYPE_FLAG:
5748                         ret = flow_dv_validate_action_flag(dev, action_flags,
5749                                                            attr, error);
5750                         if (ret < 0)
5751                                 return ret;
5752                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
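                                /*
                                 * With extended metadata, FLAG appears to be
                                 * implemented as a MARK rewrite via a
                                 * modify-header action, hence the
                                 * modify-header accounting below.
                                 */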
5753                                 /* Count all modify-header actions as one. */
5754                                 if (!(action_flags &
5755                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5756                                         ++actions_n;
5757                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5758                                                 MLX5_FLOW_ACTION_MARK_EXT;
5759                         } else {
5760                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5761                                 ++actions_n;
5762                         }
5763                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5764                         break;
5765                 case RTE_FLOW_ACTION_TYPE_MARK:
5766                         ret = flow_dv_validate_action_mark(dev, actions,
5767                                                            action_flags,
5768                                                            attr, error);
5769                         if (ret < 0)
5770                                 return ret;
5771                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5772                                 /* Count all modify-header actions as one. */
5773                                 if (!(action_flags &
5774                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5775                                         ++actions_n;
5776                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5777                                                 MLX5_FLOW_ACTION_MARK_EXT;
5778                         } else {
5779                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5780                                 ++actions_n;
5781                         }
5782                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5783                         break;
5784                 case RTE_FLOW_ACTION_TYPE_SET_META:
5785                         ret = flow_dv_validate_action_set_meta(dev, actions,
5786                                                                action_flags,
5787                                                                attr, error);
5788                         if (ret < 0)
5789                                 return ret;
5790                         /* Count all modify-header actions as one action. */
5791                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5792                                 ++actions_n;
5793                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5794                         rw_act_num += MLX5_ACT_NUM_SET_META;
5795                         break;
5796                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5797                         ret = flow_dv_validate_action_set_tag(dev, actions,
5798                                                               action_flags,
5799                                                               attr, error);
5800                         if (ret < 0)
5801                                 return ret;
5802                         /* Count all modify-header actions as one action. */
5803                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5804                                 ++actions_n;
5805                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5806                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5807                         break;
5808                 case RTE_FLOW_ACTION_TYPE_DROP:
5809                         ret = mlx5_flow_validate_action_drop(action_flags,
5810                                                              attr, error);
5811                         if (ret < 0)
5812                                 return ret;
5813                         action_flags |= MLX5_FLOW_ACTION_DROP;
5814                         ++actions_n;
5815                         break;
5816                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5817                         ret = mlx5_flow_validate_action_queue(actions,
5818                                                               action_flags, dev,
5819                                                               attr, error);
5820                         if (ret < 0)
5821                                 return ret;
5822                         queue_index = ((const struct rte_flow_action_queue *)
5823                                                         (actions->conf))->index;
5824                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5825                         ++actions_n;
5826                         break;
5827                 case RTE_FLOW_ACTION_TYPE_RSS:
5828                         rss = actions->conf;
5829                         ret = mlx5_flow_validate_action_rss(actions,
5830                                                             action_flags, dev,
5831                                                             attr, item_flags,
5832                                                             error);
5833                         if (ret < 0)
5834                                 return ret;
5835                         if (rss && sample_rss &&
5836                             (sample_rss->level != rss->level ||
5837                             sample_rss->types != rss->types))
5838                                 return rte_flow_error_set(error, ENOTSUP,
5839                                         RTE_FLOW_ERROR_TYPE_ACTION,
5840                                         NULL,
5841                                         "cannot use different RSS types "
5842                                         "or levels in the same flow");
5843                         if (rss != NULL && rss->queue_num)
5844                                 queue_index = rss->queue[0];
5845                         action_flags |= MLX5_FLOW_ACTION_RSS;
5846                         ++actions_n;
5847                         break;
5848                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5849                         ret =
5850                         mlx5_flow_validate_action_default_miss(action_flags,
5851                                         attr, error);
5852                         if (ret < 0)
5853                                 return ret;
5854                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5855                         ++actions_n;
5856                         break;
5857                 case RTE_FLOW_ACTION_TYPE_COUNT:
5858                         ret = flow_dv_validate_action_count(dev, error);
5859                         if (ret < 0)
5860                                 return ret;
5861                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5862                         ++actions_n;
5863                         break;
5864                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5865                         if (flow_dv_validate_action_pop_vlan(dev,
5866                                                              action_flags,
5867                                                              actions,
5868                                                              item_flags, attr,
5869                                                              error))
5870                                 return -rte_errno;
5871                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5872                         ++actions_n;
5873                         break;
5874                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5875                         ret = flow_dv_validate_action_push_vlan(dev,
5876                                                                 action_flags,
5877                                                                 vlan_m,
5878                                                                 actions, attr,
5879                                                                 error);
5880                         if (ret < 0)
5881                                 return ret;
5882                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5883                         ++actions_n;
5884                         break;
5885                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5886                         ret = flow_dv_validate_action_set_vlan_pcp
5887                                                 (action_flags, actions, error);
5888                         if (ret < 0)
5889                                 return ret;
5890                         /* Count PCP with push_vlan command. */
5891                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5892                         break;
5893                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5894                         ret = flow_dv_validate_action_set_vlan_vid
5895                                                 (item_flags, action_flags,
5896                                                  actions, error);
5897                         if (ret < 0)
5898                                 return ret;
5899                         /* Count VID with push_vlan command. */
5900                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5901                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5902                         break;
5903                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5904                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5905                         ret = flow_dv_validate_action_l2_encap(dev,
5906                                                                action_flags,
5907                                                                actions, attr,
5908                                                                error);
5909                         if (ret < 0)
5910                                 return ret;
5911                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5912                         ++actions_n;
5913                         break;
5914                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5915                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5916                         ret = flow_dv_validate_action_decap(dev, action_flags,
5917                                                             actions, item_flags,
5918                                                             attr, error);
5919                         if (ret < 0)
5920                                 return ret;
5921                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5922                         ++actions_n;
5923                         break;
5924                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5925                         ret = flow_dv_validate_action_raw_encap_decap
5926                                 (dev, NULL, actions->conf, attr, &action_flags,
5927                                  &actions_n, actions, item_flags, error);
5928                         if (ret < 0)
5929                                 return ret;
5930                         break;
5931                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5932                         decap = actions->conf;
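                        /*
                         * Peek past any VOID actions: a raw_decap directly
                         * followed by raw_encap is validated as a single
                         * decap/encap pair; otherwise step back and validate
                         * the decap on its own.
                         */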
5933                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5934                                 ;
5935                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5936                                 encap = NULL;
5937                                 actions--;
5938                         } else {
5939                                 encap = actions->conf;
5940                         }
5941                         ret = flow_dv_validate_action_raw_encap_decap
5942                                            (dev,
5943                                             decap ? decap : &empty_decap, encap,
5944                                             attr, &action_flags, &actions_n,
5945                                             actions, item_flags, error);
5946                         if (ret < 0)
5947                                 return ret;
5948                         break;
5949                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5950                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5951                         ret = flow_dv_validate_action_modify_mac(action_flags,
5952                                                                  actions,
5953                                                                  item_flags,
5954                                                                  error);
5955                         if (ret < 0)
5956                                 return ret;
5957                         /* Count all modify-header actions as one action. */
5958                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5959                                 ++actions_n;
5960                         action_flags |= actions->type ==
5961                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5962                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5963                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5964                         /*
5965                          * Even if the source and destination MAC addresses
5966                          * overlap in the header with 4-byte alignment, the
5967                          * convert function handles them separately, so 4 SW
5968                          * actions are created in total: 2 actions are added
5969                          * per address, no matter how many bytes are set.
5970                          */
5971                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5972                         break;
5973                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5974                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5975                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5976                                                                   actions,
5977                                                                   item_flags,
5978                                                                   error);
5979                         if (ret < 0)
5980                                 return ret;
5981                         /* Count all modify-header actions as one action. */
5982                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5983                                 ++actions_n;
5984                         action_flags |= actions->type ==
5985                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5986                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5987                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5988                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5989                         break;
5990                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5991                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5992                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5993                                                                   actions,
5994                                                                   item_flags,
5995                                                                   error);
5996                         if (ret < 0)
5997                                 return ret;
5998                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5999                                 return rte_flow_error_set(error, ENOTSUP,
6000                                         RTE_FLOW_ERROR_TYPE_ACTION,
6001                                         actions,
6002                                         "Can't change header "
6003                                         "with ICMPv6 proto");
6004                         /* Count all modify-header actions as one action. */
6005                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6006                                 ++actions_n;
6007                         action_flags |= actions->type ==
6008                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
6009                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
6010                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
6011                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
6012                         break;
6013                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
6014                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
6015                         ret = flow_dv_validate_action_modify_tp(action_flags,
6016                                                                 actions,
6017                                                                 item_flags,
6018                                                                 error);
6019                         if (ret < 0)
6020                                 return ret;
6021                         /* Count all modify-header actions as one action. */
6022                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6023                                 ++actions_n;
6024                         action_flags |= actions->type ==
6025                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
6026                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
6027                                                 MLX5_FLOW_ACTION_SET_TP_DST;
6028                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
6029                         break;
6030                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
6031                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
6032                         ret = flow_dv_validate_action_modify_ttl(action_flags,
6033                                                                  actions,
6034                                                                  item_flags,
6035                                                                  error);
6036                         if (ret < 0)
6037                                 return ret;
6038                         /* Count all modify-header actions as one action. */
6039                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6040                                 ++actions_n;
6041                         action_flags |= actions->type ==
6042                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
6043                                                 MLX5_FLOW_ACTION_SET_TTL :
6044                                                 MLX5_FLOW_ACTION_DEC_TTL;
6045                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
6046                         break;
6047                 case RTE_FLOW_ACTION_TYPE_JUMP:
6048                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
6049                                                            action_flags,
6050                                                            attr, external,
6051                                                            error);
6052                         if (ret)
6053                                 return ret;
6054                         ++actions_n;
6055                         action_flags |= MLX5_FLOW_ACTION_JUMP;
6056                         break;
6057                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
6058                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
6059                         ret = flow_dv_validate_action_modify_tcp_seq
6060                                                                 (action_flags,
6061                                                                  actions,
6062                                                                  item_flags,
6063                                                                  error);
6064                         if (ret < 0)
6065                                 return ret;
6066                         /* Count all modify-header actions as one action. */
6067                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6068                                 ++actions_n;
6069                         action_flags |= actions->type ==
6070                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
6071                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
6072                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6073                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
6074                         break;
6075                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6076                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6077                         ret = flow_dv_validate_action_modify_tcp_ack
6078                                                                 (action_flags,
6079                                                                  actions,
6080                                                                  item_flags,
6081                                                                  error);
6082                         if (ret < 0)
6083                                 return ret;
6084                         /* Count all modify-header actions as one action. */
6085                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6086                                 ++actions_n;
6087                         action_flags |= actions->type ==
6088                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6089                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
6090                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
6091                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
6092                         break;
6093                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
6094                         break;
6095                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6096                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6097                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6098                         break;
6099                 case RTE_FLOW_ACTION_TYPE_METER:
6100                         ret = mlx5_flow_validate_action_meter(dev,
6101                                                               action_flags,
6102                                                               actions, attr,
6103                                                               error);
6104                         if (ret < 0)
6105                                 return ret;
6106                         action_flags |= MLX5_FLOW_ACTION_METER;
6107                         ++actions_n;
6108                         /* Meter action will add one more TAG action. */
6109                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
6110                         break;
6111                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
6112                         if (!attr->transfer && !attr->group)
6113                                 return rte_flow_error_set(error, ENOTSUP,
6114                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6115                                                 NULL,
6116                                 "Shared ASO age action is not supported for group 0");
6117                         action_flags |= MLX5_FLOW_ACTION_AGE;
6118                         ++actions_n;
6119                         break;
6120                 case RTE_FLOW_ACTION_TYPE_AGE:
6121                         ret = flow_dv_validate_action_age(action_flags,
6122                                                           actions, dev,
6123                                                           error);
6124                         if (ret < 0)
6125                                 return ret;
6126                         action_flags |= MLX5_FLOW_ACTION_AGE;
6127                         ++actions_n;
6128                         break;
6129                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
6130                         ret = flow_dv_validate_action_modify_ipv4_dscp
6131                                                          (action_flags,
6132                                                           actions,
6133                                                           item_flags,
6134                                                           error);
6135                         if (ret < 0)
6136                                 return ret;
6137                         /* Count all modify-header actions as one action. */
6138                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6139                                 ++actions_n;
6140                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
6141                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6142                         break;
6143                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
6144                         ret = flow_dv_validate_action_modify_ipv6_dscp
6145                                                                 (action_flags,
6146                                                                  actions,
6147                                                                  item_flags,
6148                                                                  error);
6149                         if (ret < 0)
6150                                 return ret;
6151                         /* Count all modify-header actions as one action. */
6152                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6153                                 ++actions_n;
6154                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6155                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6156                         break;
6157                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6158                         ret = flow_dv_validate_action_sample(action_flags,
6159                                                              actions, dev,
6160                                                              attr, item_flags,
6161                                                              rss, &sample_rss,
6162                                                              error);
6163                         if (ret < 0)
6164                                 return ret;
6165                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6166                         ++actions_n;
6167                         break;
6168                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6169                         if (actions[0].type != (typeof(actions[0].type))
6170                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6171                                 return rte_flow_error_set
6172                                                 (error, EINVAL,
6173                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6174                                                 NULL, "MLX5 private action "
6175                                                 "must be the first");
6176
6177                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6178                         break;
6179                 default:
6180                         return rte_flow_error_set(error, ENOTSUP,
6181                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6182                                                   actions,
6183                                                   "action not supported");
6184                 }
6185         }
6186         /*
6187          * Validate actions in tunnel offload flow rules:
6188          * - Explicit decap action is prohibited by the tunnel offload API.
6189          * - Drop action in tunnel steer rule is prohibited by the API.
6190          * - Application cannot use MARK action because its value can mask
6191          *   the tunnel default miss notification.
6192          * - JUMP in tunnel match rule is not supported by the current PMD
6193          *   implementation.
6194          * - TAG & META are reserved for future use.
6195          */
6196         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6197                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6198                                             MLX5_FLOW_ACTION_MARK     |
6199                                             MLX5_FLOW_ACTION_SET_TAG  |
6200                                             MLX5_FLOW_ACTION_SET_META |
6201                                             MLX5_FLOW_ACTION_DROP;
6202
6203                 if (action_flags & bad_actions_mask)
6204                         return rte_flow_error_set
6205                                         (error, EINVAL,
6206                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6207                                         "Invalid RTE action in tunnel "
6208                                         "set decap rule");
6209                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6210                         return rte_flow_error_set
6211                                         (error, EINVAL,
6212                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6213                                         "tunnel set decap rule must terminate "
6214                                         "with JUMP");
6215                 if (!attr->ingress)
6216                         return rte_flow_error_set
6217                                         (error, EINVAL,
6218                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6219                                         "tunnel flows for ingress traffic only");
6220         }
6221         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6222                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6223                                             MLX5_FLOW_ACTION_MARK    |
6224                                             MLX5_FLOW_ACTION_SET_TAG |
6225                                             MLX5_FLOW_ACTION_SET_META;
6226
6227                 if (action_flags & bad_actions_mask)
6228                         return rte_flow_error_set
6229                                         (error, EINVAL,
6230                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6231                                         "Invalid RTE action in tunnel "
6232                                         "set match rule");
6233         }
6234         /*
6235          * Validate the drop action mutual exclusion with other actions.
6236          * Drop action is mutually-exclusive with any other action, except for
6237          * Count action.
6238          */
6239         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6240             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6241                 return rte_flow_error_set(error, EINVAL,
6242                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6243                                           "Drop action is mutually-exclusive "
6244                                           "with any other action, except for "
6245                                           "Count action");
6246         /* E-Switch has a few restrictions on using items and actions. */
6247         if (attr->transfer) {
6248                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6249                     action_flags & MLX5_FLOW_ACTION_FLAG)
6250                         return rte_flow_error_set(error, ENOTSUP,
6251                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6252                                                   NULL,
6253                                                   "unsupported action FLAG");
6254                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6255                     action_flags & MLX5_FLOW_ACTION_MARK)
6256                         return rte_flow_error_set(error, ENOTSUP,
6257                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6258                                                   NULL,
6259                                                   "unsupported action MARK");
6260                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6261                         return rte_flow_error_set(error, ENOTSUP,
6262                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6263                                                   NULL,
6264                                                   "unsupported action QUEUE");
6265                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6266                         return rte_flow_error_set(error, ENOTSUP,
6267                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6268                                                   NULL,
6269                                                   "unsupported action RSS");
6270                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6271                         return rte_flow_error_set(error, EINVAL,
6272                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6273                                                   actions,
6274                                                   "no fate action is found");
6275         } else {
6276                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6277                         return rte_flow_error_set(error, EINVAL,
6278                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6279                                                   actions,
6280                                                   "no fate action is found");
6281         }
6282         /*
6283          * Continue validation for Xcap and VLAN actions.
6284          * If hairpin works in explicit TX rule mode, there is no action
6285          * splitting and the validation of a hairpin ingress flow is the
6286          * same as for other standard flows.
6287          */
6288         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6289                              MLX5_FLOW_VLAN_ACTIONS)) &&
6290             (queue_index == 0xFFFF ||
6291              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6292              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6293              conf->tx_explicit != 0))) {
6294                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6295                     MLX5_FLOW_XCAP_ACTIONS)
6296                         return rte_flow_error_set(error, ENOTSUP,
6297                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6298                                                   NULL, "encap and decap "
6299                                                   "combination is not supported");
6300                 if (!attr->transfer && attr->ingress) {
6301                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6302                                 return rte_flow_error_set
6303                                                 (error, ENOTSUP,
6304                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6305                                                  NULL, "encap is not supported"
6306                                                  " for ingress traffic");
6307                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6308                                 return rte_flow_error_set
6309                                                 (error, ENOTSUP,
6310                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6311                                                  NULL, "push VLAN action not "
6312                                                  "supported for ingress");
6313                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6314                                         MLX5_FLOW_VLAN_ACTIONS)
6315                                 return rte_flow_error_set
6316                                                 (error, ENOTSUP,
6317                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6318                                                  NULL, "no support for "
6319                                                  "multiple VLAN actions");
6320                 }
6321         }
6322         /*
6323          * Hairpin flow will add one more TAG action in TX implicit mode.
6324          * In TX explicit mode, there will be no hairpin flow ID.
6325          */
6326         if (hairpin > 0)
6327                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6328         /* Extra metadata enabled: one more TAG action will be added. */
6329         if (dev_conf->dv_flow_en &&
6330             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6331             mlx5_flow_ext_mreg_supported(dev))
6332                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6333         if ((uint32_t)rw_act_num >
6334                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6335                 return rte_flow_error_set(error, ENOTSUP,
6336                                           RTE_FLOW_ERROR_TYPE_ACTION,
6337                                           NULL, "too many header modify"
6338                                           " actions to support");
6339         }
6340         return 0;
6341 }
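
/*
 * Illustrative sketch (hypothetical action list, not used by the driver):
 * the two modify-header actions below count as a single entry in actions_n,
 * while rw_act_num accumulates one slot per rewritten field
 * (MLX5_ACT_NUM_MDF_IPV6 + MLX5_ACT_NUM_MDF_TTL), and the sum is checked
 * against flow_dv_modify_hdr_action_max() above.
 */
static const struct rte_flow_action_set_ipv6 mlx5_example_set_ipv6 = {
        .ipv6_addr = "\x20\x01\x0d\xb8\x00\x00\x00\x00"
                     "\x00\x00\x00\x00\x00\x00\x00\x01", /* 2001:db8::1 */
};
static const struct rte_flow_action_set_ttl mlx5_example_set_ttl = {
        .ttl_value = 64,
};
static const struct rte_flow_action_queue mlx5_example_queue = {
        .index = 0,
};
static const struct rte_flow_action mlx5_example_mdf_actions[] __rte_unused = {
        { .type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC,
          .conf = &mlx5_example_set_ipv6 },
        { .type = RTE_FLOW_ACTION_TYPE_SET_TTL,
          .conf = &mlx5_example_set_ttl },
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mlx5_example_queue },
        { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
};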
6342
6343 /**
6344  * Internal preparation function. Allocates the DV flow size;
6345  * this size is constant.
6346  *
6347  * @param[in] dev
6348  *   Pointer to the rte_eth_dev structure.
6349  * @param[in] attr
6350  *   Pointer to the flow attributes.
6351  * @param[in] items
6352  *   Pointer to the list of items.
6353  * @param[in] actions
6354  *   Pointer to the list of actions.
6355  * @param[out] error
6356  *   Pointer to the error structure.
6357  *
6358  * @return
6359  *   Pointer to mlx5_flow object on success,
6360  *   otherwise NULL and rte_errno is set.
6361  */
6362 static struct mlx5_flow *
6363 flow_dv_prepare(struct rte_eth_dev *dev,
6364                 const struct rte_flow_attr *attr __rte_unused,
6365                 const struct rte_flow_item items[] __rte_unused,
6366                 const struct rte_flow_action actions[] __rte_unused,
6367                 struct rte_flow_error *error)
6368 {
6369         uint32_t handle_idx = 0;
6370         struct mlx5_flow *dev_flow;
6371         struct mlx5_flow_handle *dev_handle;
6372         struct mlx5_priv *priv = dev->data->dev_private;
6373         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6374
6375         MLX5_ASSERT(wks);
6376         /* Guard against overflowing the per-thread flow workspace. */
6377         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6378                 rte_flow_error_set(error, ENOSPC,
6379                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6380                                    "no free temporary device flow");
6381                 return NULL;
6382         }
6383         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6384                                    &handle_idx);
6385         if (!dev_handle) {
6386                 rte_flow_error_set(error, ENOMEM,
6387                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6388                                    "not enough memory to create flow handle");
6389                 return NULL;
6390         }
6391         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
6392         dev_flow = &wks->flows[wks->flow_idx++];
6393         memset(dev_flow, 0, sizeof(*dev_flow));
6394         dev_flow->handle = dev_handle;
6395         dev_flow->handle_idx = handle_idx;
6396         /*
6397          * Some old rdma-core releases check the length of the matching
6398          * parameter before continuing, so the length without the misc4
6399          * param must be used by default. If the flow needs misc4 support,
6400          * the length is adjusted accordingly. Each param member is
6401          * naturally aligned on a 64B boundary.
6402          */
6403         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6404                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6405         dev_flow->ingress = attr->ingress;
6406         dev_flow->dv.transfer = attr->transfer;
6407         return dev_flow;
6408 }
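
/*
 * Minimal sketch (hypothetical helper) of the sizing rule above: the default
 * matcher value excludes the trailing misc4 parameter and is grown back to
 * the full fte_match_param size only when a translator (e.g. eCPRI) needs
 * misc4.
 */
static size_t __rte_unused
mlx5_example_match_value_size(bool need_misc4)
{
        size_t size = MLX5_ST_SZ_BYTES(fte_match_param);

        if (!need_misc4)
                size -= MLX5_ST_SZ_BYTES(fte_match_set_misc4);
        return size;
}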
6409
6410 #ifdef RTE_LIBRTE_MLX5_DEBUG
6411 /**
6412  * Sanity check for match mask and value. Similar to check_valid_spec() in
6413  * the kernel driver. If an unmasked bit is set in the value, return failure.
6414  *
6415  * @param match_mask
6416  *   pointer to match mask buffer.
6417  * @param match_value
6418  *   pointer to match value buffer.
6419  *
6420  * @return
6421  *   0 if valid, -EINVAL otherwise.
6422  */
6423 static int
6424 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6425 {
6426         uint8_t *m = match_mask;
6427         uint8_t *v = match_value;
6428         unsigned int i;
6429
6430         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6431                 if (v[i] & ~m[i]) {
6432                         DRV_LOG(ERR,
6433                                 "match_value differs from match_criteria"
6434                                 " %p[%u] != %p[%u]",
6435                                 match_value, i, match_mask, i);
6436                         return -EINVAL;
6437                 }
6438         }
6439         return 0;
6440 }
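
/*
 * Illustrative usage sketch (hypothetical wrapper): a matcher/value pair is
 * well formed when every bit set in the value is also set in the mask, so a
 * debug build can assert on the check before inserting a rule.
 */
static void __rte_unused
mlx5_example_assert_valid_spec(void *match_mask, void *match_value)
{
        MLX5_ASSERT(!flow_dv_check_valid_spec(match_mask, match_value));
}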
6441 #endif
6442
6443 /**
6444  * Add match of ip_version.
6445  *
6446  * @param[in] group
6447  *   Flow group.
6448  * @param[in] headers_v
6449  *   Values header pointer.
6450  * @param[in] headers_m
6451  *   Masks header pointer.
6452  * @param[in] ip_version
6453  *   The IP version to set.
6454  */
6455 static inline void
6456 flow_dv_set_match_ip_version(uint32_t group,
6457                              void *headers_v,
6458                              void *headers_m,
6459                              uint8_t ip_version)
6460 {
6461         if (group == 0)
6462                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6463         else
6464                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6465                          ip_version);
6466         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6467         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6468         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6469 }
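
/*
 * Illustrative sketch (hypothetical buffers): in the root table (group 0)
 * the ip_version mask is widened to the full 4-bit field (0xf) while the
 * value keeps the requested version; in non-root groups mask and value are
 * equal, and ethertype is cleared either way so that ip_version is the only
 * L3 selector.
 */
static void __rte_unused
mlx5_example_match_ipv4_root(void)
{
        uint8_t matcher[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        uint8_t key[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        void *hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
        void *hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

        /* Group 0: ip_version mask becomes 0xf, value becomes 4. */
        flow_dv_set_match_ip_version(0, hdrs_v, hdrs_m, 4);
}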
6470
6471 /**
6472  * Add Ethernet item to matcher and to the value.
6473  *
6474  * @param[in, out] matcher
6475  *   Flow matcher.
6476  * @param[in, out] key
6477  *   Flow matcher value.
6478  * @param[in] item
6479  *   Flow pattern to translate.
6480  * @param[in] inner
6481  *   Item is inner pattern.
6482  */
6483 static void
6484 flow_dv_translate_item_eth(void *matcher, void *key,
6485                            const struct rte_flow_item *item, int inner,
6486                            uint32_t group)
6487 {
6488         const struct rte_flow_item_eth *eth_m = item->mask;
6489         const struct rte_flow_item_eth *eth_v = item->spec;
6490         const struct rte_flow_item_eth nic_mask = {
6491                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6492                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6493                 .type = RTE_BE16(0xffff),
6494                 .has_vlan = 0,
6495         };
6496         void *hdrs_m;
6497         void *hdrs_v;
6498         char *l24_v;
6499         unsigned int i;
6500
6501         if (!eth_v)
6502                 return;
6503         if (!eth_m)
6504                 eth_m = &nic_mask;
6505         if (inner) {
6506                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6507                                          inner_headers);
6508                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6509         } else {
6510                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6511                                          outer_headers);
6512                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6513         }
6514         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6515                &eth_m->dst, sizeof(eth_m->dst));
6516         /* The value must be in the range of the mask. */
6517         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6518         for (i = 0; i < sizeof(eth_m->dst); ++i)
6519                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6520         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6521                &eth_m->src, sizeof(eth_m->src));
6522         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6523         /* The value must be in the range of the mask. */
6524         for (i = 0; i < sizeof(eth_m->src); ++i)
6525                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6526         /*
6527          * HW supports match on one Ethertype, the Ethertype following the last
6528          * VLAN tag of the packet (see PRM).
6529          * Set match on ethertype only if ETH header is not followed by VLAN.
6530          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6531          * ethertype, and use ip_version field instead.
6532          * eCPRI over Ether layer will use type value 0xAEFE.
6533          */
6534         if (eth_m->type == 0xFFFF) {
6535                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6536                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6537                 switch (eth_v->type) {
6538                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6539                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6540                         return;
6541                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6542                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6543                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6544                         return;
6545                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6546                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6547                         return;
6548                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6549                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6550                         return;
6551                 default:
6552                         break;
6553                 }
6554         }
6555         if (eth_m->has_vlan) {
6556                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6557                 if (eth_v->has_vlan) {
6558                         /*
6559                          * Here, when the has_more_vlan field in the VLAN item
6560                          * is also not set, only single-tagged packets match.
6561                          */
6562                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6563                         return;
6564                 }
6565         }
6566         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6567                  rte_be_to_cpu_16(eth_m->type));
6568         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6569         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6570 }
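
/*
 * Illustrative sketch (hypothetical item): the spec is reduced to the mask
 * range before being written to the matcher value, so with the OUI-only
 * mask below the last three DMAC bytes of the value become zero
 * (spec & mask).
 */
static const struct rte_flow_item_eth mlx5_example_eth_spec = {
        .dst.addr_bytes = "\x01\x00\x5e\x12\x34\x56",
        .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item_eth mlx5_example_eth_mask = {
        .dst.addr_bytes = "\xff\xff\xff\x00\x00\x00", /* OUI only */
        .type = RTE_BE16(0xffff),
};
static const struct rte_flow_item mlx5_example_eth_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_ETH,
        .spec = &mlx5_example_eth_spec,
        .mask = &mlx5_example_eth_mask,
};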
6571
6572 /**
6573  * Add VLAN item to matcher and to the value.
6574  *
6575  * @param[in, out] dev_flow
6576  *   Flow descriptor.
6577  * @param[in, out] matcher
6578  *   Flow matcher.
6579  * @param[in, out] key
6580  *   Flow matcher value.
6581  * @param[in] item
6582  *   Flow pattern to translate.
6583  * @param[in] inner
6584  *   Item is inner pattern.
6585  */
6586 static void
6587 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6588                             void *matcher, void *key,
6589                             const struct rte_flow_item *item,
6590                             int inner, uint32_t group)
6591 {
6592         const struct rte_flow_item_vlan *vlan_m = item->mask;
6593         const struct rte_flow_item_vlan *vlan_v = item->spec;
6594         void *hdrs_m;
6595         void *hdrs_v;
6596         uint16_t tci_m;
6597         uint16_t tci_v;
6598
6599         if (inner) {
6600                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6601                                          inner_headers);
6602                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6603         } else {
6604                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6605                                          outer_headers);
6606                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6607                 /*
6608                  * This is a workaround; masks are not supported here
6609                  * and have been pre-validated.
6610                  */
6611                 if (vlan_v)
6612                         dev_flow->handle->vf_vlan.tag =
6613                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6614         }
6615         /*
6616          * When a VLAN item exists in the flow, mark the packet as
6617          * tagged, even if the TCI is not specified.
6618          */
6619         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6620                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6621                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6622         }
6623         if (!vlan_v)
6624                 return;
6625         if (!vlan_m)
6626                 vlan_m = &rte_flow_item_vlan_mask;
6627         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6628         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6629         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6630         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6631         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6632         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6633         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6634         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6635         /*
6636          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6637          * ethertype, and use ip_version field instead.
6638          */
6639         if (vlan_m->inner_type == 0xFFFF) {
6640                 switch (vlan_v->inner_type) {
6641                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6642                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6643                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6644                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6645                         return;
6646                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6647                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6648                         return;
6649                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6650                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6651                         return;
6652                 default:
6653                         break;
6654                 }
6655         }
6656         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6657                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6658                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6659                 /* Only one vlan_tag bit can be set. */
6660                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6661                 return;
6662         }
6663         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6664                  rte_be_to_cpu_16(vlan_m->inner_type));
6665         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6666                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6667 }
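
/*
 * Worked example (hypothetical TCI): a host-order tci of 0xe00a splits into
 * first_vid = tci & 0xfff = 10, first_cfi = (tci >> 12) & 1 = 0 and
 * first_prio = tci >> 13 = 7, i.e. PCP 7, DEI 0, VID 10.
 */
static void __rte_unused
mlx5_example_vlan_tci_split(void)
{
        uint16_t tci = rte_be_to_cpu_16(RTE_BE16(0xe00a));
        uint16_t vid __rte_unused = tci & MLX5DV_FLOW_VLAN_VID_MASK; /* 10 */
        uint16_t cfi __rte_unused = (tci >> 12) & 0x1;               /* 0 */
        uint16_t prio __rte_unused =
                tci >> MLX5DV_FLOW_VLAN_PCP_SHIFT;                   /* 7 */
}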
6668
6669 /**
6670  * Add IPV4 item to matcher and to the value.
6671  *
6672  * @param[in, out] matcher
6673  *   Flow matcher.
6674  * @param[in, out] key
6675  *   Flow matcher value.
6676  * @param[in] item
6677  *   Flow pattern to translate.
6678  * @param[in] inner
6679  *   Item is inner pattern.
6680  * @param[in] group
6681  *   The group to insert the rule.
6682  */
6683 static void
6684 flow_dv_translate_item_ipv4(void *matcher, void *key,
6685                             const struct rte_flow_item *item,
6686                             int inner, uint32_t group)
6687 {
6688         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6689         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6690         const struct rte_flow_item_ipv4 nic_mask = {
6691                 .hdr = {
6692                         .src_addr = RTE_BE32(0xffffffff),
6693                         .dst_addr = RTE_BE32(0xffffffff),
6694                         .type_of_service = 0xff,
6695                         .next_proto_id = 0xff,
6696                         .time_to_live = 0xff,
6697                 },
6698         };
6699         void *headers_m;
6700         void *headers_v;
6701         char *l24_m;
6702         char *l24_v;
6703         uint8_t tos;
6704
6705         if (inner) {
6706                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6707                                          inner_headers);
6708                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6709         } else {
6710                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6711                                          outer_headers);
6712                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6713         }
6714         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6715         if (!ipv4_v)
6716                 return;
6717         if (!ipv4_m)
6718                 ipv4_m = &nic_mask;
6719         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6720                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6721         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6722                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6723         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6724         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6725         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6726                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6727         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6728                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6729         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6730         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6731         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6732         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6733                  ipv4_m->hdr.type_of_service);
6734         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6735         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6736                  ipv4_m->hdr.type_of_service >> 2);
6737         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6738         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6739                  ipv4_m->hdr.next_proto_id);
6740         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6741                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6742         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6743                  ipv4_m->hdr.time_to_live);
6744         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6745                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6746         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6747                  !!(ipv4_m->hdr.fragment_offset));
6748         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6749                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6750 }
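
/*
 * Illustrative sketch (hypothetical item): matching a /24 source prefix.
 * The matcher value is spec & mask, so 10.0.0.99 with mask 255.255.255.0
 * programs a match on 10.0.0.0/24.
 */
static const struct rte_flow_item_ipv4 mlx5_example_ipv4_spec __rte_unused = {
        .hdr = { .src_addr = RTE_BE32(0x0a000063) }, /* 10.0.0.99 */
};
static const struct rte_flow_item_ipv4 mlx5_example_ipv4_mask __rte_unused = {
        .hdr = { .src_addr = RTE_BE32(0xffffff00) }, /* 255.255.255.0 */
};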
6751
6752 /**
6753  * Add IPV6 item to matcher and to the value.
6754  *
6755  * @param[in, out] matcher
6756  *   Flow matcher.
6757  * @param[in, out] key
6758  *   Flow matcher value.
6759  * @param[in] item
6760  *   Flow pattern to translate.
6761  * @param[in] inner
6762  *   Item is inner pattern.
6763  * @param[in] group
6764  *   The group to insert the rule.
6765  */
6766 static void
6767 flow_dv_translate_item_ipv6(void *matcher, void *key,
6768                             const struct rte_flow_item *item,
6769                             int inner, uint32_t group)
6770 {
6771         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6772         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6773         const struct rte_flow_item_ipv6 nic_mask = {
6774                 .hdr = {
6775                         .src_addr =
6776                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6777                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6778                         .dst_addr =
6779                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6780                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6781                         .vtc_flow = RTE_BE32(0xffffffff),
6782                         .proto = 0xff,
6783                         .hop_limits = 0xff,
6784                 },
6785         };
6786         void *headers_m;
6787         void *headers_v;
6788         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6789         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6790         char *l24_m;
6791         char *l24_v;
6792         uint32_t vtc_m;
6793         uint32_t vtc_v;
6794         int i;
6795         int size;
6796
6797         if (inner) {
6798                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6799                                          inner_headers);
6800                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6801         } else {
6802                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6803                                          outer_headers);
6804                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6805         }
6806         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6807         if (!ipv6_v)
6808                 return;
6809         if (!ipv6_m)
6810                 ipv6_m = &nic_mask;
6811         size = sizeof(ipv6_m->hdr.dst_addr);
6812         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6813                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6814         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6815                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6816         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6817         for (i = 0; i < size; ++i)
6818                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6819         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6820                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6821         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6822                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6823         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6824         for (i = 0; i < size; ++i)
6825                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6826         /* TOS. */
6827         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6828         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6829         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6830         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6831         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6832         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6833         /* Label. */
6834         if (inner) {
6835                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6836                          vtc_m);
6837                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6838                          vtc_v);
6839         } else {
6840                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6841                          vtc_m);
6842                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6843                          vtc_v);
6844         }
6845         /* Protocol. */
6846         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6847                  ipv6_m->hdr.proto);
6848         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6849                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6850         /* Hop limit. */
6851         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6852                  ipv6_m->hdr.hop_limits);
6853         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6854                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6855         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6856                  !!(ipv6_m->has_frag_ext));
6857         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6858                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6859 }
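
/*
 * Worked example (hypothetical vtc_flow): for host-order
 * vtc_flow = 0x6abcdef0, the traffic class sits in bits 20..27, so ip_dscp
 * takes (vtc >> 22) & 0x3f = 0x2a and ip_ecn takes (vtc >> 20) & 0x3 = 0x3
 * (MLX5_SET truncates each write to the field width); the flow label is the
 * low 20 bits, 0xcdef0.
 */
static void __rte_unused
mlx5_example_ipv6_vtc_split(void)
{
        uint32_t vtc = rte_be_to_cpu_32(RTE_BE32(0x6abcdef0));
        uint8_t dscp __rte_unused = (vtc >> 22) & 0x3f; /* 0x2a */
        uint8_t ecn __rte_unused = (vtc >> 20) & 0x3;   /* 0x3 */
        uint32_t label __rte_unused = vtc & 0xfffff;    /* 0xcdef0 */
}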
6860
6861 /**
6862  * Add IPV6 fragment extension item to matcher and to the value.
6863  *
6864  * @param[in, out] matcher
6865  *   Flow matcher.
6866  * @param[in, out] key
6867  *   Flow matcher value.
6868  * @param[in] item
6869  *   Flow pattern to translate.
6870  * @param[in] inner
6871  *   Item is inner pattern.
6872  */
6873 static void
6874 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6875                                      const struct rte_flow_item *item,
6876                                      int inner)
6877 {
6878         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6879         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6880         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6881                 .hdr = {
6882                         .next_header = 0xff,
6883                         .frag_data = RTE_BE16(0xffff),
6884                 },
6885         };
6886         void *headers_m;
6887         void *headers_v;
6888
6889         if (inner) {
6890                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6891                                          inner_headers);
6892                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6893         } else {
6894                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6895                                          outer_headers);
6896                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6897         }
6898         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6899         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6900         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6901         if (!ipv6_frag_ext_v)
6902                 return;
6903         if (!ipv6_frag_ext_m)
6904                 ipv6_frag_ext_m = &nic_mask;
6905         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6906                  ipv6_frag_ext_m->hdr.next_header);
6907         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6908                  ipv6_frag_ext_v->hdr.next_header &
6909                  ipv6_frag_ext_m->hdr.next_header);
6910 }
6911
6912 /**
6913  * Add TCP item to matcher and to the value.
6914  *
6915  * @param[in, out] matcher
6916  *   Flow matcher.
6917  * @param[in, out] key
6918  *   Flow matcher value.
6919  * @param[in] item
6920  *   Flow pattern to translate.
6921  * @param[in] inner
6922  *   Item is inner pattern.
6923  */
6924 static void
6925 flow_dv_translate_item_tcp(void *matcher, void *key,
6926                            const struct rte_flow_item *item,
6927                            int inner)
6928 {
6929         const struct rte_flow_item_tcp *tcp_m = item->mask;
6930         const struct rte_flow_item_tcp *tcp_v = item->spec;
6931         void *headers_m;
6932         void *headers_v;
6933
6934         if (inner) {
6935                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6936                                          inner_headers);
6937                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6938         } else {
6939                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6940                                          outer_headers);
6941                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6942         }
6943         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6944         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6945         if (!tcp_v)
6946                 return;
6947         if (!tcp_m)
6948                 tcp_m = &rte_flow_item_tcp_mask;
6949         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6950                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6951         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6952                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6953         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6954                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6955         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6956                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6957         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6958                  tcp_m->hdr.tcp_flags);
6959         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6960                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6961 }
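
/*
 * Illustrative sketch (hypothetical item): matching SYN-only segments. The
 * mask selects the SYN and ACK bits while the spec sets only SYN, and the
 * translation above additionally pins ip_protocol to IPPROTO_TCP.
 */
static const struct rte_flow_item_tcp mlx5_example_tcp_syn_spec __rte_unused = {
        .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
};
static const struct rte_flow_item_tcp mlx5_example_tcp_syn_mask __rte_unused = {
        .hdr = { .tcp_flags = RTE_TCP_SYN_FLAG | RTE_TCP_ACK_FLAG },
};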
6962
6963 /**
6964  * Add UDP item to matcher and to the value.
6965  *
6966  * @param[in, out] matcher
6967  *   Flow matcher.
6968  * @param[in, out] key
6969  *   Flow matcher value.
6970  * @param[in] item
6971  *   Flow pattern to translate.
6972  * @param[in] inner
6973  *   Item is inner pattern.
6974  */
6975 static void
6976 flow_dv_translate_item_udp(void *matcher, void *key,
6977                            const struct rte_flow_item *item,
6978                            int inner)
6979 {
6980         const struct rte_flow_item_udp *udp_m = item->mask;
6981         const struct rte_flow_item_udp *udp_v = item->spec;
6982         void *headers_m;
6983         void *headers_v;
6984
6985         if (inner) {
6986                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6987                                          inner_headers);
6988                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6989         } else {
6990                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6991                                          outer_headers);
6992                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6993         }
6994         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6995         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6996         if (!udp_v)
6997                 return;
6998         if (!udp_m)
6999                 udp_m = &rte_flow_item_udp_mask;
7000         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
7001                  rte_be_to_cpu_16(udp_m->hdr.src_port));
7002         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
7003                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
7004         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
7005                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
7006         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7007                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
7008 }
7009
7010 /**
7011  * Add the optional GRE key item to the matcher and to the value.
7012  *
7013  * @param[in, out] matcher
7014  *   Flow matcher.
7015  * @param[in, out] key
7016  *   Flow matcher value.
7017  * @param[in] item
7018  *   Flow pattern to translate.
7021  */
7022 static void
7023 flow_dv_translate_item_gre_key(void *matcher, void *key,
7024                                const struct rte_flow_item *item)
7025 {
7026         const rte_be32_t *key_m = item->mask;
7027         const rte_be32_t *key_v = item->spec;
7028         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7029         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7030         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7031
7032         /* GRE K bit must be on and should already be validated */
7033         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
7034         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
7035         if (!key_v)
7036                 return;
7037         if (!key_m)
7038                 key_m = &gre_key_default_mask;
7039         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
7040                  rte_be_to_cpu_32(*key_m) >> 8);
7041         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
7042                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
7043         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
7044                  rte_be_to_cpu_32(*key_m) & 0xFF);
7045         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
7046                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
7047 }
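
/*
 * Worked example (hypothetical key): a 32-bit GRE key of 0x11223344 is
 * split into gre_key_h = key >> 8 = 0x112233 (upper 24 bits) and
 * gre_key_l = key & 0xff = 0x44 (lower 8 bits).
 */
static void __rte_unused
mlx5_example_gre_key_split(void)
{
        uint32_t gre_key = rte_be_to_cpu_32(RTE_BE32(0x11223344));
        uint32_t key_h __rte_unused = gre_key >> 8;   /* 0x112233 */
        uint32_t key_l __rte_unused = gre_key & 0xff; /* 0x44 */
}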
7048
7049 /**
7050  * Add GRE item to matcher and to the value.
7051  *
7052  * @param[in, out] matcher
7053  *   Flow matcher.
7054  * @param[in, out] key
7055  *   Flow matcher value.
7056  * @param[in] item
7057  *   Flow pattern to translate.
7058  * @param[in] inner
7059  *   Item is inner pattern.
7060  */
7061 static void
7062 flow_dv_translate_item_gre(void *matcher, void *key,
7063                            const struct rte_flow_item *item,
7064                            int inner)
7065 {
7066         const struct rte_flow_item_gre *gre_m = item->mask;
7067         const struct rte_flow_item_gre *gre_v = item->spec;
7068         void *headers_m;
7069         void *headers_v;
7070         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7071         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7072         struct {
7073                 union {
7074                         __extension__
7075                         struct {
7076                                 uint16_t version:3;
7077                                 uint16_t rsvd0:9;
7078                                 uint16_t s_present:1;
7079                                 uint16_t k_present:1;
7080                                 uint16_t rsvd_bit1:1;
7081                                 uint16_t c_present:1;
7082                         };
7083                         uint16_t value;
7084                 };
7085         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
7086
7087         if (inner) {
7088                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7089                                          inner_headers);
7090                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7091         } else {
7092                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7093                                          outer_headers);
7094                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7095         }
7096         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7097         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
7098         if (!gre_v)
7099                 return;
7100         if (!gre_m)
7101                 gre_m = &rte_flow_item_gre_mask;
7102         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
7103                  rte_be_to_cpu_16(gre_m->protocol));
7104         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7105                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
7106         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
7107         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
7108         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
7109                  gre_crks_rsvd0_ver_m.c_present);
7110         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
7111                  gre_crks_rsvd0_ver_v.c_present &
7112                  gre_crks_rsvd0_ver_m.c_present);
7113         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
7114                  gre_crks_rsvd0_ver_m.k_present);
7115         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
7116                  gre_crks_rsvd0_ver_v.k_present &
7117                  gre_crks_rsvd0_ver_m.k_present);
7118         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
7119                  gre_crks_rsvd0_ver_m.s_present);
7120         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
7121                  gre_crks_rsvd0_ver_v.s_present &
7122                  gre_crks_rsvd0_ver_m.s_present);
7123 }
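
/*
 * Worked example (hypothetical header): on a little-endian host the
 * bitfield layout above maps c_rsvd0_ver bit 15 to C, bit 13 to K and
 * bit 12 to S. A host-order value of 0x2000 therefore means "K set, C and
 * S clear", and a mask of 0xb000 covers exactly the three flag bits.
 */
static void __rte_unused
mlx5_example_gre_flag_bits(void)
{
        uint16_t v = rte_be_to_cpu_16(RTE_BE16(0x2000));
        int c_present __rte_unused = !!(v & (1 << 15)); /* 0 */
        int k_present __rte_unused = !!(v & (1 << 13)); /* 1 */
        int s_present __rte_unused = !!(v & (1 << 12)); /* 0 */
}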
7124
7125 /**
7126  * Add NVGRE item to matcher and to the value.
7127  *
7128  * @param[in, out] matcher
7129  *   Flow matcher.
7130  * @param[in, out] key
7131  *   Flow matcher value.
7132  * @param[in] item
7133  *   Flow pattern to translate.
7134  * @param[in] inner
7135  *   Item is inner pattern.
7136  */
7137 static void
7138 flow_dv_translate_item_nvgre(void *matcher, void *key,
7139                              const struct rte_flow_item *item,
7140                              int inner)
7141 {
7142         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
7143         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
7144         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7145         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7146         const char *tni_flow_id_m;
7147         const char *tni_flow_id_v;
7148         char *gre_key_m;
7149         char *gre_key_v;
7150         int size;
7151         int i;
7152
7153         /* For NVGRE, GRE header fields must be set with defined values. */
7154         const struct rte_flow_item_gre gre_spec = {
7155                 .c_rsvd0_ver = RTE_BE16(0x2000),
7156                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7157         };
7158         const struct rte_flow_item_gre gre_mask = {
7159                 .c_rsvd0_ver = RTE_BE16(0xB000),
7160                 .protocol = RTE_BE16(UINT16_MAX),
7161         };
7162         const struct rte_flow_item gre_item = {
7163                 .spec = &gre_spec,
7164                 .mask = &gre_mask,
7165                 .last = NULL,
7166         };
7167         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7168         if (!nvgre_v)
7169                 return;
7170         if (!nvgre_m)
7171                 nvgre_m = &rte_flow_item_nvgre_mask;
7172         tni_flow_id_m = (const char *)nvgre_m->tni;
7173         tni_flow_id_v = (const char *)nvgre_v->tni;
7174         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7175         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7176         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7177         memcpy(gre_key_m, tni_flow_id_m, size);
7178         for (i = 0; i < size; ++i)
7179                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7180 }
7181
7182 /**
7183  * Add VXLAN item to matcher and to the value.
7184  *
7185  * @param[in, out] matcher
7186  *   Flow matcher.
7187  * @param[in, out] key
7188  *   Flow matcher value.
7189  * @param[in] item
7190  *   Flow pattern to translate.
7191  * @param[in] inner
7192  *   Item is inner pattern.
7193  */
7194 static void
7195 flow_dv_translate_item_vxlan(void *matcher, void *key,
7196                              const struct rte_flow_item *item,
7197                              int inner)
7198 {
7199         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7200         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7201         void *headers_m;
7202         void *headers_v;
7203         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7204         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7205         char *vni_m;
7206         char *vni_v;
7207         uint16_t dport;
7208         int size;
7209         int i;
7210
7211         if (inner) {
7212                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7213                                          inner_headers);
7214                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7215         } else {
7216                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7217                                          outer_headers);
7218                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7219         }
7220         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7221                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
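             /*
              * If the pattern leaves the UDP destination port wildcarded,
              * pin it to the default tunnel port (4789 for VXLAN, 4790 for
              * VXLAN-GPE) so the VNI match cannot apply to unrelated UDP
              * traffic.
              */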
7222         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7223                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7224                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7225         }
7226         if (!vxlan_v)
7227                 return;
7228         if (!vxlan_m)
7229                 vxlan_m = &rte_flow_item_vxlan_mask;
7230         size = sizeof(vxlan_m->vni);
7231         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7232         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7233         memcpy(vni_m, vxlan_m->vni, size);
7234         for (i = 0; i < size; ++i)
7235                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7236 }
7237
7238 /**
7239  * Add VXLAN-GPE item to matcher and to the value.
7240  *
7241  * @param[in, out] matcher
7242  *   Flow matcher.
7243  * @param[in, out] key
7244  *   Flow matcher value.
7245  * @param[in] item
7246  *   Flow pattern to translate.
7247  * @param[in] inner
7248  *   Item is inner pattern.
7249  */
7251 static void
7252 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7253                                  const struct rte_flow_item *item, int inner)
7254 {
7255         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7256         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7257         void *headers_m;
7258         void *headers_v;
7259         void *misc_m =
7260                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7261         void *misc_v =
7262                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7263         char *vni_m;
7264         char *vni_v;
7265         uint16_t dport;
7266         int size;
7267         int i;
7268         uint8_t flags_m = 0xff;
7269         uint8_t flags_v = 0xc;
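             /*
              * Default flags match: per the VXLAN-GPE header layout, 0x0c
              * keeps the I (VNI valid) and P (next protocol) bits set; both
              * defaults are overridden below when the pattern matches the
              * flags explicitly.
              */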
7270
7271         if (inner) {
7272                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7273                                          inner_headers);
7274                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7275         } else {
7276                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7277                                          outer_headers);
7278                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7279         }
7280         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7281                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7282         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7283                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7284                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7285         }
7286         if (!vxlan_v)
7287                 return;
7288         if (!vxlan_m)
7289                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7290         size = sizeof(vxlan_m->vni);
7291         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7292         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7293         memcpy(vni_m, vxlan_m->vni, size);
7294         for (i = 0; i < size; ++i)
7295                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7296         if (vxlan_m->flags) {
7297                 flags_m = vxlan_m->flags;
7298                 flags_v = vxlan_v->flags;
7299         }
7300         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7301         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7302         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7303                  vxlan_m->protocol);
7304         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7305                  vxlan_v->protocol);
7306 }
7307
7308 /**
7309  * Add Geneve item to matcher and to the value.
7310  *
7311  * @param[in, out] matcher
7312  *   Flow matcher.
7313  * @param[in, out] key
7314  *   Flow matcher value.
7315  * @param[in] item
7316  *   Flow pattern to translate.
7317  * @param[in] inner
7318  *   Item is inner pattern.
7319  */
7321 static void
7322 flow_dv_translate_item_geneve(void *matcher, void *key,
7323                               const struct rte_flow_item *item, int inner)
7324 {
7325         const struct rte_flow_item_geneve *geneve_m = item->mask;
7326         const struct rte_flow_item_geneve *geneve_v = item->spec;
7327         void *headers_m;
7328         void *headers_v;
7329         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7330         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7331         uint16_t dport;
7332         uint16_t gbhdr_m;
7333         uint16_t gbhdr_v;
7334         char *vni_m;
7335         char *vni_v;
7336         size_t size, i;
7337
7338         if (inner) {
7339                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7340                                          inner_headers);
7341                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7342         } else {
7343                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7344                                          outer_headers);
7345                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7346         }
7347         dport = MLX5_UDP_PORT_GENEVE;
7348         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7349                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7350                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7351         }
7352         if (!geneve_v)
7353                 return;
7354         if (!geneve_m)
7355                 geneve_m = &rte_flow_item_geneve_mask;
7356         size = sizeof(geneve_m->vni);
7357         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7358         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7359         memcpy(vni_m, geneve_m->vni, size);
7360         for (i = 0; i < size; ++i)
7361                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7362         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7363                  rte_be_to_cpu_16(geneve_m->protocol));
7364         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7365                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7366         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7367         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
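             /*
              * ver_opt_len_o_c_rsvd0 packs Ver(2) | Opt Len(6) | O | C |
              * Rsvd(6); the MLX5_GENEVE_* helpers below extract the OAM
              * flag and the option length from this 16-bit word.
              */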
7368         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7369                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7370         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7371                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7372         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7373                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7374         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7375                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7376                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7377 }
7378
7379 /**
7380  * Create Geneve TLV option resource.
7381  *
7382  * @param[in, out] dev
7383  *   Pointer to rte_eth_dev structure.
7384  * @param[in] item
7385  *   Flow pattern to translate.
7386  * @param[out] error
7387  *   Pointer to error structure.
7388  *
7389  * @return
7390  *   0 on success, a negative errno value otherwise.
7393  */
7395 int
7396 flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
7397                                              const struct rte_flow_item *item,
7398                                              struct rte_flow_error *error)
7399 {
7400         struct mlx5_priv *priv = dev->data->dev_private;
7401         struct mlx5_dev_ctx_shared *sh = priv->sh;
7402         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
7403                         sh->geneve_tlv_option_resource;
7404         struct mlx5_devx_obj *obj;
7405         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
7406         int ret = 0;
7407
7408         if (!geneve_opt_v)
7409                 return -1;
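             /*
              * The device holds a single GENEVE TLV option object shared by
              * all flows: it is created on first use, reference counted and
              * reused only while option class/type/length keep matching.
              */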
7410         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
7411         if (geneve_opt_resource != NULL) {
7412                 if (geneve_opt_resource->option_class ==
7413                         geneve_opt_v->option_class &&
7414                         geneve_opt_resource->option_type ==
7415                         geneve_opt_v->option_type &&
7416                         geneve_opt_resource->length ==
7417                         geneve_opt_v->option_len) {
7418                         /* We already have GENEVE TLV option obj allocated. */
7419                         __atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
7420                                            __ATOMIC_RELAXED);
7421                 } else {
7422                         ret = rte_flow_error_set(error, ENOMEM,
7423                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7424                                 "Only one GENEVE TLV option supported");
7425                         goto exit;
7426                 }
7427         } else {
7428                 /* Create a GENEVE TLV object and resource. */
7429                 obj = mlx5_devx_cmd_create_geneve_tlv_option(sh->ctx,
7430                                 geneve_opt_v->option_class,
7431                                 geneve_opt_v->option_type,
7432                                 geneve_opt_v->option_len);
7433                 if (!obj) {
7434                         ret = rte_flow_error_set(error, ENODATA,
7435                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7436                                 "Failed to create GENEVE TLV Devx object");
7437                         goto exit;
7438                 }
7439                 sh->geneve_tlv_option_resource =
7440                                 mlx5_malloc(MLX5_MEM_ZERO,
7441                                                 sizeof(*geneve_opt_resource),
7442                                                 0, SOCKET_ID_ANY);
7443                 if (!sh->geneve_tlv_option_resource) {
7444                         claim_zero(mlx5_devx_cmd_destroy(obj));
7445                         ret = rte_flow_error_set(error, ENOMEM,
7446                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7447                                 "GENEVE TLV object memory allocation failed");
7448                         goto exit;
7449                 }
7450                 geneve_opt_resource = sh->geneve_tlv_option_resource;
7451                 geneve_opt_resource->obj = obj;
7452                 geneve_opt_resource->option_class = geneve_opt_v->option_class;
7453                 geneve_opt_resource->option_type = geneve_opt_v->option_type;
7454                 geneve_opt_resource->length = geneve_opt_v->option_len;
7455                 __atomic_store_n(&geneve_opt_resource->refcnt, 1,
7456                                 __ATOMIC_RELAXED);
7457         }
7458 exit:
7459         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
7460         return ret;
7461 }
7462
7463 /**
7464  * Add Geneve TLV option item to matcher.
7465  *
7466  * @param[in, out] dev
7467  *   Pointer to rte_eth_dev structure.
7468  * @param[in, out] matcher
7469  *   Flow matcher.
7470  * @param[in, out] key
7471  *   Flow matcher value.
7472  * @param[in] item
7473  *   Flow pattern to translate.
7474  * @param[out] error
7475  *   Pointer to error structure.
7476  *
7477  * @return
7478  *   0 on success, a negative errno value otherwise.
7479  */
7477 static int
7478 flow_dv_translate_item_geneve_opt(struct rte_eth_dev *dev, void *matcher,
7479                                   void *key, const struct rte_flow_item *item,
7480                                   struct rte_flow_error *error)
7481 {
7482         const struct rte_flow_item_geneve_opt *geneve_opt_m = item->mask;
7483         const struct rte_flow_item_geneve_opt *geneve_opt_v = item->spec;
7484         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7485         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7486         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7487                         misc_parameters_3);
7488         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7489         rte_be32_t opt_data_key = 0, opt_data_mask = 0;
7490         int ret = 0;
7491
7492         if (!geneve_opt_v)
7493                 return -1;
7494         if (!geneve_opt_m)
7495                 geneve_opt_m = &rte_flow_item_geneve_opt_mask;
7496         ret = flow_dev_geneve_tlv_option_resource_register(dev, item,
7497                                                            error);
7498         if (ret) {
7499                 DRV_LOG(ERR, "Failed to create geneve_tlv_obj");
7500                 return ret;
7501         }
7502         /*
7503          * Set the option length in the GENEVE header if not requested.
7504          * The GENEVE TLV option length is expressed by the option length
7505          * field in the GENEVE header.
7506          * If the option length was not requested but the GENEVE TLV option
7507          * item is present, set the option length field implicitly.
7508          */
7509         if (!MLX5_GET16(fte_match_set_misc, misc_m, geneve_opt_len)) {
7510                 MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7511                          MLX5_GENEVE_OPTLEN_MASK);
7512                 MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7513                          geneve_opt_v->option_len + 1);
7514         }
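             /*
              * option_len counts the option data in 4-byte words excluding
              * the 4-byte option header, while the GENEVE header opt_len
              * covers the whole options area, hence the "+ 1" above for the
              * single supported option.
              */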
7515         /* Set the data. */
7516         if (geneve_opt_v->data) {
7517                 memcpy(&opt_data_key, geneve_opt_v->data,
7518                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
7519                                 sizeof(opt_data_key)));
7520                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
7521                                 sizeof(opt_data_key));
7522                 memcpy(&opt_data_mask, geneve_opt_m->data,
7523                         RTE_MIN((uint32_t)(geneve_opt_v->option_len * 4),
7524                                 sizeof(opt_data_mask)));
7525                 MLX5_ASSERT((uint32_t)(geneve_opt_v->option_len * 4) <=
7526                                 sizeof(opt_data_mask));
7527                 MLX5_SET(fte_match_set_misc3, misc3_m,
7528                                 geneve_tlv_option_0_data,
7529                                 rte_be_to_cpu_32(opt_data_mask));
7530                 MLX5_SET(fte_match_set_misc3, misc3_v,
7531                                 geneve_tlv_option_0_data,
7532                         rte_be_to_cpu_32(opt_data_key & opt_data_mask));
7533         }
7534         return ret;
7535 }
7536
7537 /**
7538  * Add MPLS item to matcher and to the value.
7539  *
7540  * @param[in, out] matcher
7541  *   Flow matcher.
7542  * @param[in, out] key
7543  *   Flow matcher value.
7544  * @param[in] item
7545  *   Flow pattern to translate.
7546  * @param[in] prev_layer
7547  *   The protocol layer indicated in previous item.
7548  * @param[in] inner
7549  *   Item is inner pattern.
7550  */
7551 static void
7552 flow_dv_translate_item_mpls(void *matcher, void *key,
7553                             const struct rte_flow_item *item,
7554                             uint64_t prev_layer,
7555                             int inner)
7556 {
7557         const uint32_t *in_mpls_m = item->mask;
7558         const uint32_t *in_mpls_v = item->spec;
7559         uint32_t *out_mpls_m = NULL;
7560         uint32_t *out_mpls_v = NULL;
7561         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7562         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7563         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7564                                      misc_parameters_2);
7565         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7566         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7567         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7568
7569         switch (prev_layer) {
7570         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7571                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7572                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7573                          MLX5_UDP_PORT_MPLS);
7574                 break;
7575         case MLX5_FLOW_LAYER_GRE:
7576                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7577                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7578                          RTE_ETHER_TYPE_MPLS);
7579                 break;
7580         default:
7581                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7582                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7583                          IPPROTO_MPLS);
7584                 break;
7585         }
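             /*
              * The switch above pins the encapsulation protocol carrying
              * MPLS (UDP port 6635, GRE protocol 0x8847 or IP protocol
              * 137); the switch below selects which PRM field holds the
              * label to be matched.
              */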
7586         if (!in_mpls_v)
7587                 return;
7588         if (!in_mpls_m)
7589                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7590         switch (prev_layer) {
7591         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7592                 out_mpls_m =
7593                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7594                                                  outer_first_mpls_over_udp);
7595                 out_mpls_v =
7596                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7597                                                  outer_first_mpls_over_udp);
7598                 break;
7599         case MLX5_FLOW_LAYER_GRE:
7600                 out_mpls_m =
7601                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7602                                                  outer_first_mpls_over_gre);
7603                 out_mpls_v =
7604                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7605                                                  outer_first_mpls_over_gre);
7606                 break;
7607         default:
7608                 /* Inner MPLS not over GRE is not supported. */
7609                 if (!inner) {
7610                         out_mpls_m =
7611                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7612                                                          misc2_m,
7613                                                          outer_first_mpls);
7614                         out_mpls_v =
7615                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7616                                                          misc2_v,
7617                                                          outer_first_mpls);
7618                 }
7619                 break;
7620         }
7621         if (out_mpls_m && out_mpls_v) {
7622                 *out_mpls_m = *in_mpls_m;
7623                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7624         }
7625 }
7626
7627 /**
7628  * Add metadata register item to matcher
7629  *
7630  * @param[in, out] matcher
7631  *   Flow matcher.
7632  * @param[in, out] key
7633  *   Flow matcher value.
7634  * @param[in] reg_type
7635  *   Type of device metadata register.
7636  * @param[in] data
7637  *   Register value to match.
7638  * @param[in] mask
7639  *   Register mask.
7640  */
7641 static void
7642 flow_dv_match_meta_reg(void *matcher, void *key,
7643                        enum modify_reg reg_type,
7644                        uint32_t data, uint32_t mask)
7645 {
7646         void *misc2_m =
7647                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7648         void *misc2_v =
7649                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7650         uint32_t temp;
7651
7652         data &= mask;
7653         switch (reg_type) {
7654         case REG_A:
7655                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7656                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7657                 break;
7658         case REG_B:
7659                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7660                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7661                 break;
7662         case REG_C_0:
7663                 /*
7664                  * The metadata register C0 field might be divided into
7665                  * source vport index and META item value. Set only the
7666                  * bits covered by the specified mask, not the whole field.
7667                  */
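                 /*
                  * Hypothetical example: with META in the lower half of C0
                  * (mask 0x0000ffff) and the vport tag in the upper half,
                  * the read-modify-write below ORs 0x0000ffff into the
                  * matcher mask and splices the new data bits into the
                  * value without disturbing the vport bits.
                  */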
7668                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7669                 temp |= mask;
7670                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7671                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7672                 temp &= ~mask;
7673                 temp |= data;
7674                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7675                 break;
7676         case REG_C_1:
7677                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7678                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7679                 break;
7680         case REG_C_2:
7681                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7682                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7683                 break;
7684         case REG_C_3:
7685                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7686                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7687                 break;
7688         case REG_C_4:
7689                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7690                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7691                 break;
7692         case REG_C_5:
7693                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7694                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7695                 break;
7696         case REG_C_6:
7697                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7698                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7699                 break;
7700         case REG_C_7:
7701                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7702                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7703                 break;
7704         default:
7705                 MLX5_ASSERT(false);
7706                 break;
7707         }
7708 }
7709
7710 /**
7711  * Add MARK item to matcher
7712  *
7713  * @param[in] dev
7714  *   The device to configure through.
7715  * @param[in, out] matcher
7716  *   Flow matcher.
7717  * @param[in, out] key
7718  *   Flow matcher value.
7719  * @param[in] item
7720  *   Flow pattern to translate.
7721  */
7722 static void
7723 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7724                             void *matcher, void *key,
7725                             const struct rte_flow_item *item)
7726 {
7727         struct mlx5_priv *priv = dev->data->dev_private;
7728         const struct rte_flow_item_mark *mark;
7729         uint32_t value;
7730         uint32_t mask;
7731
7732         mark = item->mask ? (const void *)item->mask :
7733                             &rte_flow_item_mark_mask;
7734         mask = mark->id & priv->sh->dv_mark_mask;
7735         mark = (const void *)item->spec;
7736         MLX5_ASSERT(mark);
7737         value = mark->id & priv->sh->dv_mark_mask & mask;
7738         if (mask) {
7739                 enum modify_reg reg;
7740
7741                 /* Get the metadata register index for the mark. */
7742                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7743                 MLX5_ASSERT(reg > 0);
7744                 if (reg == REG_C_0) {
7746                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7747                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7748
7749                         mask &= msk_c0;
7750                         mask <<= shl_c0;
7751                         value <<= shl_c0;
7752                 }
7753                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7754         }
7755 }
7756
7757 /**
7758  * Add META item to matcher
7759  *
7760  * @param[in] dev
7761  *   The device to configure through.
7762  * @param[in, out] matcher
7763  *   Flow matcher.
7764  * @param[in, out] key
7765  *   Flow matcher value.
7766  * @param[in] attr
7767  *   Attributes of flow that includes this item.
7768  * @param[in] item
7769  *   Flow pattern to translate.
7770  */
7771 static void
7772 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7773                             void *matcher, void *key,
7774                             const struct rte_flow_attr *attr,
7775                             const struct rte_flow_item *item)
7776 {
7777         const struct rte_flow_item_meta *meta_m;
7778         const struct rte_flow_item_meta *meta_v;
7779
7780         meta_m = (const void *)item->mask;
7781         if (!meta_m)
7782                 meta_m = &rte_flow_item_meta_mask;
7783         meta_v = (const void *)item->spec;
7784         if (meta_v) {
7785                 int reg;
7786                 uint32_t value = meta_v->data;
7787                 uint32_t mask = meta_m->data;
7788
7789                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7790                 if (reg < 0)
7791                         return;
7792                 MLX5_ASSERT(reg != REG_NON);
7793                 /*
7794                  * In datapath code there are no endianness
7795                  * conversions for performance reasons; all
7796                  * pattern conversions are done in rte_flow.
7797                  */
7798                 value = rte_cpu_to_be_32(value);
7799                 mask = rte_cpu_to_be_32(mask);
7800                 if (reg == REG_C_0) {
7801                         struct mlx5_priv *priv = dev->data->dev_private;
7802                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7803                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7804 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7805                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7806
7807                         value >>= shr_c0;
7808                         mask >>= shr_c0;
7809 #endif
7810                         value <<= shl_c0;
7811                         mask <<= shl_c0;
7812                         MLX5_ASSERT(msk_c0);
7813                         MLX5_ASSERT(!(~msk_c0 & mask));
7814                 }
7815                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7816         }
7817 }
7818
7819 /**
7820  * Add vport metadata Reg C0 item to matcher
7821  *
7822  * @param[in, out] matcher
7823  *   Flow matcher.
7824  * @param[in, out] key
7825  *   Flow matcher value.
7826  * @param[in] value
7827  *   Register value to match.
7828  * @param[in] mask
7829  *   Register mask.
7828  */
7829 static void
7830 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7831                                   uint32_t value, uint32_t mask)
7832 {
7833         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7834 }
7835
7836 /**
7837  * Add tag item to matcher
7838  *
7839  * @param[in] dev
7840  *   The device to configure through.
7841  * @param[in, out] matcher
7842  *   Flow matcher.
7843  * @param[in, out] key
7844  *   Flow matcher value.
7845  * @param[in] item
7846  *   Flow pattern to translate.
7847  */
7848 static void
7849 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7850                                 void *matcher, void *key,
7851                                 const struct rte_flow_item *item)
7852 {
7853         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7854         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7855         uint32_t mask, value;
7856
7857         MLX5_ASSERT(tag_v);
7858         value = tag_v->data;
7859         mask = tag_m ? tag_m->data : UINT32_MAX;
7860         if (tag_v->id == REG_C_0) {
7861                 struct mlx5_priv *priv = dev->data->dev_private;
7862                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7863                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7864
7865                 mask &= msk_c0;
7866                 mask <<= shl_c0;
7867                 value <<= shl_c0;
7868         }
7869         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7870 }
7871
7872 /**
7873  * Add TAG item to matcher
7874  *
7875  * @param[in] dev
7876  *   The device to configure through.
7877  * @param[in, out] matcher
7878  *   Flow matcher.
7879  * @param[in, out] key
7880  *   Flow matcher value.
7881  * @param[in] item
7882  *   Flow pattern to translate.
7883  */
7884 static void
7885 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7886                            void *matcher, void *key,
7887                            const struct rte_flow_item *item)
7888 {
7889         const struct rte_flow_item_tag *tag_v = item->spec;
7890         const struct rte_flow_item_tag *tag_m = item->mask;
7891         enum modify_reg reg;
7892
7893         MLX5_ASSERT(tag_v);
7894         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7895         /* Get the metadata register index for the tag. */
7896         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7897         MLX5_ASSERT(reg > 0);
7898         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7899 }
7900
7901 /**
7902  * Add source vport match to the specified matcher.
7903  *
7904  * @param[in, out] matcher
7905  *   Flow matcher.
7906  * @param[in, out] key
7907  *   Flow matcher value.
7908  * @param[in] port
7909  *   Source vport value to match.
7910  * @param[in] mask
7911  *   Mask.
7912  */
7913 static void
7914 flow_dv_translate_item_source_vport(void *matcher, void *key,
7915                                     int16_t port, uint16_t mask)
7916 {
7917         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7918         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7919
7920         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7921         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7922 }
7923
7924 /**
7925  * Translate port-id item to E-Switch match on port-id.
7926  *
7927  * @param[in] dev
7928  *   The device to configure through.
7929  * @param[in, out] matcher
7930  *   Flow matcher.
7931  * @param[in, out] key
7932  *   Flow matcher value.
7933  * @param[in] item
7934  *   Flow pattern to translate.
7935  * @param[in] attr
7936  *   Flow attributes.
7937  *
7938  * @return
7939  *   0 on success, a negative errno value otherwise.
7940  */
7941 static int
7942 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7943                                void *key, const struct rte_flow_item *item,
7944                                const struct rte_flow_attr *attr)
7945 {
7946         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7947         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7948         struct mlx5_priv *priv;
7949         uint16_t mask, id;
7950
7951         mask = pid_m ? pid_m->id : 0xffff;
7952         id = pid_v ? pid_v->id : dev->data->port_id;
7953         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7954         if (!priv)
7955                 return -rte_errno;
7956         /*
7957          * Translate to vport field or to metadata, depending on mode.
7958          * Kernel can use either misc.source_port or half of C0 metadata
7959          * register.
7960          */
7961         if (priv->vport_meta_mask) {
7962                 /*
7963                  * Provide the hint for SW steering library
7964                  * to insert the flow into ingress domain and
7965                  * save the extra vport match.
7966                  */
7967                 if (mask == 0xffff && priv->vport_id == 0xffff &&
7968                     priv->pf_bond < 0 && attr->transfer)
7969                         flow_dv_translate_item_source_vport
7970                                 (matcher, key, priv->vport_id, mask);
7971                 else
7972                         flow_dv_translate_item_meta_vport
7973                                 (matcher, key,
7974                                  priv->vport_meta_tag,
7975                                  priv->vport_meta_mask);
7976         } else {
7977                 flow_dv_translate_item_source_vport(matcher, key,
7978                                                     priv->vport_id, mask);
7979         }
7980         return 0;
7981 }
7982
7983 /**
7984  * Add ICMP6 item to matcher and to the value.
7985  *
7986  * @param[in, out] matcher
7987  *   Flow matcher.
7988  * @param[in, out] key
7989  *   Flow matcher value.
7990  * @param[in] item
7991  *   Flow pattern to translate.
7992  * @param[in] inner
7993  *   Item is inner pattern.
7994  */
7995 static void
7996 flow_dv_translate_item_icmp6(void *matcher, void *key,
7997                               const struct rte_flow_item *item,
7998                               int inner)
7999 {
8000         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
8001         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
8002         void *headers_m;
8003         void *headers_v;
8004         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8005                                      misc_parameters_3);
8006         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8007         if (inner) {
8008                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8009                                          inner_headers);
8010                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8011         } else {
8012                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8013                                          outer_headers);
8014                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8015         }
8016         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8017         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
8018         if (!icmp6_v)
8019                 return;
8020         if (!icmp6_m)
8021                 icmp6_m = &rte_flow_item_icmp6_mask;
8022         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
8023         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
8024                  icmp6_v->type & icmp6_m->type);
8025         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
8026         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
8027                  icmp6_v->code & icmp6_m->code);
8028 }
8029
8030 /**
8031  * Add ICMP item to matcher and to the value.
8032  *
8033  * @param[in, out] matcher
8034  *   Flow matcher.
8035  * @param[in, out] key
8036  *   Flow matcher value.
8037  * @param[in] item
8038  *   Flow pattern to translate.
8039  * @param[in] inner
8040  *   Item is inner pattern.
8041  */
8042 static void
8043 flow_dv_translate_item_icmp(void *matcher, void *key,
8044                             const struct rte_flow_item *item,
8045                             int inner)
8046 {
8047         const struct rte_flow_item_icmp *icmp_m = item->mask;
8048         const struct rte_flow_item_icmp *icmp_v = item->spec;
8049         uint32_t icmp_header_data_m = 0;
8050         uint32_t icmp_header_data_v = 0;
8051         void *headers_m;
8052         void *headers_v;
8053         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8054                                      misc_parameters_3);
8055         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8056         if (inner) {
8057                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8058                                          inner_headers);
8059                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8060         } else {
8061                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8062                                          outer_headers);
8063                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8064         }
8065         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
8066         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
8067         if (!icmp_v)
8068                 return;
8069         if (!icmp_m)
8070                 icmp_m = &rte_flow_item_icmp_mask;
8071         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
8072                  icmp_m->hdr.icmp_type);
8073         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
8074                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
8075         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
8076                  icmp_m->hdr.icmp_code);
8077         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
8078                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
8079         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
8080         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
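             /*
              * The PRM icmp_header_data field holds the ICMP identifier in
              * its upper 16 bits and the sequence number in the lower 16
              * bits, so both are folded into one host-endian dword before
              * matching.
              */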
8081         if (icmp_header_data_m) {
8082                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
8083                 icmp_header_data_v |=
8084                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
8085                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
8086                          icmp_header_data_m);
8087                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
8088                          icmp_header_data_v & icmp_header_data_m);
8089         }
8090 }
8091
8092 /**
8093  * Add GTP item to matcher and to the value.
8094  *
8095  * @param[in, out] matcher
8096  *   Flow matcher.
8097  * @param[in, out] key
8098  *   Flow matcher value.
8099  * @param[in] item
8100  *   Flow pattern to translate.
8101  * @param[in] inner
8102  *   Item is inner pattern.
8103  */
8104 static void
8105 flow_dv_translate_item_gtp(void *matcher, void *key,
8106                            const struct rte_flow_item *item, int inner)
8107 {
8108         const struct rte_flow_item_gtp *gtp_m = item->mask;
8109         const struct rte_flow_item_gtp *gtp_v = item->spec;
8110         void *headers_m;
8111         void *headers_v;
8112         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8113                                      misc_parameters_3);
8114         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8115         uint16_t dport = RTE_GTPU_UDP_PORT;
8116
8117         if (inner) {
8118                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8119                                          inner_headers);
8120                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
8121         } else {
8122                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
8123                                          outer_headers);
8124                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
8125         }
8126         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
8127                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
8128                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
8129         }
8130         if (!gtp_v)
8131                 return;
8132         if (!gtp_m)
8133                 gtp_m = &rte_flow_item_gtp_mask;
8134         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
8135                  gtp_m->v_pt_rsv_flags);
8136         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
8137                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
8138         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
8139         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
8140                  gtp_v->msg_type & gtp_m->msg_type);
8141         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
8142                  rte_be_to_cpu_32(gtp_m->teid));
8143         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
8144                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
8145 }
8146
8147 /**
8148  * Add GTP PSC item to matcher.
8149  *
8150  * @param[in, out] matcher
8151  *   Flow matcher.
8152  * @param[in, out] key
8153  *   Flow matcher value.
8154  * @param[in] item
8155  *   Flow pattern to translate.
8156  *
8157  * @return
8158  *   0 on success.
8159  */
8157 static int
8158 flow_dv_translate_item_gtp_psc(void *matcher, void *key,
8159                                const struct rte_flow_item *item)
8160 {
8161         const struct rte_flow_item_gtp_psc *gtp_psc_m = item->mask;
8162         const struct rte_flow_item_gtp_psc *gtp_psc_v = item->spec;
8163         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
8164                         misc_parameters_3);
8165         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
8166         union {
8167                 uint32_t w32;
8168                 struct {
8169                         uint16_t seq_num;
8170                         uint8_t npdu_num;
8171                         uint8_t next_ext_header_type;
8172                 };
8173         } dw_2;
8174         uint8_t gtp_flags;
8175
8176         /* Always set E-flag match to one, regardless of GTP item settings. */
8177         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_m, gtpu_msg_flags);
8178         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8179         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags, gtp_flags);
8180         gtp_flags = MLX5_GET(fte_match_set_misc3, misc3_v, gtpu_msg_flags);
8181         gtp_flags |= MLX5_GTP_EXT_HEADER_FLAG;
8182         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags, gtp_flags);
8183         /* Set next extension header type. */
8184         dw_2.seq_num = 0;
8185         dw_2.npdu_num = 0;
8186         dw_2.next_ext_header_type = 0xff;
8187         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_dw_2,
8188                  rte_cpu_to_be_32(dw_2.w32));
8189         dw_2.seq_num = 0;
8190         dw_2.npdu_num = 0;
8191         dw_2.next_ext_header_type = 0x85;
8192         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_dw_2,
8193                  rte_cpu_to_be_32(dw_2.w32));
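             /*
              * 0x85 is the GTP-U extension header type of the PDU Session
              * Container, so only packets whose next extension header is
              * PSC can hit this rule, while seq_num and npdu_num stay
              * wildcarded (their mask bytes are zero).
              */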
8194         if (gtp_psc_v) {
8195                 union {
8196                         uint32_t w32;
8197                         struct {
8198                                 uint8_t len;
8199                                 uint8_t type_flags;
8200                                 uint8_t qfi;
8201                                 uint8_t reserved;
8202                         };
8203                 } dw_0;
8204
8205                 /* Set extension header PDU type and QoS. */
8206                 if (!gtp_psc_m)
8207                         gtp_psc_m = &rte_flow_item_gtp_psc_mask;
8208                 dw_0.w32 = 0;
8209                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_m->pdu_type);
8210                 dw_0.qfi = gtp_psc_m->qfi;
8211                 MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_first_ext_dw_0,
8212                          rte_cpu_to_be_32(dw_0.w32));
8213                 dw_0.w32 = 0;
8214                 dw_0.type_flags = MLX5_GTP_PDU_TYPE_SHIFT(gtp_psc_v->pdu_type &
8215                                                         gtp_psc_m->pdu_type);
8216                 dw_0.qfi = gtp_psc_v->qfi & gtp_psc_m->qfi;
8217                 MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_first_ext_dw_0,
8218                          rte_cpu_to_be_32(dw_0.w32));
8219         }
8220         return 0;
8221 }
8222
8223 /**
8224  * Add eCPRI item to matcher and to the value.
8225  *
8226  * @param[in] dev
8227  *   The device to configure through.
8228  * @param[in, out] matcher
8229  *   Flow matcher.
8230  * @param[in, out] key
8231  *   Flow matcher value.
8232  * @param[in] item
8233  *   Flow pattern to translate.
8234  * @param[in] samples
8235  *   Sample IDs to be used in the matching.
8236  */
8237 static void
8238 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
8239                              void *key, const struct rte_flow_item *item)
8240 {
8241         struct mlx5_priv *priv = dev->data->dev_private;
8242         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
8243         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
8244         struct rte_ecpri_common_hdr common;
8245         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
8246                                      misc_parameters_4);
8247         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
8248         uint32_t *samples;
8249         void *dw_m;
8250         void *dw_v;
8251
8252         if (!ecpri_v)
8253                 return;
8254         if (!ecpri_m)
8255                 ecpri_m = &rte_flow_item_ecpri_mask;
8256         /*
8257          * At most four DW samples are supported in a single matching.
8258          * Two are currently used for eCPRI matching:
8259          * 1. Type: one byte, mask should be 0x00ff0000 in network order
8260          * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
8261          *    if any.
8262          */
8263         if (!ecpri_m->hdr.common.u32)
8264                 return;
8265         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
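             /*
              * The flex parser is assumed to have been configured at device
              * start to sample the eCPRI header: samples[0] identifies the
              * dword holding the common header (message type byte) and
              * samples[1] the first payload dword (message ID fields).
              */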
8266         /* Need to take the whole DW as the mask to fill the entry. */
8267         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8268                             prog_sample_field_value_0);
8269         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
8270                             prog_sample_field_value_0);
8271         /* Already big endian (network order) in the header. */
8272         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
8273         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
8274         /* Sample#0, used for matching type, offset 0. */
8275         MLX5_SET(fte_match_set_misc4, misc4_m,
8276                  prog_sample_field_id_0, samples[0]);
8277         /* It makes no sense to set the sample ID in the mask field. */
8278         MLX5_SET(fte_match_set_misc4, misc4_v,
8279                  prog_sample_field_id_0, samples[0]);
8280         /*
8281          * Check whether the message body part needs to be matched.
8282          * Wildcard rules matching only the type field must be supported.
8283          */
8284         if (ecpri_m->hdr.dummy[0]) {
8285                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
8286                 switch (common.type) {
8287                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
8288                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
8289                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
8290                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
8291                                             prog_sample_field_value_1);
8292                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
8293                                             prog_sample_field_value_1);
8294                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
8295                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
8296                                             ecpri_m->hdr.dummy[0];
8297                         /* Sample#1, to match message body, offset 4. */
8298                         MLX5_SET(fte_match_set_misc4, misc4_m,
8299                                  prog_sample_field_id_1, samples[1]);
8300                         MLX5_SET(fte_match_set_misc4, misc4_v,
8301                                  prog_sample_field_id_1, samples[1]);
8302                         break;
8303                 default:
8304                         /* Others, do not match any sample ID. */
8305                         break;
8306                 }
8307         }
8308 }
8309
8310 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
8311
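/*
 * A criteria sub-header participates in matching only when at least one of
 * its mask bits is set; flow_dv_matcher_enable() below folds that per-header
 * test into the match_criteria_enable bitmap programmed to the firmware.
 */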
8312 #define HEADER_IS_ZERO(match_criteria, headers)                              \
8313         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
8314                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
8315
8316 /**
8317  * Calculate flow matcher enable bitmap.
8318  *
8319  * @param match_criteria
8320  *   Pointer to flow matcher criteria.
8321  *
8322  * @return
8323  *   Bitmap of enabled fields.
8324  */
8325 static uint8_t
8326 flow_dv_matcher_enable(uint32_t *match_criteria)
8327 {
8328         uint8_t match_criteria_enable;
8329
8330         match_criteria_enable =
8331                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
8332                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
8333         match_criteria_enable |=
8334                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
8335                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
8336         match_criteria_enable |=
8337                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
8338                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
8339         match_criteria_enable |=
8340                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
8341                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8342         match_criteria_enable |=
8343                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
8344                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
8345         match_criteria_enable |=
8346                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
8347                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
8348         return match_criteria_enable;
8349 }
8350
8351 struct mlx5_hlist_entry *
8352 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
8353 {
8354         struct mlx5_dev_ctx_shared *sh = list->ctx;
8355         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8356         struct rte_eth_dev *dev = ctx->dev;
8357         struct mlx5_flow_tbl_data_entry *tbl_data;
8358         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
8359         struct rte_flow_error *error = ctx->error;
8360         union mlx5_flow_tbl_key key = { .v64 = key64 };
8361         struct mlx5_flow_tbl_resource *tbl;
8362         void *domain;
8363         uint32_t idx = 0;
8364         int ret;
8365
8366         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
8367         if (!tbl_data) {
8368                 rte_flow_error_set(error, ENOMEM,
8369                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8370                                    NULL,
8371                                    "cannot allocate flow table data entry");
8372                 return NULL;
8373         }
8374         tbl_data->idx = idx;
8375         tbl_data->tunnel = tt_prm->tunnel;
8376         tbl_data->group_id = tt_prm->group_id;
8377         tbl_data->external = !!tt_prm->external;
8378         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
8379         tbl_data->is_egress = !!key.direction;
8380         tbl_data->is_transfer = !!key.domain;
8381         tbl_data->dummy = !!key.dummy;
8382         tbl_data->table_id = key.table_id;
8383         tbl = &tbl_data->tbl;
8384         if (key.dummy)
8385                 return &tbl_data->entry;
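             /*
              * The 64-bit table key encodes the domain: transfer tables live
              * in the FDB (E-Switch) domain; otherwise the direction bit
              * selects between the NIC Tx and Rx domains.
              */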
8386         if (key.domain)
8387                 domain = sh->fdb_domain;
8388         else if (key.direction)
8389                 domain = sh->tx_domain;
8390         else
8391                 domain = sh->rx_domain;
8392         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
8393         if (ret) {
8394                 rte_flow_error_set(error, ENOMEM,
8395                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8396                                    NULL, "cannot create flow table object");
8397                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8398                 return NULL;
8399         }
8400         if (key.table_id) {
8401                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
8402                                         (tbl->obj, &tbl_data->jump.action);
8403                 if (ret) {
8404                         rte_flow_error_set(error, ENOMEM,
8405                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8406                                            NULL,
8407                                            "cannot create flow jump action");
8408                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8409                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8410                         return NULL;
8411                 }
8412         }
8413         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8414               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8415               key.table_id);
8416         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8417                              flow_dv_matcher_create_cb,
8418                              flow_dv_matcher_match_cb,
8419                              flow_dv_matcher_remove_cb);
8420         return &tbl_data->entry;
8421 }
8422
8423 int
8424 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
8425                      struct mlx5_hlist_entry *entry, uint64_t key64,
8426                      void *cb_ctx __rte_unused)
8427 {
8428         struct mlx5_flow_tbl_data_entry *tbl_data =
8429                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8430         union mlx5_flow_tbl_key key = { .v64 = key64 };
8431
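             /* Per the hash list convention, return 0 only on a full match. */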
8432         return tbl_data->table_id != key.table_id ||
8433                tbl_data->dummy != key.dummy ||
8434                tbl_data->is_transfer != key.domain ||
8435                tbl_data->is_egress != key.direction;
8436 }
8437
8438 /**
8439  * Get a flow table.
8440  *
8441  * @param[in, out] dev
8442  *   Pointer to rte_eth_dev structure.
8443  * @param[in] table_id
8444  *   Table id to use.
8445  * @param[in] egress
8446  *   Direction of the table.
8447  * @param[in] transfer
8448  *   E-Switch or NIC flow.
8449  * @param[in] external
8450  *   Whether the table is created for application (external) flows.
8451  * @param[in] tunnel
8452  *   Tunnel offload context this table belongs to, NULL if none.
8453  * @param[in] group_id
8454  *   Original group id used with tunnel offload.
8455  * @param[in] dummy
8456  *   Dummy entry for dv API.
8451  * @param[out] error
8452  *   Pointer to error structure.
8453  *
8454  * @return
8455  *   Returns the table resource based on the index, NULL in case of failure.
8456  */
8457 struct mlx5_flow_tbl_resource *
8458 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8459                          uint32_t table_id, uint8_t egress,
8460                          uint8_t transfer,
8461                          bool external,
8462                          const struct mlx5_flow_tunnel *tunnel,
8463                          uint32_t group_id, uint8_t dummy,
8464                          struct rte_flow_error *error)
8465 {
8466         struct mlx5_priv *priv = dev->data->dev_private;
8467         union mlx5_flow_tbl_key table_key = {
8468                 {
8469                         .table_id = table_id,
8470                         .dummy = dummy,
8471                         .domain = !!transfer,
8472                         .direction = !!egress,
8473                 }
8474         };
8475         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8476                 .tunnel = tunnel,
8477                 .group_id = group_id,
8478                 .external = external,
8479         };
8480         struct mlx5_flow_cb_ctx ctx = {
8481                 .dev = dev,
8482                 .error = error,
8483                 .data = &tt_prm,
8484         };
8485         struct mlx5_hlist_entry *entry;
8486         struct mlx5_flow_tbl_data_entry *tbl_data;
8487
8488         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8489         if (!entry) {
8490                 rte_flow_error_set(error, ENOMEM,
8491                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8492                                    "cannot get table");
8493                 return NULL;
8494         }
8495         DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
8496                 table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
8497         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8498         return &tbl_data->tbl;
8499 }
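
/*
 * Usage sketch with hypothetical arguments: take a reference on an
 * ingress NIC table (created on first use) and drop it again with
 * flow_dv_tbl_resource_release() once it is no longer needed.
 *
 *     struct mlx5_flow_tbl_resource *tbl;
 *
 *     tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, 0,
 *                                    error);
 *     if (!tbl)
 *             return -rte_errno;
 *     ...
 *     flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
 */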
8500
8501 void
8502 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8503                       struct mlx5_hlist_entry *entry)
8504 {
8505         struct mlx5_dev_ctx_shared *sh = list->ctx;
8506         struct mlx5_flow_tbl_data_entry *tbl_data =
8507                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8508
8509         MLX5_ASSERT(entry && sh);
8510         if (tbl_data->jump.action)
8511                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8512         if (tbl_data->tbl.obj)
8513                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8514         if (tbl_data->tunnel_offload && tbl_data->external) {
8515                 struct mlx5_hlist_entry *he;
8516                 struct mlx5_hlist *tunnel_grp_hash;
8517                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8518                 union tunnel_tbl_key tunnel_key = {
8519                         .tunnel_id = tbl_data->tunnel ?
8520                                         tbl_data->tunnel->tunnel_id : 0,
8521                         .group = tbl_data->group_id
8522                 };
8523                 uint32_t table_id = tbl_data->table_id;
8524
8525                 tunnel_grp_hash = tbl_data->tunnel ?
8526                                         tbl_data->tunnel->groups :
8527                                         thub->groups;
8528                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8529                 if (he)
8530                         mlx5_hlist_unregister(tunnel_grp_hash, he);
8531                 DRV_LOG(DEBUG,
8532                         "Table_id %u tunnel %u group %u released.",
8533                         table_id,
8534                         tbl_data->tunnel ?
8535                         tbl_data->tunnel->tunnel_id : 0,
8536                         tbl_data->group_id);
8537         }
8538         mlx5_cache_list_destroy(&tbl_data->matchers);
8539         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8540 }
8541
8542 /**
8543  * Release a flow table.
8544  *
8545  * @param[in] sh
8546  *   Pointer to device shared structure.
8547  * @param[in] tbl
8548  *   Table resource to be released.
8549  *
8550  * @return
8551  *   Returns 0 if the table was released, 1 otherwise.
8552  */
8553 static int
8554 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8555                              struct mlx5_flow_tbl_resource *tbl)
8556 {
8557         struct mlx5_flow_tbl_data_entry *tbl_data =
8558                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8559
8560         if (!tbl)
8561                 return 0;
8562         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8563 }
8564
8565 int
8566 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8567                          struct mlx5_cache_entry *entry, void *cb_ctx)
8568 {
8569         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8570         struct mlx5_flow_dv_matcher *ref = ctx->data;
8571         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8572                                                         entry);
8573
8574         return cur->crc != ref->crc ||
8575                cur->priority != ref->priority ||
8576                memcmp((const void *)cur->mask.buf,
8577                       (const void *)ref->mask.buf, ref->mask.size);
8578 }
8579
8580 struct mlx5_cache_entry *
8581 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8582                           struct mlx5_cache_entry *entry __rte_unused,
8583                           void *cb_ctx)
8584 {
8585         struct mlx5_dev_ctx_shared *sh = list->ctx;
8586         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8587         struct mlx5_flow_dv_matcher *ref = ctx->data;
8588         struct mlx5_flow_dv_matcher *cache;
8589         struct mlx5dv_flow_matcher_attr dv_attr = {
8590                 .type = IBV_FLOW_ATTR_NORMAL,
8591                 .match_mask = (void *)&ref->mask,
8592         };
8593         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8594                                                             typeof(*tbl), tbl);
8595         int ret;
8596
8597         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8598         if (!cache) {
8599                 rte_flow_error_set(ctx->error, ENOMEM,
8600                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8601                                    "cannot create matcher");
8602                 return NULL;
8603         }
8604         *cache = *ref;
8605         dv_attr.match_criteria_enable =
8606                 flow_dv_matcher_enable(cache->mask.buf);
8607         dv_attr.priority = ref->priority;
8608         if (tbl->is_egress)
8609                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8610         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8611                                                &cache->matcher_object);
8612         if (ret) {
8613                 mlx5_free(cache);
8614                 rte_flow_error_set(ctx->error, ENOMEM,
8615                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8616                                    "cannot create matcher");
8617                 return NULL;
8618         }
8619         return &cache->entry;
8620 }
8621
8622 /**
8623  * Register the flow matcher.
8624  *
8625  * @param[in, out] dev
8626  *   Pointer to rte_eth_dev structure.
8627  * @param[in, out] ref
8628  *   Pointer to the reference flow matcher.
8629  * @param[in, out] key
8630  *   Pointer to flow table key.
8631  * @param[in, out] dev_flow
8632  *   Pointer to the dev_flow.
      * @param[in] tunnel
      *   Tunnel offload context, NULL for a regular flow.
      * @param[in] group_id
      *   Flow group the matcher's table belongs to.
8633  * @param[out] error
8634  *   Pointer to error structure.
8635  *
8636  * @return
8637  *   0 on success, otherwise a negative errno value and rte_errno is set.
8638  */
8639 static int
8640 flow_dv_matcher_register(struct rte_eth_dev *dev,
8641                          struct mlx5_flow_dv_matcher *ref,
8642                          union mlx5_flow_tbl_key *key,
8643                          struct mlx5_flow *dev_flow,
8644                          const struct mlx5_flow_tunnel *tunnel,
8645                          uint32_t group_id,
8646                          struct rte_flow_error *error)
8647 {
8648         struct mlx5_cache_entry *entry;
8649         struct mlx5_flow_dv_matcher *cache;
8650         struct mlx5_flow_tbl_resource *tbl;
8651         struct mlx5_flow_tbl_data_entry *tbl_data;
8652         struct mlx5_flow_cb_ctx ctx = {
8653                 .error = error,
8654                 .data = ref,
8655         };
8656
8657         /*
8658          * The tunnel offload API requires this registration for the cases
8659          * when a tunnel match rule was inserted before the tunnel set rule.
8660          */
8661         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
8662                                        key->direction, key->domain,
8663                                        dev_flow->external, tunnel,
8664                                        group_id, 0, error);
8665         if (!tbl)
8666                 return -rte_errno;      /* No need to refill the error info */
8667         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8668         ref->tbl = tbl;
8669         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8670         if (!entry) {
8671                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8672                 return rte_flow_error_set(error, ENOMEM,
8673                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8674                                           "cannot allocate ref memory");
8675         }
8676         cache = container_of(entry, typeof(*cache), entry);
8677         dev_flow->handle->dvh.matcher = cache;
8678         return 0;
8679 }
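
/*
 * Sketch of the reference matcher a caller prepares before registering
 * (hypothetical values; tbl_key, dev_flow, tunnel and group_id stand
 * for the caller's state). The CRC over the mask buffer is what
 * flow_dv_matcher_match_cb() compares first.
 *
 *     struct mlx5_flow_dv_matcher matcher = {
 *             .mask = { .size = sizeof(matcher.mask.buf) },
 *             .priority = 1,
 *     };
 *
 *     matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 *                                 matcher.mask.size);
 *     if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
 *                                  tunnel, group_id, error))
 *             return -rte_errno;
 */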
8680
8681 struct mlx5_hlist_entry *
8682 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8683 {
8684         struct mlx5_dev_ctx_shared *sh = list->ctx;
8685         struct rte_flow_error *error = ctx;
8686         struct mlx5_flow_dv_tag_resource *entry;
8687         uint32_t idx = 0;
8688         int ret;
8689
8690         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8691         if (!entry) {
8692                 rte_flow_error_set(error, ENOMEM,
8693                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8694                                    "cannot allocate resource memory");
8695                 return NULL;
8696         }
8697         entry->idx = idx;
8698         entry->tag_id = key;
8699         ret = mlx5_flow_os_create_flow_action_tag(key,
8700                                                   &entry->action);
8701         if (ret) {
8702                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8703                 rte_flow_error_set(error, ENOMEM,
8704                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8705                                    NULL, "cannot create action");
8706                 return NULL;
8707         }
8708         return &entry->entry;
8709 }
8710
8711 int
8712 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
8713                      struct mlx5_hlist_entry *entry, uint64_t key,
8714                      void *cb_ctx __rte_unused)
8715 {
8716         struct mlx5_flow_dv_tag_resource *tag =
8717                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8718
8719         return key != tag->tag_id;
8720 }
8721
8722 /**
8723  * Find existing tag resource or create and register a new one.
8724  *
8725  * @param[in, out] dev
8726  *   Pointer to rte_eth_dev structure.
8727  * @param[in, out] tag_be24
8728  *   Tag value converted to big endian and right-shifted by 8 bits.
8729  * @param[in, out] dev_flow
8730  *   Pointer to the dev_flow.
8731  * @param[out] error
8732  *   Pointer to error structure.
8733  *
8734  * @return
8735  *   0 on success, otherwise a negative errno value and rte_errno is set.
8736  */
8737 static int
8738 flow_dv_tag_resource_register
8739                         (struct rte_eth_dev *dev,
8740                          uint32_t tag_be24,
8741                          struct mlx5_flow *dev_flow,
8742                          struct rte_flow_error *error)
8743 {
8744         struct mlx5_priv *priv = dev->data->dev_private;
8745         struct mlx5_flow_dv_tag_resource *cache_resource;
8746         struct mlx5_hlist_entry *entry;
8747
8748         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8749         if (entry) {
8750                 cache_resource = container_of
8751                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8752                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8753                 dev_flow->dv.tag_resource = cache_resource;
8754                 return 0;
8755         }
8756         return -rte_errno;
8757 }
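
/*
 * Usage sketch: the tag value is expected pre-converted as the comment
 * above describes; the MARK translation later in this file derives it
 * with mlx5_flow_mark_set() (mark_id is a placeholder here).
 *
 *     uint32_t tag_be = mlx5_flow_mark_set(mark_id);
 *
 *     if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
 *             return -rte_errno;
 *
 * On success dev_flow->dv.tag_resource->action holds the DR action.
 */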
8758
8759 void
8760 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8761                       struct mlx5_hlist_entry *entry)
8762 {
8763         struct mlx5_dev_ctx_shared *sh = list->ctx;
8764         struct mlx5_flow_dv_tag_resource *tag =
8765                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8766
8767         MLX5_ASSERT(tag && sh && tag->action);
8768         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8769         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8770         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8771 }
8772
8773 /**
8774  * Release the tag.
8775  *
8776  * @param dev
8777  *   Pointer to Ethernet device.
8778  * @param tag_idx
8779  *   Tag index.
8780  *
8781  * @return
8782  *   1 while a reference on it exists, 0 when freed.
8783  */
8784 static int
8785 flow_dv_tag_release(struct rte_eth_dev *dev,
8786                     uint32_t tag_idx)
8787 {
8788         struct mlx5_priv *priv = dev->data->dev_private;
8789         struct mlx5_flow_dv_tag_resource *tag;
8790
8791         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8792         if (!tag)
8793                 return 0;
8794         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8795                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8796         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8797 }
8798
8799 /**
8800  * Translate port ID action to vport.
8801  *
8802  * @param[in] dev
8803  *   Pointer to rte_eth_dev structure.
8804  * @param[in] action
8805  *   Pointer to the port ID action.
8806  * @param[out] dst_port_id
8807  *   The target port ID.
8808  * @param[out] error
8809  *   Pointer to the error structure.
8810  *
8811  * @return
8812  *   0 on success, a negative errno value otherwise and rte_errno is set.
8813  */
8814 static int
8815 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8816                                  const struct rte_flow_action *action,
8817                                  uint32_t *dst_port_id,
8818                                  struct rte_flow_error *error)
8819 {
8820         uint32_t port;
8821         struct mlx5_priv *priv;
8822         const struct rte_flow_action_port_id *conf =
8823                         (const struct rte_flow_action_port_id *)action->conf;
8824
8825         port = conf->original ? dev->data->port_id : conf->id;
8826         priv = mlx5_port_to_eswitch_info(port, false);
8827         if (!priv)
8828                 return rte_flow_error_set(error, -rte_errno,
8829                                           RTE_FLOW_ERROR_TYPE_ACTION,
8830                                           NULL,
8831                                           "No eswitch info was found for port");
8832 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8833         /*
8834          * This parameter is transferred to
8835          * mlx5dv_dr_action_create_dest_ib_port().
8836          */
8837         *dst_port_id = priv->dev_port;
8838 #else
8839         /*
8840          * Legacy mode, no LAG configuration is supported.
8841          * This parameter is transferred to
8842          * mlx5dv_dr_action_create_dest_vport().
8843          */
8844         *dst_port_id = priv->vport_id;
8845 #endif
8846         return 0;
8847 }
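
/*
 * Sketch of the action this helper consumes (hypothetical values): a
 * PORT_ID fate whose target resolves either to the IB dev_port or,
 * without HAVE_MLX5DV_DR_DEVX_PORT, to the legacy vport id.
 *
 *     const struct rte_flow_action_port_id conf = {
 *             .original = 0,
 *             .id = 1,
 *     };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *             .conf = &conf,
 *     };
 *     uint32_t dst_port;
 *
 *     if (flow_dv_translate_action_port_id(dev, &action, &dst_port,
 *                                          error))
 *             return -rte_errno;
 */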
8848
8849 /**
8850  * Create a counter with aging configuration.
8851  *
8852  * @param[in] dev
8853  *   Pointer to rte_eth_dev structure.
      * @param[in, out] dev_flow
      *   Pointer to the mlx5_flow.
8854  * @param[in] count
8855  *   Pointer to the counter action configuration.
8856  * @param[in] age
8857  *   Pointer to the aging action configuration.
8858  *
8859  * @return
8860  *   Index to flow counter on success, 0 otherwise.
8861  */
8862 static uint32_t
8863 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8864                                 struct mlx5_flow *dev_flow,
8865                                 const struct rte_flow_action_count *count,
8866                                 const struct rte_flow_action_age *age)
8867 {
8868         uint32_t counter;
8869         struct mlx5_age_param *age_param;
8870
8871         if (count && count->shared)
8872                 counter = flow_dv_counter_get_shared(dev, count->id);
8873         else
8874                 counter = flow_dv_counter_alloc(dev, !!age);
8875         if (!counter || !age)
8876                 return counter;
8877         age_param = flow_dv_counter_idx_get_age(dev, counter);
8878         age_param->context = age->context ? age->context :
8879                 (void *)(uintptr_t)(dev_flow->flow_idx);
8880         age_param->timeout = age->timeout;
8881         age_param->port_id = dev->data->port_id;
8882         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8883         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8884         return counter;
8885 }
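
/*
 * Sketch with hypothetical values: pairing COUNT with AGE allocates the
 * counter from the aging pool and seeds the age parameters as above; a
 * NULL age context falls back to the flow index.
 *
 *     const struct rte_flow_action_count count = { .shared = 0 };
 *     const struct rte_flow_action_age age = {
 *             .timeout = 10,
 *             .context = NULL,
 *     };
 *     uint32_t cnt_idx;
 *
 *     cnt_idx = flow_dv_translate_create_counter(dev, dev_flow,
 *                                                &count, &age);
 */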
8886
8887 /**
8888  * Add Tx queue matcher.
8889  *
8890  * @param[in] dev
8891  *   Pointer to the dev struct.
8892  * @param[in, out] matcher
8893  *   Flow matcher.
8894  * @param[in, out] key
8895  *   Flow matcher value.
8896  * @param[in] item
8897  *   Flow pattern to translate.
8900  */
8901 static void
8902 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8903                                 void *matcher, void *key,
8904                                 const struct rte_flow_item *item)
8905 {
8906         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8907         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8908         void *misc_m =
8909                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8910         void *misc_v =
8911                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8912         struct mlx5_txq_ctrl *txq;
8913         uint32_t queue;
8914
8916         queue_m = (const void *)item->mask;
8917         if (!queue_m)
8918                 return;
8919         queue_v = (const void *)item->spec;
8920         if (!queue_v)
8921                 return;
8922         txq = mlx5_txq_get(dev, queue_v->queue);
8923         if (!txq)
8924                 return;
8925         queue = txq->obj->sq->id;
8926         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8927         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8928                  queue & queue_m->queue);
8929         mlx5_txq_release(dev, queue_v->queue);
8930 }
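
/*
 * Sketch of the PMD-internal item this translation handles
 * (hypothetical queue index); the ethdev Tx queue is resolved to its
 * hardware SQ number before being written into source_sqn.
 *
 *     struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 3 };
 *     struct mlx5_rte_flow_item_tx_queue queue_mask = {
 *             .queue = UINT32_MAX,
 *     };
 *     struct rte_flow_item item = {
 *             .type = (enum rte_flow_item_type)
 *                     MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
 *             .spec = &queue_spec,
 *             .mask = &queue_mask,
 *     };
 */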
8931
8932 /**
8933  * Set the hash fields according to the @p flow information.
8934  *
8935  * @param[in] dev_flow
8936  *   Pointer to the mlx5_flow.
8937  * @param[in] rss_desc
8938  *   Pointer to the mlx5_flow_rss_desc.
8939  */
8940 static void
8941 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8942                        struct mlx5_flow_rss_desc *rss_desc)
8943 {
8944         uint64_t items = dev_flow->handle->layers;
8945         int rss_inner = 0;
8946         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8947
8948         dev_flow->hash_fields = 0;
8949 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8950         if (rss_desc->level >= 2) {
8951                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8952                 rss_inner = 1;
8953         }
8954 #endif
8955         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8956             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8957                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8958                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8959                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8960                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8961                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8962                         else
8963                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8964                 }
8965         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8966                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8967                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8968                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8969                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8970                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8971                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8972                         else
8973                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8974                 }
8975         }
8976         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8977             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8978                 if (rss_types & ETH_RSS_UDP) {
8979                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8980                                 dev_flow->hash_fields |=
8981                                                 IBV_RX_HASH_SRC_PORT_UDP;
8982                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8983                                 dev_flow->hash_fields |=
8984                                                 IBV_RX_HASH_DST_PORT_UDP;
8985                         else
8986                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8987                 }
8988         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8989                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8990                 if (rss_types & ETH_RSS_TCP) {
8991                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8992                                 dev_flow->hash_fields |=
8993                                                 IBV_RX_HASH_SRC_PORT_TCP;
8994                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8995                                 dev_flow->hash_fields |=
8996                                                 IBV_RX_HASH_DST_PORT_TCP;
8997                         else
8998                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8999                 }
9000         }
9001 }
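
/*
 * Worked example (hypothetical input): an outer IPv4/UDP flow with
 * rss_desc->types == (ETH_RSS_UDP | ETH_RSS_L4_SRC_ONLY) and
 * rss_desc->level < 2 resolves to
 *
 *     dev_flow->hash_fields ==
 *             (MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_SRC_PORT_UDP)
 *
 * i.e. both IPv4 addresses and only the UDP source port feed the RSS
 * hash, and the IBV_RX_HASH_INNER bit stays clear.
 */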
9002
9003 /**
9004  * Prepare an Rx Hash queue.
9005  *
9006  * @param dev
9007  *   Pointer to Ethernet device.
9008  * @param[in] dev_flow
9009  *   Pointer to the mlx5_flow.
9010  * @param[in] rss_desc
9011  *   Pointer to the mlx5_flow_rss_desc.
9012  * @param[out] hrxq_idx
9013  *   Hash Rx queue index.
9014  *
9015  * @return
9016  *   The Verbs/DevX object initialized on success, NULL otherwise and
      *   rte_errno is set.
9017  */
9018 static struct mlx5_hrxq *
9019 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
9020                      struct mlx5_flow *dev_flow,
9021                      struct mlx5_flow_rss_desc *rss_desc,
9022                      uint32_t *hrxq_idx)
9023 {
9024         struct mlx5_priv *priv = dev->data->dev_private;
9025         struct mlx5_flow_handle *dh = dev_flow->handle;
9026         struct mlx5_hrxq *hrxq;
9027
9028         MLX5_ASSERT(rss_desc->queue_num);
9029         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
9030         rss_desc->hash_fields = dev_flow->hash_fields;
9031         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
9032         rss_desc->shared_rss = 0;
9033         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
9034         if (!*hrxq_idx)
9035                 return NULL;
9036         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
9037                               *hrxq_idx);
9038         return hrxq;
9039 }
9040
9041 /**
9042  * Release sample sub action resource.
9043  *
9044  * @param[in, out] dev
9045  *   Pointer to rte_eth_dev structure.
9046  * @param[in] act_res
9047  *   Pointer to sample sub action resource.
9048  */
9049 static void
9050 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
9051                                    struct mlx5_flow_sub_actions_idx *act_res)
9052 {
9053         if (act_res->rix_hrxq) {
9054                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
9055                 act_res->rix_hrxq = 0;
9056         }
9057         if (act_res->rix_encap_decap) {
9058                 flow_dv_encap_decap_resource_release(dev,
9059                                                      act_res->rix_encap_decap);
9060                 act_res->rix_encap_decap = 0;
9061         }
9062         if (act_res->rix_port_id_action) {
9063                 flow_dv_port_id_action_resource_release(dev,
9064                                                 act_res->rix_port_id_action);
9065                 act_res->rix_port_id_action = 0;
9066         }
9067         if (act_res->rix_tag) {
9068                 flow_dv_tag_release(dev, act_res->rix_tag);
9069                 act_res->rix_tag = 0;
9070         }
9071         if (act_res->cnt) {
9072                 flow_dv_counter_free(dev, act_res->cnt);
9073                 act_res->cnt = 0;
9074         }
9075 }
9076
9077 int
9078 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
9079                         struct mlx5_cache_entry *entry, void *cb_ctx)
9080 {
9081         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9082         struct rte_eth_dev *dev = ctx->dev;
9083         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9084         struct mlx5_flow_dv_sample_resource *cache_resource =
9085                         container_of(entry, typeof(*cache_resource), entry);
9086
9087         if (resource->ratio == cache_resource->ratio &&
9088             resource->ft_type == cache_resource->ft_type &&
9089             resource->ft_id == cache_resource->ft_id &&
9090             resource->set_action == cache_resource->set_action &&
9091             !memcmp((void *)&resource->sample_act,
9092                     (void *)&cache_resource->sample_act,
9093                     sizeof(struct mlx5_flow_sub_actions_list))) {
9094                 /*
9095                  * The existing cached action is reused, so release the
9096                  * reference counters of the newly prepared sub-actions.
9097                  */
9098                 flow_dv_sample_sub_actions_release(dev,
9099                                                 &resource->sample_idx);
9100                 return 0;
9101         }
9102         return 1;
9103 }
9104
9105 struct mlx5_cache_entry *
9106 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
9107                          struct mlx5_cache_entry *entry __rte_unused,
9108                          void *cb_ctx)
9109 {
9110         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9111         struct rte_eth_dev *dev = ctx->dev;
9112         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
9113         void **sample_dv_actions = resource->sub_actions;
9114         struct mlx5_flow_dv_sample_resource *cache_resource;
9115         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
9116         struct mlx5_priv *priv = dev->data->dev_private;
9117         struct mlx5_dev_ctx_shared *sh = priv->sh;
9118         struct mlx5_flow_tbl_resource *tbl;
9119         uint32_t idx = 0;
9120         const uint32_t next_ft_step = 1;
9121         uint32_t next_ft_id = resource->ft_id + next_ft_step;
9122         uint8_t is_egress = 0;
9123         uint8_t is_transfer = 0;
9124         struct rte_flow_error *error = ctx->error;
             int ret;
9125
9126         /* Register new sample resource. */
9127         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
9128         if (!cache_resource) {
9129                 rte_flow_error_set(error, ENOMEM,
9130                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9131                                           NULL,
9132                                           "cannot allocate resource memory");
9133                 return NULL;
9134         }
9135         *cache_resource = *resource;
9136         /* Create normal path table level */
9137         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9138                 is_transfer = 1;
9139         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
9140                 is_egress = 1;
9141         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
9142                                         is_egress, is_transfer,
9143                                         true, NULL, 0, 0, error);
9144         if (!tbl) {
9145                 rte_flow_error_set(error, ENOMEM,
9146                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9147                                           NULL,
9148                                           "failed to create normal path table "
9149                                           "for sample");
9150                 goto error;
9151         }
9152         int ret;
9153
9154         cache_resource->normal_path_tbl = tbl;
9155         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
9156                 ret = mlx5_flow_os_create_flow_action_default_miss
9157                         (&cache_resource->default_miss);
9158                 if (ret) {
9159                         rte_flow_error_set(error, ENOMEM,
9160                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9161                                                 NULL,
9162                                                 "cannot create default miss "
9163                                                 "action");
9164                         goto error;
9165                 }
9166                 sample_dv_actions[resource->sample_act.actions_num++] =
9167                                                 cache_resource->default_miss;
9168         }
9169         /* Create a DR sample action */
9170         sampler_attr.sample_ratio = cache_resource->ratio;
9171         sampler_attr.default_next_table = tbl->obj;
9172         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
9173         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
9174                                                         &sample_dv_actions[0];
9175         sampler_attr.action = cache_resource->set_action;
9176         if (mlx5_os_flow_dr_create_flow_action_sampler
9177                         (&sampler_attr, &cache_resource->verbs_action)) {
9178                 rte_flow_error_set(error, ENOMEM,
9179                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9180                                         NULL, "cannot create sample action");
9181                 goto error;
9182         }
9183         cache_resource->idx = idx;
9184         cache_resource->dev = dev;
9185         return &cache_resource->entry;
9186 error:
9187         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
9188             cache_resource->default_miss)
9189                 claim_zero(mlx5_flow_os_destroy_flow_action
9190                                 (cache_resource->default_miss));
9191         else
9192                 flow_dv_sample_sub_actions_release(dev,
9193                                                    &cache_resource->sample_idx);
9194         if (cache_resource->normal_path_tbl)
9195                 flow_dv_tbl_resource_release(MLX5_SH(dev),
9196                                 cache_resource->normal_path_tbl);
9197         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
9198         return NULL;
9200 }
9201
9202 /**
9203  * Find existing sample resource or create and register a new one.
9204  *
9205  * @param[in, out] dev
9206  *   Pointer to rte_eth_dev structure.
9207  * @param[in] resource
9208  *   Pointer to sample resource.
9209  * @param[in, out] dev_flow
9210  *   Pointer to the dev_flow.
9211  * @param[out] error
9212  *   Pointer to error structure.
9213  *
9214  * @return
9215  *   0 on success, otherwise a negative errno value and rte_errno is set.
9216  */
9217 static int
9218 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
9219                          struct mlx5_flow_dv_sample_resource *resource,
9220                          struct mlx5_flow *dev_flow,
9221                          struct rte_flow_error *error)
9222 {
9223         struct mlx5_flow_dv_sample_resource *cache_resource;
9224         struct mlx5_cache_entry *entry;
9225         struct mlx5_priv *priv = dev->data->dev_private;
9226         struct mlx5_flow_cb_ctx ctx = {
9227                 .dev = dev,
9228                 .error = error,
9229                 .data = resource,
9230         };
9231
9232         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
9233         if (!entry)
9234                 return -rte_errno;
9235         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9236         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
9237         dev_flow->dv.sample_res = cache_resource;
9238         return 0;
9239 }
9240
9241 int
9242 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
9243                             struct mlx5_cache_entry *entry, void *cb_ctx)
9244 {
9245         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9246         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9247         struct rte_eth_dev *dev = ctx->dev;
9248         struct mlx5_flow_dv_dest_array_resource *cache_resource =
9249                         container_of(entry, typeof(*cache_resource), entry);
9250         uint32_t idx = 0;
9251
9252         if (resource->num_of_dest == cache_resource->num_of_dest &&
9253             resource->ft_type == cache_resource->ft_type &&
9254             !memcmp((void *)cache_resource->sample_act,
9255                     (void *)resource->sample_act,
9256                    (resource->num_of_dest *
9257                    sizeof(struct mlx5_flow_sub_actions_list)))) {
9258                 /*
9259                  * The existing cached action is reused, so release the
9260                  * reference counters of the newly prepared sub-actions.
9261                  */
9262                 for (idx = 0; idx < resource->num_of_dest; idx++)
9263                         flow_dv_sample_sub_actions_release(dev,
9264                                         &resource->sample_idx[idx]);
9265                 return 0;
9266         }
9267         return 1;
9268 }
9269
9270 struct mlx5_cache_entry *
9271 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
9272                          struct mlx5_cache_entry *entry __rte_unused,
9273                          void *cb_ctx)
9274 {
9275         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
9276         struct rte_eth_dev *dev = ctx->dev;
9277         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9278         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
9279         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
9280         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
9281         struct mlx5_priv *priv = dev->data->dev_private;
9282         struct mlx5_dev_ctx_shared *sh = priv->sh;
9283         struct mlx5_flow_sub_actions_list *sample_act;
9284         struct mlx5dv_dr_domain *domain;
9285         uint32_t idx = 0, res_idx = 0;
9286         struct rte_flow_error *error = ctx->error;
9287         int ret;
9288
9289         /* Register new destination array resource. */
9290         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
9291                                             &res_idx);
9292         if (!cache_resource) {
9293                 rte_flow_error_set(error, ENOMEM,
9294                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9295                                           NULL,
9296                                           "cannot allocate resource memory");
9297                 return NULL;
9298         }
9299         *cache_resource = *resource;
9300         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
9301                 domain = sh->fdb_domain;
9302         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
9303                 domain = sh->rx_domain;
9304         else
9305                 domain = sh->tx_domain;
9306         for (idx = 0; idx < resource->num_of_dest; idx++) {
9307                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
9308                                  mlx5_malloc(MLX5_MEM_ZERO,
9309                                  sizeof(struct mlx5dv_dr_action_dest_attr),
9310                                  0, SOCKET_ID_ANY);
9311                 if (!dest_attr[idx]) {
9312                         rte_flow_error_set(error, ENOMEM,
9313                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9314                                            NULL,
9315                                            "cannot allocate resource memory");
9316                         goto error;
9317                 }
9318                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
9319                 sample_act = &resource->sample_act[idx];
9320                 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
9321                         dest_attr[idx]->dest = sample_act->dr_queue_action;
9322                 } else if (sample_act->action_flags ==
9323                           (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
9324                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
9325                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
9326                         dest_attr[idx]->dest_reformat->reformat =
9327                                         sample_act->dr_encap_action;
9328                         dest_attr[idx]->dest_reformat->dest =
9329                                         sample_act->dr_port_id_action;
9330                 } else if (sample_act->action_flags ==
9331                            MLX5_FLOW_ACTION_PORT_ID) {
9332                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
9333                 }
9334         }
9335         /* Create a dest array action */
9336         ret = mlx5_os_flow_dr_create_flow_action_dest_array
9337                                                 (domain,
9338                                                  cache_resource->num_of_dest,
9339                                                  dest_attr,
9340                                                  &cache_resource->action);
9341         if (ret) {
9342                 rte_flow_error_set(error, ENOMEM,
9343                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9344                                    NULL,
9345                                    "cannot create destination array action");
9346                 goto error;
9347         }
9348         cache_resource->idx = res_idx;
9349         cache_resource->dev = dev;
9350         for (idx = 0; idx < resource->num_of_dest; idx++)
9351                 mlx5_free(dest_attr[idx]);
9352         return &cache_resource->entry;
9353 error:
9354         for (idx = 0; idx < resource->num_of_dest; idx++) {
9355                 struct mlx5_flow_sub_actions_idx *act_res =
9356                                         &cache_resource->sample_idx[idx];
9357                 if (act_res->rix_hrxq &&
9358                     !mlx5_hrxq_release(dev,
9359                                 act_res->rix_hrxq))
9360                         act_res->rix_hrxq = 0;
9361                 if (act_res->rix_encap_decap &&
9362                         !flow_dv_encap_decap_resource_release(dev,
9363                                 act_res->rix_encap_decap))
9364                         act_res->rix_encap_decap = 0;
9365                 if (act_res->rix_port_id_action &&
9366                         !flow_dv_port_id_action_resource_release(dev,
9367                                 act_res->rix_port_id_action))
9368                         act_res->rix_port_id_action = 0;
9369                 if (dest_attr[idx])
9370                         mlx5_free(dest_attr[idx]);
9371         }
9373         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
9374         return NULL;
9375 }
9376
9377 /**
9378  * Find existing destination array resource or create and register a new one.
9379  *
9380  * @param[in, out] dev
9381  *   Pointer to rte_eth_dev structure.
9382  * @param[in] resource
9383  *   Pointer to destination array resource.
9384  * @param[in, out] dev_flow
9385  *   Pointer to the dev_flow.
9386  * @param[out] error
9387  *   Pointer to error structure.
9388  *
9389  * @return
9390  *   0 on success, otherwise a negative errno value and rte_errno is set.
9391  */
9392 static int
9393 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
9394                          struct mlx5_flow_dv_dest_array_resource *resource,
9395                          struct mlx5_flow *dev_flow,
9396                          struct rte_flow_error *error)
9397 {
9398         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9399         struct mlx5_priv *priv = dev->data->dev_private;
9400         struct mlx5_cache_entry *entry;
9401         struct mlx5_flow_cb_ctx ctx = {
9402                 .dev = dev,
9403                 .error = error,
9404                 .data = resource,
9405         };
9406
9407         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
9408         if (!entry)
9409                 return -rte_errno;
9410         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9411         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
9412         dev_flow->dv.dest_array_res = cache_resource;
9413         return 0;
9414 }
9415
9416 /**
9417  * Convert Sample action to DV specification.
9418  *
9419  * @param[in] dev
9420  *   Pointer to rte_eth_dev structure.
9421  * @param[in] action
9422  *   Pointer to sample action structure.
9423  * @param[in, out] dev_flow
9424  *   Pointer to the mlx5_flow.
9425  * @param[in] attr
9426  *   Pointer to the flow attributes.
9427  * @param[in, out] num_of_dest
9428  *   Pointer to the number of destinations.
9429  * @param[in, out] sample_actions
9430  *   Pointer to sample actions list.
9431  * @param[in, out] res
9432  *   Pointer to sample resource.
9433  * @param[out] error
9434  *   Pointer to the error structure.
9435  *
9436  * @return
9437  *   0 on success, a negative errno value otherwise and rte_errno is set.
9438  */
9439 static int
9440 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
9441                                 const struct rte_flow_action_sample *action,
9442                                 struct mlx5_flow *dev_flow,
9443                                 const struct rte_flow_attr *attr,
9444                                 uint32_t *num_of_dest,
9445                                 void **sample_actions,
9446                                 struct mlx5_flow_dv_sample_resource *res,
9447                                 struct rte_flow_error *error)
9448 {
9449         struct mlx5_priv *priv = dev->data->dev_private;
9450         const struct rte_flow_action *sub_actions;
9451         struct mlx5_flow_sub_actions_list *sample_act;
9452         struct mlx5_flow_sub_actions_idx *sample_idx;
9453         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9454         struct mlx5_flow_rss_desc *rss_desc;
9455         uint64_t action_flags = 0;
9456
9457         MLX5_ASSERT(wks);
9458         rss_desc = &wks->rss_desc;
9459         sample_act = &res->sample_act;
9460         sample_idx = &res->sample_idx;
9461         res->ratio = action->ratio;
9462         sub_actions = action->actions;
9463         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
9464                 int type = sub_actions->type;
9465                 uint32_t pre_rix = 0;
9466                 void *pre_r;
9467                 switch (type) {
9468                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9469                 {
9470                         const struct rte_flow_action_queue *queue;
9471                         struct mlx5_hrxq *hrxq;
9472                         uint32_t hrxq_idx;
9473
9474                         queue = sub_actions->conf;
9475                         rss_desc->queue_num = 1;
9476                         rss_desc->queue[0] = queue->index;
9477                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9478                                                     rss_desc, &hrxq_idx);
9479                         if (!hrxq)
9480                                 return rte_flow_error_set
9481                                         (error, rte_errno,
9482                                          RTE_FLOW_ERROR_TYPE_ACTION,
9483                                          NULL,
9484                                          "cannot create fate queue");
9485                         sample_act->dr_queue_action = hrxq->action;
9486                         sample_idx->rix_hrxq = hrxq_idx;
9487                         sample_actions[sample_act->actions_num++] =
9488                                                 hrxq->action;
9489                         (*num_of_dest)++;
9490                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9491                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9492                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9493                         dev_flow->handle->fate_action =
9494                                         MLX5_FLOW_FATE_QUEUE;
9495                         break;
9496                 }
9497                 case RTE_FLOW_ACTION_TYPE_RSS:
9498                 {
9499                         struct mlx5_hrxq *hrxq;
9500                         uint32_t hrxq_idx;
9501                         const struct rte_flow_action_rss *rss;
9502                         const uint8_t *rss_key;
9503
9504                         rss = sub_actions->conf;
9505                         memcpy(rss_desc->queue, rss->queue,
9506                                rss->queue_num * sizeof(uint16_t));
9507                         rss_desc->queue_num = rss->queue_num;
9508                         /* NULL RSS key indicates default RSS key. */
9509                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9510                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9511                         /*
9512                          * rss->level and rss.types should be set in advance
9513                          * when expanding items for RSS.
9514                          */
9515                         flow_dv_hashfields_set(dev_flow, rss_desc);
9516                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9517                                                     rss_desc, &hrxq_idx);
9518                         if (!hrxq)
9519                                 return rte_flow_error_set
9520                                         (error, rte_errno,
9521                                          RTE_FLOW_ERROR_TYPE_ACTION,
9522                                          NULL,
9523                                          "cannot create fate queue");
9524                         sample_act->dr_queue_action = hrxq->action;
9525                         sample_idx->rix_hrxq = hrxq_idx;
9526                         sample_actions[sample_act->actions_num++] =
9527                                                 hrxq->action;
9528                         (*num_of_dest)++;
9529                         action_flags |= MLX5_FLOW_ACTION_RSS;
9530                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9531                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9532                         dev_flow->handle->fate_action =
9533                                         MLX5_FLOW_FATE_QUEUE;
9534                         break;
9535                 }
9536                 case RTE_FLOW_ACTION_TYPE_MARK:
9537                 {
9538                         uint32_t tag_be = mlx5_flow_mark_set
9539                                 (((const struct rte_flow_action_mark *)
9540                                 (sub_actions->conf))->id);
9541
9542                         dev_flow->handle->mark = 1;
9543                         pre_rix = dev_flow->handle->dvh.rix_tag;
9544                         /* Save the mark resource before sample */
9545                         pre_r = dev_flow->dv.tag_resource;
9546                         if (flow_dv_tag_resource_register(dev, tag_be,
9547                                                   dev_flow, error))
9548                                 return -rte_errno;
9549                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9550                         sample_act->dr_tag_action =
9551                                 dev_flow->dv.tag_resource->action;
9552                         sample_idx->rix_tag =
9553                                 dev_flow->handle->dvh.rix_tag;
9554                         sample_actions[sample_act->actions_num++] =
9555                                                 sample_act->dr_tag_action;
9556                         /* Recover the mark resource after sample */
9557                         dev_flow->dv.tag_resource = pre_r;
9558                         dev_flow->handle->dvh.rix_tag = pre_rix;
9559                         action_flags |= MLX5_FLOW_ACTION_MARK;
9560                         break;
9561                 }
9562                 case RTE_FLOW_ACTION_TYPE_COUNT:
9563                 {
9564                         uint32_t counter;
9565
9566                         counter = flow_dv_translate_create_counter(dev,
9567                                         dev_flow, sub_actions->conf, 0);
9568                         if (!counter)
9569                                 return rte_flow_error_set
9570                                                 (error, rte_errno,
9571                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9572                                                  NULL,
9573                                                  "cannot create counter"
9574                                                  " object.");
9575                         sample_idx->cnt = counter;
9576                         sample_act->dr_cnt_action =
9577                                   (flow_dv_counter_get_by_idx(dev,
9578                                   counter, NULL))->action;
9579                         sample_actions[sample_act->actions_num++] =
9580                                                 sample_act->dr_cnt_action;
9581                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9582                         break;
9583                 }
9584                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9585                 {
9586                         struct mlx5_flow_dv_port_id_action_resource
9587                                         port_id_resource;
9588                         uint32_t port_id = 0;
9589
9590                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9591                         /* Save the port id resource before sample */
9592                         pre_rix = dev_flow->handle->rix_port_id_action;
9593                         pre_r = dev_flow->dv.port_id_action;
9594                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9595                                                              &port_id, error))
9596                                 return -rte_errno;
9597                         port_id_resource.port_id = port_id;
9598                         if (flow_dv_port_id_action_resource_register
9599                             (dev, &port_id_resource, dev_flow, error))
9600                                 return -rte_errno;
9601                         sample_act->dr_port_id_action =
9602                                 dev_flow->dv.port_id_action->action;
9603                         sample_idx->rix_port_id_action =
9604                                 dev_flow->handle->rix_port_id_action;
9605                         sample_actions[sample_act->actions_num++] =
9606                                                 sample_act->dr_port_id_action;
9607                         /* Recover the port id resource after sample */
9608                         dev_flow->dv.port_id_action = pre_r;
9609                         dev_flow->handle->rix_port_id_action = pre_rix;
9610                         (*num_of_dest)++;
9611                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9612                         break;
9613                 }
9614                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9615                         /* Save the encap resource before sample */
9616                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9617                         pre_r = dev_flow->dv.encap_decap;
9618                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9619                                                            dev_flow,
9620                                                            attr->transfer,
9621                                                            error))
9622                                 return -rte_errno;
9623                         sample_act->dr_encap_action =
9624                                 dev_flow->dv.encap_decap->action;
9625                         sample_idx->rix_encap_decap =
9626                                 dev_flow->handle->dvh.rix_encap_decap;
9627                         sample_actions[sample_act->actions_num++] =
9628                                                 sample_act->dr_encap_action;
9629                         /* Recover the encap resource after sample */
9630                         dev_flow->dv.encap_decap = pre_r;
9631                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9632                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9633                         break;
9634                 default:
9635                         return rte_flow_error_set(error, EINVAL,
9636                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9637                                 NULL,
9638                                 "unsupported action for sampler");
9639                 }
9640         }
9641         sample_act->action_flags = action_flags;
9642         res->ft_id = dev_flow->dv.group;
9643         if (attr->transfer) {
9644                 union {
9645                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9646                         uint64_t set_action;
9647                 } action_ctx = { .set_action = 0 };
9648
9649                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9650                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9651                          MLX5_MODIFICATION_TYPE_SET);
9652                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9653                          MLX5_MODI_META_REG_C_0);
9654                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9655                          priv->vport_meta_tag);
9656                 res->set_action = action_ctx.set_action;
9657         } else if (attr->ingress) {
9658                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9659         } else {
9660                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9661         }
9662         return 0;
9663 }
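
/*
 * Sketch of a sample action this translation accepts (hypothetical
 * values): mirror one of every two packets to queue 0 and count them
 * on the sample path.
 *
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action sub_actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_action_sample sample = {
 *             .ratio = 2,
 *             .actions = sub_actions,
 *     };
 */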
9664
9665 /**
9666  * Create the sample action and destination array resources.
9667  *
9668  * @param[in] dev
9669  *   Pointer to rte_eth_dev structure.
9670  * @param[in, out] dev_flow
9671  *   Pointer to the mlx5_flow.
9672  * @param[in] num_of_dest
9673  *   The number of destinations.
9674  * @param[in, out] res
9675  *   Pointer to sample resource.
9676  * @param[in, out] mdest_res
9677  *   Pointer to destination array resource.
9678  * @param[in] sample_actions
9679  *   Pointer to sample path actions list.
9680  * @param[in] action_flags
9681  *   Holds the actions detected until now.
9682  * @param[out] error
9683  *   Pointer to the error structure.
9684  *
9685  * @return
9686  *   0 on success, a negative errno value otherwise and rte_errno is set.
9687  */
9688 static int
9689 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9690                              struct mlx5_flow *dev_flow,
9691                              uint32_t num_of_dest,
9692                              struct mlx5_flow_dv_sample_resource *res,
9693                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9694                              void **sample_actions,
9695                              uint64_t action_flags,
9696                              struct rte_flow_error *error)
9697 {
9698         /* Update the normal path action resource into the last array index. */
9699         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9700         struct mlx5_flow_sub_actions_list *sample_act =
9701                                         &mdest_res->sample_act[dest_index];
9702         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9703         struct mlx5_flow_rss_desc *rss_desc;
9704         uint32_t normal_idx = 0;
9705         struct mlx5_hrxq *hrxq;
9706         uint32_t hrxq_idx;
9707
9708         MLX5_ASSERT(wks);
9709         rss_desc = &wks->rss_desc;
9710         if (num_of_dest > 1) {
9711                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9712                         /* Handle QP action for mirroring */
9713                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9714                                                     rss_desc, &hrxq_idx);
9715                         if (!hrxq)
9716                                 return rte_flow_error_set
9717                                      (error, rte_errno,
9718                                       RTE_FLOW_ERROR_TYPE_ACTION,
9719                                       NULL,
9720                                       "cannot create rx queue");
9721                         normal_idx++;
9722                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9723                         sample_act->dr_queue_action = hrxq->action;
9724                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9725                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9726                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9727                 }
9728                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9729                         normal_idx++;
9730                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9731                                 dev_flow->handle->dvh.rix_encap_decap;
9732                         sample_act->dr_encap_action =
9733                                 dev_flow->dv.encap_decap->action;
9734                 }
9735                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9736                         normal_idx++;
9737                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9738                                 dev_flow->handle->rix_port_id_action;
9739                         sample_act->dr_port_id_action =
9740                                 dev_flow->dv.port_id_action->action;
9741                 }
9742                 sample_act->actions_num = normal_idx;
9743                 /* Update the sample action resource into the first array index. */
9744                 mdest_res->ft_type = res->ft_type;
9745                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9746                                 sizeof(struct mlx5_flow_sub_actions_idx));
9747                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9748                                 sizeof(struct mlx5_flow_sub_actions_list));
9749                 mdest_res->num_of_dest = num_of_dest;
9750                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9751                                                          dev_flow, error))
9752                         return rte_flow_error_set(error, EINVAL,
9753                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9754                                                   NULL, "can't create sample "
9755                                                   "action");
9756         } else {
9757                 res->sub_actions = sample_actions;
9758                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9759                         return rte_flow_error_set(error, EINVAL,
9760                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9761                                                   NULL,
9762                                                   "can't create sample action");
9763         }
9764         return 0;
9765 }
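
/*
 * Destination array layout note (illustrative): when num_of_dest > 1, the
 * sampler sub-actions are copied into index 0 of mdest_res and the normal
 * path fate (queue/port_id/encap) is kept at index MLX5_MAX_DEST_NUM - 1,
 * so a single dest-array action steers packets to both paths.
 */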
9766
9767 /**
9768  * Remove an ASO age action from age actions list.
9769  *
9770  * @param[in] dev
9771  *   Pointer to the Ethernet device structure.
9772  * @param[in] age
9773  *   Pointer to the aso age action handler.
9774  */
9775 static void
9776 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9777                                 struct mlx5_aso_age_action *age)
9778 {
9779         struct mlx5_age_info *age_info;
9780         struct mlx5_age_param *age_param = &age->age_params;
9781         struct mlx5_priv *priv = dev->data->dev_private;
9782         uint16_t expected = AGE_CANDIDATE;
9783
9784         age_info = GET_PORT_AGE_INFO(priv);
9785         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9786                                          AGE_FREE, false, __ATOMIC_RELAXED,
9787                                          __ATOMIC_RELAXED)) {
9788                 /*
9789                  * We need the lock even on age timeout, since the age
9790                  * action may still be in process.
9791                  */
9792                 rte_spinlock_lock(&age_info->aged_sl);
9793                 LIST_REMOVE(age, next);
9794                 rte_spinlock_unlock(&age_info->aged_sl);
9795                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9796         }
9797 }
9798
9799 /**
9800  * Release an ASO age action.
9801  *
9802  * @param[in] dev
9803  *   Pointer to the Ethernet device structure.
9804  * @param[in] age_idx
9805  *   Index of ASO age action to release.
9809  *
9810  * @return
9811  *   0 when age action was removed, otherwise the number of references.
9812  */
9813 static int
9814 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
9815 {
9816         struct mlx5_priv *priv = dev->data->dev_private;
9817         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9818         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
9819         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
9820
9821         if (!ret) {
9822                 flow_dv_aso_age_remove_from_age(dev, age);
9823                 rte_spinlock_lock(&mng->free_sl);
9824                 LIST_INSERT_HEAD(&mng->free, age, next);
9825                 rte_spinlock_unlock(&mng->free_sl);
9826         }
9827         return ret;
9828 }
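
/*
 * Minimal usage sketch (illustrative only, assuming the action holds two
 * references): only the release of the last reference returns 0 and puts
 * the action back on the management free list.
 */
static inline void
flow_dv_aso_age_release_example(struct rte_eth_dev *dev, uint32_t age_idx)
{
	if (flow_dv_aso_age_release(dev, age_idx))
		/* Still referenced; releasing again drops the last one. */
		(void)flow_dv_aso_age_release(dev, age_idx);
}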
9829
9830 /**
9831  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9832  *
9833  * @param[in] dev
9834  *   Pointer to the Ethernet device structure.
9835  *
9836  * @return
9837  *   0 on success, otherwise negative errno value and rte_errno is set.
9838  */
9839 static int
9840 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9841 {
9842         struct mlx5_priv *priv = dev->data->dev_private;
9843         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9844         void *old_pools = mng->pools;
9845         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9846         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9847         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9848
9849         if (!pools) {
9850                 rte_errno = ENOMEM;
9851                 return -ENOMEM;
9852         }
9853         if (old_pools) {
9854                 memcpy(pools, old_pools,
9855                        mng->n * sizeof(struct mlx5_aso_age_pool *));
9856                 mlx5_free(old_pools);
9857         } else {
9858                 /* First ASO flow hit allocation - starting ASO data-path. */
9859                 int ret = mlx5_aso_queue_start(priv->sh);
9860
9861                 if (ret) {
9862                         mlx5_free(pools);
9863                         return ret;
9864                 }
9865         }
9866         mng->n = resize;
9867         mng->pools = pools;
9868         return 0;
9869 }
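
/*
 * Capacity note (illustrative): mng->n grows by MLX5_CNT_CONTAINER_RESIZE
 * pool slots per resize and each pool serves MLX5_ASO_AGE_ACTIONS_PER_POOL
 * actions, so after k resizes up to
 * k * MLX5_CNT_CONTAINER_RESIZE * MLX5_ASO_AGE_ACTIONS_PER_POOL age actions
 * can be allocated.
 */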
9870
9871 /**
9872  * Create and initialize a new ASO aging pool.
9873  *
9874  * @param[in] dev
9875  *   Pointer to the Ethernet device structure.
9876  * @param[out] age_free
9877  *   Where to put the pointer of a new age action.
9878  *
9879  * @return
9880  *   The new age action pool pointer, with @p age_free set, on success;
9881  *   NULL otherwise and rte_errno is set.
9882  */
9883 static struct mlx5_aso_age_pool *
9884 flow_dv_age_pool_create(struct rte_eth_dev *dev,
9885                         struct mlx5_aso_age_action **age_free)
9886 {
9887         struct mlx5_priv *priv = dev->data->dev_private;
9888         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9889         struct mlx5_aso_age_pool *pool = NULL;
9890         struct mlx5_devx_obj *obj = NULL;
9891         uint32_t i;
9892
9893         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
9894                                                     priv->sh->pdn);
9895         if (!obj) {
9896                 rte_errno = ENODATA;
9897                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
9898                 return NULL;
9899         }
9900         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9901         if (!pool) {
9902                 claim_zero(mlx5_devx_cmd_destroy(obj));
9903                 rte_errno = ENOMEM;
9904                 return NULL;
9905         }
9906         pool->flow_hit_aso_obj = obj;
9907         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
9908         rte_spinlock_lock(&mng->resize_sl);
9909         pool->index = mng->next;
9910         /* Resize pools array if there is no room for the new pool in it. */
9911         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
9912                 claim_zero(mlx5_devx_cmd_destroy(obj));
9913                 mlx5_free(pool);
9914                 rte_spinlock_unlock(&mng->resize_sl);
9915                 return NULL;
9916         }
9917         mng->pools[pool->index] = pool;
9918         mng->next++;
9919         rte_spinlock_unlock(&mng->resize_sl);
9920         /* Assign the first action in the new pool; the rest go to the free list. */
9921         *age_free = &pool->actions[0];
9922         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
9923                 pool->actions[i].offset = i;
9924                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
9925         }
9926         return pool;
9927 }
9928
9929 /**
9930  * Allocate an ASO aging bit.
9931  *
9932  * @param[in] dev
9933  *   Pointer to the Ethernet device structure.
9934  * @param[out] error
9935  *   Pointer to the error structure.
9936  *
9937  * @return
9938  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
9939  */
9940 static uint32_t
9941 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
9942 {
9943         struct mlx5_priv *priv = dev->data->dev_private;
9944         const struct mlx5_aso_age_pool *pool;
9945         struct mlx5_aso_age_action *age_free = NULL;
9946         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9947
9948         MLX5_ASSERT(mng);
9949         /* Try to get the next free age action bit. */
9950         rte_spinlock_lock(&mng->free_sl);
9951         age_free = LIST_FIRST(&mng->free);
9952         if (age_free) {
9953                 LIST_REMOVE(age_free, next);
9954         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
9955                 rte_spinlock_unlock(&mng->free_sl);
9956                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
9957                                    NULL, "failed to create ASO age pool");
9958                 return 0; /* 0 is an error. */
9959         }
9960         rte_spinlock_unlock(&mng->free_sl);
9961         pool = container_of
9962           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
9963                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
9964                                                                        actions);
9965         if (!age_free->dr_action) {
9966                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
9967                                                  error);
9968
9969                 if (reg_c < 0) {
9970                         rte_flow_error_set(error, rte_errno,
9971                                            RTE_FLOW_ERROR_TYPE_ACTION,
9972                                            NULL, "failed to get reg_c "
9973                                            "for ASO flow hit");
9974                         return 0; /* 0 is an error. */
9975                 }
9976 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
9977                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
9978                                 (priv->sh->rx_domain,
9979                                  pool->flow_hit_aso_obj->obj, age_free->offset,
9980                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
9981                                  (reg_c - REG_C_0));
9982 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
9983                 if (!age_free->dr_action) {
9984                         rte_errno = errno;
9985                         rte_spinlock_lock(&mng->free_sl);
9986                         LIST_INSERT_HEAD(&mng->free, age_free, next);
9987                         rte_spinlock_unlock(&mng->free_sl);
9988                         rte_flow_error_set(error, rte_errno,
9989                                            RTE_FLOW_ERROR_TYPE_ACTION,
9990                                            NULL, "failed to create ASO "
9991                                            "flow hit action");
9992                         return 0; /* 0 is an error. */
9993                 }
9994         }
9995         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
9996         return pool->index | ((age_free->offset + 1) << 16);
9997 }
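
/*
 * Illustrative sketch, not used by the driver: the index returned above
 * packs the pool index into the lower 16 bits and the in-pool offset
 * biased by one into the upper 16 bits, keeping 0 reserved as the error
 * value. A matching decode of this encoding (cf. flow_aso_age_get_by_idx())
 * would look like:
 */
static inline void
flow_dv_aso_age_idx_decode(uint32_t age_idx,
			   uint16_t *pool_idx, uint16_t *offset)
{
	*pool_idx = age_idx & UINT16_MAX;
	*offset = (age_idx >> 16) - 1;
}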
9998
9999 /**
10000  * Create an age action using the ASO mechanism.
10001  *
10002  * @param[in] dev
10003  *   Pointer to rte_eth_dev structure.
10004  * @param[in] age
10005  *   Pointer to the aging action configuration.
10006  * @param[out] error
10007  *   Pointer to the error structure.
10008  *
10009  * @return
10010  *   Index to the ASO age action on success, 0 otherwise and rte_errno is set.
10011  */
10012 static uint32_t
10013 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
10014                                  const struct rte_flow_action_age *age,
10015                                  struct rte_flow_error *error)
10016 {
10017         uint32_t age_idx = 0;
10018         struct mlx5_aso_age_action *aso_age;
10019
10020         age_idx = flow_dv_aso_age_alloc(dev, error);
10021         if (!age_idx)
10022                 return 0;
10023         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
10024         aso_age->age_params.context = age->context;
10025         aso_age->age_params.timeout = age->timeout;
10026         aso_age->age_params.port_id = dev->data->port_id;
10027         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
10028                          __ATOMIC_RELAXED);
10029         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
10030                          __ATOMIC_RELAXED);
10031         return age_idx;
10032 }
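
/*
 * Usage sketch (illustrative only): aging is requested by attaching an AGE
 * action to a flow; the 10 second timeout and NULL context below are
 * example values, not driver defaults.
 */
static inline uint32_t
flow_dv_aso_age_example(struct rte_eth_dev *dev, struct rte_flow_error *err)
{
	struct rte_flow_action_age age_conf = {
		.timeout = 10, /* Seconds without a hit before aging out. */
		.context = NULL, /* Optional application cookie. */
	};

	return flow_dv_translate_create_aso_age(dev, &age_conf, err);
}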
10033
10034 /**
10035  * Fill the flow with DV spec, lock free
10036  * (mutex should be acquired by caller).
10037  *
10038  * @param[in] dev
10039  *   Pointer to rte_eth_dev structure.
10040  * @param[in, out] dev_flow
10041  *   Pointer to the sub flow.
10042  * @param[in] attr
10043  *   Pointer to the flow attributes.
10044  * @param[in] items
10045  *   Pointer to the list of items.
10046  * @param[in] actions
10047  *   Pointer to the list of actions.
10048  * @param[out] error
10049  *   Pointer to the error structure.
10050  *
10051  * @return
10052  *   0 on success, a negative errno value otherwise and rte_errno is set.
10053  */
10054 static int
10055 flow_dv_translate(struct rte_eth_dev *dev,
10056                   struct mlx5_flow *dev_flow,
10057                   const struct rte_flow_attr *attr,
10058                   const struct rte_flow_item items[],
10059                   const struct rte_flow_action actions[],
10060                   struct rte_flow_error *error)
10061 {
10062         struct mlx5_priv *priv = dev->data->dev_private;
10063         struct mlx5_dev_config *dev_conf = &priv->config;
10064         struct rte_flow *flow = dev_flow->flow;
10065         struct mlx5_flow_handle *handle = dev_flow->handle;
10066         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10067         struct mlx5_flow_rss_desc *rss_desc;
10068         uint64_t item_flags = 0;
10069         uint64_t last_item = 0;
10070         uint64_t action_flags = 0;
10071         uint64_t priority = attr->priority;
10072         struct mlx5_flow_dv_matcher matcher = {
10073                 .mask = {
10074                         .size = sizeof(matcher.mask.buf) -
10075                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
10076                 },
10077         };
10078         int actions_n = 0;
10079         bool actions_end = false;
10080         union {
10081                 struct mlx5_flow_dv_modify_hdr_resource res;
10082                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
10083                             sizeof(struct mlx5_modification_cmd) *
10084                             (MLX5_MAX_MODIFY_NUM + 1)];
10085         } mhdr_dummy;
10086         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
10087         const struct rte_flow_action_count *count = NULL;
10088         const struct rte_flow_action_age *age = NULL;
10089         union flow_dv_attr flow_attr = { .attr = 0 };
10090         uint32_t tag_be;
10091         union mlx5_flow_tbl_key tbl_key;
10092         uint32_t modify_action_position = UINT32_MAX;
10093         void *match_mask = matcher.mask.buf;
10094         void *match_value = dev_flow->dv.value.buf;
10095         uint8_t next_protocol = 0xff;
10096         struct rte_vlan_hdr vlan = { 0 };
10097         struct mlx5_flow_dv_dest_array_resource mdest_res;
10098         struct mlx5_flow_dv_sample_resource sample_res;
10099         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10100         const struct rte_flow_action_sample *sample = NULL;
10101         struct mlx5_flow_sub_actions_list *sample_act;
10102         uint32_t sample_act_pos = UINT32_MAX;
10103         uint32_t num_of_dest = 0;
10104         int tmp_actions_n = 0;
10105         uint32_t table;
10106         int ret = 0;
10107         const struct mlx5_flow_tunnel *tunnel;
10108         struct flow_grp_info grp_info = {
10109                 .external = !!dev_flow->external,
10110                 .transfer = !!attr->transfer,
10111                 .fdb_def_rule = !!priv->fdb_def_rule,
10112                 .skip_scale = !!dev_flow->skip_scale,
10113         };
10114
10115         if (!wks)
10116                 return rte_flow_error_set(error, ENOMEM,
10117                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10118                                           NULL,
10119                                           "failed to push flow workspace");
10120         rss_desc = &wks->rss_desc;
10121         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
10122         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
10123         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
10124                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
10125         /* Update the normal path action resource into the last array index. */
10126         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
10127         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
10128                  flow_items_to_tunnel(items) :
10129                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
10130                  flow_actions_to_tunnel(actions) :
10131                  dev_flow->tunnel;
10134         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
10135                                 (dev, tunnel, attr, items, actions);
10136         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
10137                                        &grp_info, error);
10138         if (ret)
10139                 return ret;
10140         dev_flow->dv.group = table;
10141         if (attr->transfer)
10142                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
10143         if (priority == MLX5_FLOW_PRIO_RSVD)
10144                 priority = dev_conf->flow_prio - 1;
10145         /* The number of actions must be set to 0 in case of a dirty stack. */
10146         mhdr_res->actions_num = 0;
10147         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
10148                 /*
10149                  * Do not add a decap action if the match rule drops
10150                  * packets: HW rejects rules that combine decap & drop.
10151                  *
10152                  * If a tunnel match rule was inserted before the matching
10153                  * tunnel set rule, the flow table used in the match rule
10154                  * must be registered. The current implementation handles
10155                  * that in flow_dv_match_register() at the function end.
10156                  */
10157                 bool add_decap = true;
10158                 const struct rte_flow_action *ptr = actions;
10159
10160                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
10161                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
10162                                 add_decap = false;
10163                                 break;
10164                         }
10165                 }
10166                 if (add_decap) {
10167                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10168                                                            attr->transfer,
10169                                                            error))
10170                                 return -rte_errno;
10171                         dev_flow->dv.actions[actions_n++] =
10172                                         dev_flow->dv.encap_decap->action;
10173                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10174                 }
10175         }
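        /*
         * Translate each action into a DV action handle. Modify-header
         * commands are accumulated in mhdr_res and registered as a single
         * action when RTE_FLOW_ACTION_TYPE_END is reached.
         */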
10176         for (; !actions_end ; actions++) {
10177                 const struct rte_flow_action_queue *queue;
10178                 const struct rte_flow_action_rss *rss;
10179                 const struct rte_flow_action *action = actions;
10180                 const uint8_t *rss_key;
10181                 const struct rte_flow_action_meter *mtr;
10182                 struct mlx5_flow_tbl_resource *tbl;
10183                 struct mlx5_aso_age_action *age_act;
10184                 uint32_t port_id = 0;
10185                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
10186                 int action_type = actions->type;
10187                 const struct rte_flow_action *found_action = NULL;
10188                 struct mlx5_flow_meter *fm = NULL;
10189                 uint32_t jump_group = 0;
10190
10191                 if (!mlx5_flow_os_action_supported(action_type))
10192                         return rte_flow_error_set(error, ENOTSUP,
10193                                                   RTE_FLOW_ERROR_TYPE_ACTION,
10194                                                   actions,
10195                                                   "action not supported");
10196                 switch (action_type) {
10197                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
10198                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
10199                         break;
10200                 case RTE_FLOW_ACTION_TYPE_VOID:
10201                         break;
10202                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
10203                         if (flow_dv_translate_action_port_id(dev, action,
10204                                                              &port_id, error))
10205                                 return -rte_errno;
10206                         port_id_resource.port_id = port_id;
10207                         MLX5_ASSERT(!handle->rix_port_id_action);
10208                         if (flow_dv_port_id_action_resource_register
10209                             (dev, &port_id_resource, dev_flow, error))
10210                                 return -rte_errno;
10211                         dev_flow->dv.actions[actions_n++] =
10212                                         dev_flow->dv.port_id_action->action;
10213                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10214                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
10215                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
10216                         num_of_dest++;
10217                         break;
10218                 case RTE_FLOW_ACTION_TYPE_FLAG:
10219                         action_flags |= MLX5_FLOW_ACTION_FLAG;
10220                         dev_flow->handle->mark = 1;
10221                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10222                                 struct rte_flow_action_mark mark = {
10223                                         .id = MLX5_FLOW_MARK_DEFAULT,
10224                                 };
10225
10226                                 if (flow_dv_convert_action_mark(dev, &mark,
10227                                                                 mhdr_res,
10228                                                                 error))
10229                                         return -rte_errno;
10230                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10231                                 break;
10232                         }
10233                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
10234                         /*
10235                          * Only one FLAG or MARK is supported per device flow
10236                          * right now. So the pointer to the tag resource must be
10237                          * zero before the register process.
10238                          */
10239                         MLX5_ASSERT(!handle->dvh.rix_tag);
10240                         if (flow_dv_tag_resource_register(dev, tag_be,
10241                                                           dev_flow, error))
10242                                 return -rte_errno;
10243                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10244                         dev_flow->dv.actions[actions_n++] =
10245                                         dev_flow->dv.tag_resource->action;
10246                         break;
10247                 case RTE_FLOW_ACTION_TYPE_MARK:
10248                         action_flags |= MLX5_FLOW_ACTION_MARK;
10249                         dev_flow->handle->mark = 1;
10250                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
10251                                 const struct rte_flow_action_mark *mark =
10252                                         (const struct rte_flow_action_mark *)
10253                                                 actions->conf;
10254
10255                                 if (flow_dv_convert_action_mark(dev, mark,
10256                                                                 mhdr_res,
10257                                                                 error))
10258                                         return -rte_errno;
10259                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
10260                                 break;
10261                         }
10262                         /* Fall-through */
10263                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
10264                         /* Legacy (non-extensive) MARK action. */
10265                         tag_be = mlx5_flow_mark_set
10266                               (((const struct rte_flow_action_mark *)
10267                                (actions->conf))->id);
10268                         MLX5_ASSERT(!handle->dvh.rix_tag);
10269                         if (flow_dv_tag_resource_register(dev, tag_be,
10270                                                           dev_flow, error))
10271                                 return -rte_errno;
10272                         MLX5_ASSERT(dev_flow->dv.tag_resource);
10273                         dev_flow->dv.actions[actions_n++] =
10274                                         dev_flow->dv.tag_resource->action;
10275                         break;
10276                 case RTE_FLOW_ACTION_TYPE_SET_META:
10277                         if (flow_dv_convert_action_set_meta
10278                                 (dev, mhdr_res, attr,
10279                                  (const struct rte_flow_action_set_meta *)
10280                                   actions->conf, error))
10281                                 return -rte_errno;
10282                         action_flags |= MLX5_FLOW_ACTION_SET_META;
10283                         break;
10284                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
10285                         if (flow_dv_convert_action_set_tag
10286                                 (dev, mhdr_res,
10287                                  (const struct rte_flow_action_set_tag *)
10288                                   actions->conf, error))
10289                                 return -rte_errno;
10290                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10291                         break;
10292                 case RTE_FLOW_ACTION_TYPE_DROP:
10293                         action_flags |= MLX5_FLOW_ACTION_DROP;
10294                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
10295                         break;
10296                 case RTE_FLOW_ACTION_TYPE_QUEUE:
10297                         queue = actions->conf;
10298                         rss_desc->queue_num = 1;
10299                         rss_desc->queue[0] = queue->index;
10300                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
10301                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
10302                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
10303                         num_of_dest++;
10304                         break;
10305                 case RTE_FLOW_ACTION_TYPE_RSS:
10306                         rss = actions->conf;
10307                         memcpy(rss_desc->queue, rss->queue,
10308                                rss->queue_num * sizeof(uint16_t));
10309                         rss_desc->queue_num = rss->queue_num;
10310                         /* NULL RSS key indicates default RSS key. */
10311                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
10312                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
10313                         /*
10314                          * rss->level and rss->types should be set in advance
10315                          * when expanding items for RSS.
10316                          */
10317                         action_flags |= MLX5_FLOW_ACTION_RSS;
10318                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
10319                                 MLX5_FLOW_FATE_SHARED_RSS :
10320                                 MLX5_FLOW_FATE_QUEUE;
10321                         break;
10322                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
10323                         flow->age = (uint32_t)(uintptr_t)(action->conf);
10324                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
10325                         __atomic_fetch_add(&age_act->refcnt, 1,
10326                                            __ATOMIC_RELAXED);
10327                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
10328                         action_flags |= MLX5_FLOW_ACTION_AGE;
10329                         break;
10330                 case RTE_FLOW_ACTION_TYPE_AGE:
10331                         if (priv->sh->flow_hit_aso_en && attr->group) {
10332                                 flow->age = flow_dv_translate_create_aso_age
10333                                                 (dev, action->conf, error);
10334                                 if (!flow->age)
10335                                         return rte_flow_error_set
10336                                                 (error, rte_errno,
10337                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10338                                                  NULL,
10339                                                  "can't create ASO age action");
10340                                 dev_flow->dv.actions[actions_n++] =
10341                                           (flow_aso_age_get_by_idx
10342                                                 (dev, flow->age))->dr_action;
10343                                 action_flags |= MLX5_FLOW_ACTION_AGE;
10344                                 break;
10345                         }
10346                         /* Fall-through */
10347                 case RTE_FLOW_ACTION_TYPE_COUNT:
10348                         if (!dev_conf->devx) {
10349                                 return rte_flow_error_set
10350                                               (error, ENOTSUP,
10351                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10352                                                NULL,
10353                                                "count action not supported");
10354                         }
10355                         /* Save information first, will apply later. */
10356                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
10357                                 count = action->conf;
10358                         else
10359                                 age = action->conf;
10360                         action_flags |= MLX5_FLOW_ACTION_COUNT;
10361                         break;
10362                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
10363                         dev_flow->dv.actions[actions_n++] =
10364                                                 priv->sh->pop_vlan_action;
10365                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
10366                         break;
10367                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
10368                         if (!(action_flags &
10369                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
10370                                 flow_dev_get_vlan_info_from_items(items, &vlan);
10371                         vlan.eth_proto = rte_be_to_cpu_16
10372                              ((((const struct rte_flow_action_of_push_vlan *)
10373                                                    actions->conf)->ethertype));
10374                         found_action = mlx5_flow_find_action
10375                                         (actions + 1,
10376                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
10377                         if (found_action)
10378                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
10379                         found_action = mlx5_flow_find_action
10380                                         (actions + 1,
10381                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
10382                         if (found_action)
10383                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
10384                         if (flow_dv_create_action_push_vlan
10385                                             (dev, attr, &vlan, dev_flow, error))
10386                                 return -rte_errno;
10387                         dev_flow->dv.actions[actions_n++] =
10388                                         dev_flow->dv.push_vlan_res->action;
10389                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
10390                         break;
10391                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
10392                         /* The OF_PUSH_VLAN action already handled this action. */
10393                         MLX5_ASSERT(action_flags &
10394                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
10395                         break;
10396                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
10397                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
10398                                 break;
10399                         flow_dev_get_vlan_info_from_items(items, &vlan);
10400                         mlx5_update_vlan_vid_pcp(actions, &vlan);
10401                         /* If there is no VLAN push, this is a modify-header action. */
10402                         if (flow_dv_convert_action_modify_vlan_vid
10403                                                 (mhdr_res, actions, error))
10404                                 return -rte_errno;
10405                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
10406                         break;
10407                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
10408                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
10409                         if (flow_dv_create_action_l2_encap(dev, actions,
10410                                                            dev_flow,
10411                                                            attr->transfer,
10412                                                            error))
10413                                 return -rte_errno;
10414                         dev_flow->dv.actions[actions_n++] =
10415                                         dev_flow->dv.encap_decap->action;
10416                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10417                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10418                                 sample_act->action_flags |=
10419                                                         MLX5_FLOW_ACTION_ENCAP;
10420                         break;
10421                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
10422                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
10423                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
10424                                                            attr->transfer,
10425                                                            error))
10426                                 return -rte_errno;
10427                         dev_flow->dv.actions[actions_n++] =
10428                                         dev_flow->dv.encap_decap->action;
10429                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10430                         break;
10431                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10432                         /* Handle encap with preceding decap. */
10433                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
10434                                 if (flow_dv_create_action_raw_encap
10435                                         (dev, actions, dev_flow, attr, error))
10436                                         return -rte_errno;
10437                                 dev_flow->dv.actions[actions_n++] =
10438                                         dev_flow->dv.encap_decap->action;
10439                         } else {
10440                                 /* Handle encap without preceding decap. */
10441                                 if (flow_dv_create_action_l2_encap
10442                                     (dev, actions, dev_flow, attr->transfer,
10443                                      error))
10444                                         return -rte_errno;
10445                                 dev_flow->dv.actions[actions_n++] =
10446                                         dev_flow->dv.encap_decap->action;
10447                         }
10448                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10449                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10450                                 sample_act->action_flags |=
10451                                                         MLX5_FLOW_ACTION_ENCAP;
10452                         break;
10453                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
10454                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
10455                                 ;
10456                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
10457                                 if (flow_dv_create_action_l2_decap
10458                                     (dev, dev_flow, attr->transfer, error))
10459                                         return -rte_errno;
10460                                 dev_flow->dv.actions[actions_n++] =
10461                                         dev_flow->dv.encap_decap->action;
10462                         }
10463                         /* If decap is followed by encap, handle it at encap. */
10464                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10465                         break;
10466                 case RTE_FLOW_ACTION_TYPE_JUMP:
10467                         jump_group = ((const struct rte_flow_action_jump *)
10468                                                         action->conf)->group;
10469                         grp_info.std_tbl_fix = 0;
10470                         grp_info.skip_scale = 0;
10471                         ret = mlx5_flow_group_to_table(dev, tunnel,
10472                                                        jump_group,
10473                                                        &table,
10474                                                        &grp_info, error);
10475                         if (ret)
10476                                 return ret;
10477                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
10478                                                        attr->transfer,
10479                                                        !!dev_flow->external,
10480                                                        tunnel, jump_group, 0,
10481                                                        error);
10482                         if (!tbl)
10483                                 return rte_flow_error_set
10484                                                 (error, errno,
10485                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10486                                                  NULL,
10487                                                  "cannot create jump action.");
10488                         if (flow_dv_jump_tbl_resource_register
10489                             (dev, tbl, dev_flow, error)) {
10490                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10491                                 return rte_flow_error_set
10492                                                 (error, errno,
10493                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10494                                                  NULL,
10495                                                  "cannot create jump action.");
10496                         }
10497                         dev_flow->dv.actions[actions_n++] =
10498                                         dev_flow->dv.jump->action;
10499                         action_flags |= MLX5_FLOW_ACTION_JUMP;
10500                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
10501                         break;
10502                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
10503                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
10504                         if (flow_dv_convert_action_modify_mac
10505                                         (mhdr_res, actions, error))
10506                                 return -rte_errno;
10507                         action_flags |= actions->type ==
10508                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
10509                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
10510                                         MLX5_FLOW_ACTION_SET_MAC_DST;
10511                         break;
10512                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
10513                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
10514                         if (flow_dv_convert_action_modify_ipv4
10515                                         (mhdr_res, actions, error))
10516                                 return -rte_errno;
10517                         action_flags |= actions->type ==
10518                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
10519                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
10520                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
10521                         break;
10522                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
10523                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
10524                         if (flow_dv_convert_action_modify_ipv6
10525                                         (mhdr_res, actions, error))
10526                                 return -rte_errno;
10527                         action_flags |= actions->type ==
10528                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
10529                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
10530                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
10531                         break;
10532                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
10533                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
10534                         if (flow_dv_convert_action_modify_tp
10535                                         (mhdr_res, actions, items,
10536                                          &flow_attr, dev_flow, !!(action_flags &
10537                                          MLX5_FLOW_ACTION_DECAP), error))
10538                                 return -rte_errno;
10539                         action_flags |= actions->type ==
10540                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10541                                         MLX5_FLOW_ACTION_SET_TP_SRC :
10542                                         MLX5_FLOW_ACTION_SET_TP_DST;
10543                         break;
10544                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10545                         if (flow_dv_convert_action_modify_dec_ttl
10546                                         (mhdr_res, items, &flow_attr, dev_flow,
10547                                          !!(action_flags &
10548                                          MLX5_FLOW_ACTION_DECAP), error))
10549                                 return -rte_errno;
10550                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10551                         break;
10552                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
10553                         if (flow_dv_convert_action_modify_ttl
10554                                         (mhdr_res, actions, items, &flow_attr,
10555                                          dev_flow, !!(action_flags &
10556                                          MLX5_FLOW_ACTION_DECAP), error))
10557                                 return -rte_errno;
10558                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10559                         break;
10560                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10561                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10562                         if (flow_dv_convert_action_modify_tcp_seq
10563                                         (mhdr_res, actions, error))
10564                                 return -rte_errno;
10565                         action_flags |= actions->type ==
10566                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10567                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
10568                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10569                         break;
10571                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10572                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10573                         if (flow_dv_convert_action_modify_tcp_ack
10574                                         (mhdr_res, actions, error))
10575                                 return -rte_errno;
10576                         action_flags |= actions->type ==
10577                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10578                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
10579                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
10580                         break;
10581                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10582                         if (flow_dv_convert_action_set_reg
10583                                         (mhdr_res, actions, error))
10584                                 return -rte_errno;
10585                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10586                         break;
10587                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10588                         if (flow_dv_convert_action_copy_mreg
10589                                         (dev, mhdr_res, actions, error))
10590                                 return -rte_errno;
10591                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10592                         break;
10593                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10594                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10595                         dev_flow->handle->fate_action =
10596                                         MLX5_FLOW_FATE_DEFAULT_MISS;
10597                         break;
10598                 case RTE_FLOW_ACTION_TYPE_METER:
10599                         mtr = actions->conf;
10600                         if (!flow->meter) {
10601                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10602                                                             attr, error);
10603                                 if (!fm)
10604                                         return rte_flow_error_set(error,
10605                                                 rte_errno,
10606                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10607                                                 NULL,
10608                                                 "meter not found "
10609                                                 "or invalid parameters");
10610                                 flow->meter = fm->idx;
10611                         }
10612                         /* Set the meter action. */
10613                         if (!fm) {
10614                                 fm = mlx5_ipool_get(priv->sh->ipool
10615                                                 [MLX5_IPOOL_MTR], flow->meter);
10616                                 if (!fm)
10617                                         return rte_flow_error_set(error,
10618                                                 rte_errno,
10619                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10620                                                 NULL,
10621                                                 "meter not found "
10622                                                 "or invalid parameters");
10623                         }
10624                         dev_flow->dv.actions[actions_n++] =
10625                                 fm->mfts->meter_action;
10626                         action_flags |= MLX5_FLOW_ACTION_METER;
10627                         break;
10628                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10629                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10630                                                               actions, error))
10631                                 return -rte_errno;
10632                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10633                         break;
10634                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10635                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10636                                                               actions, error))
10637                                 return -rte_errno;
10638                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10639                         break;
10640                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
10641                         sample_act_pos = actions_n;
10642                         sample = (const struct rte_flow_action_sample *)
10643                                  action->conf;
10644                         actions_n++;
10645                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10646                         /* Put the encap action into the group if used with port_id. */
10647                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10648                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10649                                 sample_act->action_flags |=
10650                                                         MLX5_FLOW_ACTION_ENCAP;
10651                         break;
10652                 case RTE_FLOW_ACTION_TYPE_END:
10653                         actions_end = true;
10654                         if (mhdr_res->actions_num) {
10655                                 /* Create the modify-header action if needed. */
10656                                 if (flow_dv_modify_hdr_resource_register
10657                                         (dev, mhdr_res, dev_flow, error))
10658                                         return -rte_errno;
10659                                 dev_flow->dv.actions[modify_action_position] =
10660                                         handle->dvh.modify_hdr->action;
10661                         }
10662                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10663                                 flow->counter =
10664                                         flow_dv_translate_create_counter(dev,
10665                                                 dev_flow, count, age);
10666
10667                                 if (!flow->counter)
10668                                         return rte_flow_error_set
10669                                                 (error, rte_errno,
10670                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10671                                                 NULL,
10672                                                 "cannot create counter"
10673                                                 " object.");
10674                                 dev_flow->dv.actions[actions_n] =
10675                                           (flow_dv_counter_get_by_idx(dev,
10676                                           flow->counter, NULL))->action;
10677                                 actions_n++;
10678                         }
10679                 default:
10680                         break;
10681                 }
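                /*
                 * Reserve one dv.actions slot for the merged modify-header
                 * action; it is filled in when the END action registers
                 * mhdr_res.
                 */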
10682                 if (mhdr_res->actions_num &&
10683                     modify_action_position == UINT32_MAX)
10684                         modify_action_position = actions_n++;
10685         }
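        /* Translate each pattern item into the matcher mask and value. */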
10686         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10687                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10688                 int item_type = items->type;
10689
10690                 if (!mlx5_flow_os_item_supported(item_type))
10691                         return rte_flow_error_set(error, ENOTSUP,
10692                                                   RTE_FLOW_ERROR_TYPE_ITEM,
10693                                                   NULL, "item not supported");
10694                 switch (item_type) {
10695                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
10696                         flow_dv_translate_item_port_id
10697                                 (dev, match_mask, match_value, items, attr);
10698                         last_item = MLX5_FLOW_ITEM_PORT_ID;
10699                         break;
10700                 case RTE_FLOW_ITEM_TYPE_ETH:
10701                         flow_dv_translate_item_eth(match_mask, match_value,
10702                                                    items, tunnel,
10703                                                    dev_flow->dv.group);
10704                         matcher.priority = action_flags &
10705                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
10706                                         !dev_flow->external ?
10707                                         MLX5_PRIORITY_MAP_L3 :
10708                                         MLX5_PRIORITY_MAP_L2;
10709                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10710                                              MLX5_FLOW_LAYER_OUTER_L2;
10711                         break;
10712                 case RTE_FLOW_ITEM_TYPE_VLAN:
10713                         flow_dv_translate_item_vlan(dev_flow,
10714                                                     match_mask, match_value,
10715                                                     items, tunnel,
10716                                                     dev_flow->dv.group);
10717                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10718                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10719                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10720                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10721                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10722                         break;
10723                 case RTE_FLOW_ITEM_TYPE_IPV4:
10724                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10725                                                   &item_flags, &tunnel);
10726                         flow_dv_translate_item_ipv4(match_mask, match_value,
10727                                                     items, tunnel,
10728                                                     dev_flow->dv.group);
10729                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10730                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10731                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
10732                         if (items->mask != NULL &&
10733                             ((const struct rte_flow_item_ipv4 *)
10734                              items->mask)->hdr.next_proto_id) {
10735                                 next_protocol =
10736                                         ((const struct rte_flow_item_ipv4 *)
10737                                          (items->spec))->hdr.next_proto_id;
10738                                 next_protocol &=
10739                                         ((const struct rte_flow_item_ipv4 *)
10740                                          (items->mask))->hdr.next_proto_id;
10741                         } else {
10742                                 /* Reset for inner layer. */
10743                                 next_protocol = 0xff;
10744                         }
10745                         break;
10746                 case RTE_FLOW_ITEM_TYPE_IPV6:
10747                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10748                                                   &item_flags, &tunnel);
10749                         flow_dv_translate_item_ipv6(match_mask, match_value,
10750                                                     items, tunnel,
10751                                                     dev_flow->dv.group);
10752                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10753                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10754                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10755                         if (items->mask != NULL &&
10756                             ((const struct rte_flow_item_ipv6 *)
10757                              items->mask)->hdr.proto) {
10758                                 next_protocol =
10759                                         ((const struct rte_flow_item_ipv6 *)
10760                                          items->spec)->hdr.proto;
10761                                 next_protocol &=
10762                                         ((const struct rte_flow_item_ipv6 *)
10763                                          items->mask)->hdr.proto;
10764                         } else {
10765                                 /* Reset for inner layer. */
10766                                 next_protocol = 0xff;
10767                         }
10768                         break;
10769                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10770                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10771                                                              match_value,
10772                                                              items, tunnel);
10773                         last_item = tunnel ?
10774                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10775                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10776                         if (items->mask != NULL &&
10777                             ((const struct rte_flow_item_ipv6_frag_ext *)
10778                              items->mask)->hdr.next_header) {
10779                                 next_protocol =
10780                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10781                                  items->spec)->hdr.next_header;
10782                                 next_protocol &=
10783                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10784                                  items->mask)->hdr.next_header;
10785                         } else {
10786                                 /* Reset for inner layer. */
10787                                 next_protocol = 0xff;
10788                         }
10789                         break;
10790                 case RTE_FLOW_ITEM_TYPE_TCP:
10791                         flow_dv_translate_item_tcp(match_mask, match_value,
10792                                                    items, tunnel);
10793                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10794                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10795                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10796                         break;
10797                 case RTE_FLOW_ITEM_TYPE_UDP:
10798                         flow_dv_translate_item_udp(match_mask, match_value,
10799                                                    items, tunnel);
10800                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10801                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10802                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10803                         break;
10804                 case RTE_FLOW_ITEM_TYPE_GRE:
10805                         flow_dv_translate_item_gre(match_mask, match_value,
10806                                                    items, tunnel);
10807                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10808                         last_item = MLX5_FLOW_LAYER_GRE;
10809                         break;
10810                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10811                         flow_dv_translate_item_gre_key(match_mask,
10812                                                        match_value, items);
10813                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10814                         break;
10815                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10816                         flow_dv_translate_item_nvgre(match_mask, match_value,
10817                                                      items, tunnel);
10818                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10819                         last_item = MLX5_FLOW_LAYER_GRE;
10820                         break;
10821                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10822                         flow_dv_translate_item_vxlan(match_mask, match_value,
10823                                                      items, tunnel);
10824                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10825                         last_item = MLX5_FLOW_LAYER_VXLAN;
10826                         break;
10827                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10828                         flow_dv_translate_item_vxlan_gpe(match_mask,
10829                                                          match_value, items,
10830                                                          tunnel);
10831                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10832                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10833                         break;
10834                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10835                         flow_dv_translate_item_geneve(match_mask, match_value,
10836                                                       items, tunnel);
10837                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10838                         last_item = MLX5_FLOW_LAYER_GENEVE;
10839                         break;
10840                 case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
10841                         ret = flow_dv_translate_item_geneve_opt(dev, match_mask,
10842                                                           match_value,
10843                                                           items, error);
10844                         if (ret)
10845                                 return rte_flow_error_set(error, -ret,
10846                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
10847                                         "cannot create GENEVE TLV option");
10848                         flow->geneve_tlv_option = 1;
10849                         last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
10850                         break;
10851                 case RTE_FLOW_ITEM_TYPE_MPLS:
10852                         flow_dv_translate_item_mpls(match_mask, match_value,
10853                                                     items, last_item, tunnel);
10854                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10855                         last_item = MLX5_FLOW_LAYER_MPLS;
10856                         break;
10857                 case RTE_FLOW_ITEM_TYPE_MARK:
10858                         flow_dv_translate_item_mark(dev, match_mask,
10859                                                     match_value, items);
10860                         last_item = MLX5_FLOW_ITEM_MARK;
10861                         break;
10862                 case RTE_FLOW_ITEM_TYPE_META:
10863                         flow_dv_translate_item_meta(dev, match_mask,
10864                                                     match_value, attr, items);
10865                         last_item = MLX5_FLOW_ITEM_METADATA;
10866                         break;
10867                 case RTE_FLOW_ITEM_TYPE_ICMP:
10868                         flow_dv_translate_item_icmp(match_mask, match_value,
10869                                                     items, tunnel);
10870                         last_item = MLX5_FLOW_LAYER_ICMP;
10871                         break;
10872                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10873                         flow_dv_translate_item_icmp6(match_mask, match_value,
10874                                                       items, tunnel);
10875                         last_item = MLX5_FLOW_LAYER_ICMP6;
10876                         break;
10877                 case RTE_FLOW_ITEM_TYPE_TAG:
10878                         flow_dv_translate_item_tag(dev, match_mask,
10879                                                    match_value, items);
10880                         last_item = MLX5_FLOW_ITEM_TAG;
10881                         break;
10882                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10883                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10884                                                         match_value, items);
10885                         last_item = MLX5_FLOW_ITEM_TAG;
10886                         break;
10887                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10888                         flow_dv_translate_item_tx_queue(dev, match_mask,
10889                                                         match_value,
10890                                                         items);
10891                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10892                         break;
10893                 case RTE_FLOW_ITEM_TYPE_GTP:
10894                         flow_dv_translate_item_gtp(match_mask, match_value,
10895                                                    items, tunnel);
10896                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10897                         last_item = MLX5_FLOW_LAYER_GTP;
10898                         break;
10899                 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
10900                         ret = flow_dv_translate_item_gtp_psc(match_mask,
10901                                                           match_value,
10902                                                           items);
10903                         if (ret)
10904                                 return rte_flow_error_set(error, -ret,
10905                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
10906                                         "cannot create GTP PSC item");
10907                         last_item = MLX5_FLOW_LAYER_GTP_PSC;
10908                         break;
10909                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10910                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10911                                 /* Create it only the first time it is used. */
10912                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10913                                 if (ret)
10914                                         return rte_flow_error_set
10915                                                 (error, -ret,
10916                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10917                                                 NULL,
10918                                                 "cannot create eCPRI parser");
10919                         }
10920                         /* Adjust the matcher mask size and device flow value size. */
10921                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10922                         dev_flow->dv.value.size =
10923                                         MLX5_ST_SZ_BYTES(fte_match_param);
10924                         flow_dv_translate_item_ecpri(dev, match_mask,
10925                                                      match_value, items);
10926                         /* No other protocol should follow eCPRI layer. */
10927                         last_item = MLX5_FLOW_LAYER_ECPRI;
10928                         break;
10929                 default:
10930                         break;
10931                 }
10932                 item_flags |= last_item;
10933         }
10934         /*
10935          * When E-Switch mode is enabled, there are two cases where the
10936          * source port must be set manually.
10937          * The first is a NIC steering rule, and the second is an E-Switch
10938          * rule where no port_id item was found. In both cases the source
10939          * port is set according to the current port in use.
10940          */
10941         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10942             (priv->representor || priv->master)) {
10943                 if (flow_dv_translate_item_port_id(dev, match_mask,
10944                                                    match_value, NULL, attr))
10945                         return -rte_errno;
10946         }
10947 #ifdef RTE_LIBRTE_MLX5_DEBUG
10948         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10949                                               dev_flow->dv.value.buf));
10950 #endif
10951         /*
10952          * Layers may be already initialized from prefix flow if this dev_flow
10953          * is the suffix flow.
10954          */
10955         handle->layers |= item_flags;
10956         if (action_flags & MLX5_FLOW_ACTION_RSS)
10957                 flow_dv_hashfields_set(dev_flow, rss_desc);
10958         /* If the sample action includes an RSS action, the Sample/Mirror
10959          * resource should be registered after the hash fields are updated.
10960          */
10961         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
10962                 ret = flow_dv_translate_action_sample(dev,
10963                                                       sample,
10964                                                       dev_flow, attr,
10965                                                       &num_of_dest,
10966                                                       sample_actions,
10967                                                       &sample_res,
10968                                                       error);
10969                 if (ret < 0)
10970                         return ret;
10971                 ret = flow_dv_create_action_sample(dev,
10972                                                    dev_flow,
10973                                                    num_of_dest,
10974                                                    &sample_res,
10975                                                    &mdest_res,
10976                                                    sample_actions,
10977                                                    action_flags,
10978                                                    error);
10979                 if (ret < 0)
10980                         return rte_flow_error_set
10981                                                 (error, rte_errno,
10982                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10983                                                 NULL,
10984                                                 "cannot create sample action");
10985                 if (num_of_dest > 1) {
10986                         dev_flow->dv.actions[sample_act_pos] =
10987                         dev_flow->dv.dest_array_res->action;
10988                 } else {
10989                         dev_flow->dv.actions[sample_act_pos] =
10990                         dev_flow->dv.sample_res->verbs_action;
10991                 }
10992         }
10993         /*
10994          * For multiple destinations (sample action with ratio=1), the encap
10995          * action and the port_id action are combined into a group action,
10996          * so the original actions must be removed from the flow and the
10997          * sample action used instead.
10998          */
10999         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
11000                 int i;
11001                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
11002
11003                 for (i = 0; i < actions_n; i++) {
11004                         if ((sample_act->dr_encap_action &&
11005                                 sample_act->dr_encap_action ==
11006                                 dev_flow->dv.actions[i]) ||
11007                                 (sample_act->dr_port_id_action &&
11008                                 sample_act->dr_port_id_action ==
11009                                 dev_flow->dv.actions[i]))
11010                                 continue;
11011                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
11012                 }
11013                 memcpy((void *)dev_flow->dv.actions,
11014                                 (void *)temp_actions,
11015                                 tmp_actions_n * sizeof(void *));
11016                 actions_n = tmp_actions_n;
11017         }
11018         dev_flow->dv.actions_n = actions_n;
11019         dev_flow->act_flags = action_flags;
11020         /* Register matcher. */
11021         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
11022                                     matcher.mask.size);
11023         matcher.priority = mlx5_os_flow_adjust_priority(dev,
11024                                                         priority,
11025                                                         matcher.priority);
11026         /* The reserved field does not need to be set to 0 here. */
11027         tbl_key.domain = attr->transfer;
11028         tbl_key.direction = attr->egress;
11029         tbl_key.table_id = dev_flow->dv.group;
11030         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
11031                                      tunnel, attr->group, error))
11032                 return -rte_errno;
11033         return 0;
11034 }
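
/*
 * Hedged illustration (not part of the driver): an application-level
 * rte_flow rule whose pattern and actions would be walked by the
 * translation loops above. The outer ETH/IPV4/UDP items are matched
 * before the VXLAN item flips translation to tunnel (inner) mode.
 * All names and values here are illustrative only.
 */
static __rte_unused struct rte_flow *
flow_dv_translate_input_sketch(uint16_t port_id,
			       struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}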
11035
11036 /**
11037  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11038  * and tunnel.
11039  *
11040  * @param[in, out] action
11041  *   Shared RSS action holding hash RX queue objects.
11042  * @param[in] hash_fields
11043  *   Defines combination of packet fields to participate in RX hash.
11044  * @param[in] tunnel
11045  *   Tunnel type.
11046  * @param[in] hrxq_idx
11047  *   Hash RX queue index to set.
11048  *
11049  * @return
11050  *   0 on success, otherwise negative errno value.
11051  */
11052 static int
11053 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
11054                               const uint64_t hash_fields,
11055                               const int tunnel,
11056                               uint32_t hrxq_idx)
11057 {
11058         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
11059
11060         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11061         case MLX5_RSS_HASH_IPV4:
11062                 hrxqs[0] = hrxq_idx;
11063                 return 0;
11064         case MLX5_RSS_HASH_IPV4_TCP:
11065                 hrxqs[1] = hrxq_idx;
11066                 return 0;
11067         case MLX5_RSS_HASH_IPV4_UDP:
11068                 hrxqs[2] = hrxq_idx;
11069                 return 0;
11070         case MLX5_RSS_HASH_IPV6:
11071                 hrxqs[3] = hrxq_idx;
11072                 return 0;
11073         case MLX5_RSS_HASH_IPV6_TCP:
11074                 hrxqs[4] = hrxq_idx;
11075                 return 0;
11076         case MLX5_RSS_HASH_IPV6_UDP:
11077                 hrxqs[5] = hrxq_idx;
11078                 return 0;
11079         case MLX5_RSS_HASH_NONE:
11080                 hrxqs[6] = hrxq_idx;
11081                 return 0;
11082         default:
11083                 return -1;
11084         }
11085 }
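
/*
 * Hedged usage sketch: populating the non-tunnel IPv4-UDP slot of a
 * shared RSS action. The hrxq index value is illustrative only.
 */
static __rte_unused void
flow_dv_rss_hrxq_set_sketch(struct mlx5_shared_action_rss *action)
{
	/* MLX5_RSS_HASH_IPV4_UDP selects hrxqs[2] in the switch above. */
	(void)__flow_dv_action_rss_hrxq_set(action, MLX5_RSS_HASH_IPV4_UDP,
					    0, 42);
}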
11086
11087 /**
11088  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
11089  * and tunnel.
11090  *
11091  * @param[in] dev
11092  *   Pointer to the Ethernet device structure.
11093  * @param[in] idx
11094  *   Shared RSS action ID holding hash RX queue objects.
11095  * @param[in] hash_fields
11096  *   Defines combination of packet fields to participate in RX hash.
11097  * @param[in] tunnel
11098  *   Tunnel type.
11099  *
11100  * @return
11101  *   Valid hash RX queue index, otherwise 0.
11102  */
11103 static uint32_t
11104 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
11105                                  const uint64_t hash_fields,
11106                                  const int tunnel)
11107 {
11108         struct mlx5_priv *priv = dev->data->dev_private;
11109         struct mlx5_shared_action_rss *shared_rss =
11110             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11111         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
11112                                                         shared_rss->hrxq_tunnel;
11113
11114         switch (hash_fields & ~IBV_RX_HASH_INNER) {
11115         case MLX5_RSS_HASH_IPV4:
11116                 return hrxqs[0];
11117         case MLX5_RSS_HASH_IPV4_TCP:
11118                 return hrxqs[1];
11119         case MLX5_RSS_HASH_IPV4_UDP:
11120                 return hrxqs[2];
11121         case MLX5_RSS_HASH_IPV6:
11122                 return hrxqs[3];
11123         case MLX5_RSS_HASH_IPV6_TCP:
11124                 return hrxqs[4];
11125         case MLX5_RSS_HASH_IPV6_UDP:
11126                 return hrxqs[5];
11127         case MLX5_RSS_HASH_NONE:
11128                 return hrxqs[6];
11129         default:
11130                 return 0;
11131         }
11132 }
11133
11134 /**
11135  * Apply the flow to the NIC, lock free
11136  * (the mutex should be acquired by the caller).
11137  *
11138  * @param[in] dev
11139  *   Pointer to the Ethernet device structure.
11140  * @param[in, out] flow
11141  *   Pointer to flow structure.
11142  * @param[out] error
11143  *   Pointer to error structure.
11144  *
11145  * @return
11146  *   0 on success, a negative errno value otherwise and rte_errno is set.
11147  */
11148 static int
11149 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
11150               struct rte_flow_error *error)
11151 {
11152         struct mlx5_flow_dv_workspace *dv;
11153         struct mlx5_flow_handle *dh;
11154         struct mlx5_flow_handle_dv *dv_h;
11155         struct mlx5_flow *dev_flow;
11156         struct mlx5_priv *priv = dev->data->dev_private;
11157         uint32_t handle_idx;
11158         int n;
11159         int err;
11160         int idx;
11161         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
11162         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
11163
11164         MLX5_ASSERT(wks);
11165         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
11166                 dev_flow = &wks->flows[idx];
11167                 dv = &dev_flow->dv;
11168                 dh = dev_flow->handle;
11169                 dv_h = &dh->dvh;
11170                 n = dv->actions_n;
11171                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
11172                         if (dv->transfer) {
11173                                 dv->actions[n++] = priv->sh->esw_drop_action;
11174                         } else {
11175                                 MLX5_ASSERT(priv->drop_queue.hrxq);
11176                                 dv->actions[n++] =
11177                                                 priv->drop_queue.hrxq->action;
11178                         }
11179                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
11180                            !dv_h->rix_sample && !dv_h->rix_dest_array)) {
11181                         struct mlx5_hrxq *hrxq;
11182                         uint32_t hrxq_idx;
11183
11184                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
11185                                                     &hrxq_idx);
11186                         if (!hrxq) {
11187                                 rte_flow_error_set
11188                                         (error, rte_errno,
11189                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11190                                          "cannot get hash queue");
11191                                 goto error;
11192                         }
11193                         dh->rix_hrxq = hrxq_idx;
11194                         dv->actions[n++] = hrxq->action;
11195                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11196                         struct mlx5_hrxq *hrxq = NULL;
11197                         uint32_t hrxq_idx;
11198
11199                         hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
11200                                                 rss_desc->shared_rss,
11201                                                 dev_flow->hash_fields,
11202                                                 !!(dh->layers &
11203                                                 MLX5_FLOW_LAYER_TUNNEL));
11204                         if (hrxq_idx)
11205                                 hrxq = mlx5_ipool_get
11206                                         (priv->sh->ipool[MLX5_IPOOL_HRXQ],
11207                                          hrxq_idx);
11208                         if (!hrxq) {
11209                                 rte_flow_error_set
11210                                         (error, rte_errno,
11211                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11212                                          "cannot get hash queue");
11213                                 goto error;
11214                         }
11215                         dh->rix_srss = rss_desc->shared_rss;
11216                         dv->actions[n++] = hrxq->action;
11217                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
11218                         if (!priv->sh->default_miss_action) {
11219                                 rte_flow_error_set
11220                                         (error, rte_errno,
11221                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11222                                          "default miss action was not created.");
11223                                 goto error;
11224                         }
11225                         dv->actions[n++] = priv->sh->default_miss_action;
11226                 }
11227                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
11228                                                (void *)&dv->value, n,
11229                                                dv->actions, &dh->drv_flow);
11230                 if (err) {
11231                         rte_flow_error_set(error, errno,
11232                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11233                                            NULL,
11234                                            "hardware refuses to create flow");
11235                         goto error;
11236                 }
11237                 if (priv->vmwa_context &&
11238                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
11239                         /*
11240                          * The rule contains the VLAN pattern.
11241                          * For a VF we are going to create a VLAN
11242                          * interface to make the hypervisor set the
11243                          * correct e-Switch vport context.
11244                          */
11245                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
11246                 }
11247         }
11248         return 0;
11249 error:
11250         err = rte_errno; /* Save rte_errno before cleanup. */
11251         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
11252                        handle_idx, dh, next) {
11253                 /* hrxq is a union; don't clear it if the flag is not set. */
11254                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
11255                         mlx5_hrxq_release(dev, dh->rix_hrxq);
11256                         dh->rix_hrxq = 0;
11257                 } else if (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
11258                         dh->rix_srss = 0;
11259                 }
11260                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11261                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11262         }
11263         rte_errno = err; /* Restore rte_errno. */
11264         return -rte_errno;
11265 }
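
/*
 * Minimal sketch of the rte_errno save/restore idiom used by the error
 * path above: cleanup calls may clobber rte_errno, so the original
 * value is preserved for the caller. The hrxq release stands in for
 * the per-handle cleanup; the helper itself is hypothetical.
 */
static __rte_unused int
flow_dv_errno_idiom_sketch(struct rte_eth_dev *dev, uint32_t hrxq_idx)
{
	int err = rte_errno; /* Save rte_errno before cleanup. */

	mlx5_hrxq_release(dev, hrxq_idx); /* May modify rte_errno. */
	rte_errno = err; /* Restore rte_errno for the caller. */
	return -rte_errno;
}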
11266
11267 void
11268 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
11269                           struct mlx5_cache_entry *entry)
11270 {
11271         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
11272                                                           entry);
11273
11274         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
11275         mlx5_free(cache);
11276 }
11277
11278 /**
11279  * Release the flow matcher.
11280  *
11281  * @param dev
11282  *   Pointer to Ethernet device.
11283  * @param handle
11284  *   Pointer to mlx5_flow_handle.
11285  *
11286  * @return
11287  *   1 while a reference on it exists, 0 when freed.
11288  */
11289 static int
11290 flow_dv_matcher_release(struct rte_eth_dev *dev,
11291                         struct mlx5_flow_handle *handle)
11292 {
11293         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
11294         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
11295                                                             typeof(*tbl), tbl);
11296         int ret;
11297
11298         MLX5_ASSERT(matcher->matcher_object);
11299         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
11300         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
11301         return ret;
11302 }
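
/*
 * Hedged usage sketch: a caller observing the release contract shared
 * by the resource-release helpers in this file, where a non-zero
 * return means other flows still hold references to the resource.
 */
static __rte_unused void
flow_dv_matcher_release_sketch(struct rte_eth_dev *dev,
			       struct mlx5_flow_handle *handle)
{
	if (flow_dv_matcher_release(dev, handle))
		DRV_LOG(DEBUG, "matcher is still referenced");
}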
11303
11304 /**
11305  * Release encap_decap resource.
11306  *
11307  * @param list
11308  *   Pointer to the hash list.
11309  * @param entry
11310  *   Pointer to the existing resource entry object.
11311  */
11312 void
11313 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
11314                               struct mlx5_hlist_entry *entry)
11315 {
11316         struct mlx5_dev_ctx_shared *sh = list->ctx;
11317         struct mlx5_flow_dv_encap_decap_resource *res =
11318                 container_of(entry, typeof(*res), entry);
11319
11320         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
11321         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
11322 }
11323
11324 /**
11325  * Release an encap/decap resource.
11326  *
11327  * @param dev
11328  *   Pointer to Ethernet device.
11329  * @param encap_decap_idx
11330  *   Index of encap decap resource.
11331  *
11332  * @return
11333  *   1 while a reference on it exists, 0 when freed.
11334  */
11335 static int
11336 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
11337                                      uint32_t encap_decap_idx)
11338 {
11339         struct mlx5_priv *priv = dev->data->dev_private;
11340         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
11341
11342         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
11343                                         encap_decap_idx);
11344         if (!cache_resource)
11345                 return 0;
11346         MLX5_ASSERT(cache_resource->action);
11347         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
11348                                      &cache_resource->entry);
11349 }
11350
11351 /**
11352  * Release a jump-to-table action resource.
11353  *
11354  * @param dev
11355  *   Pointer to Ethernet device.
11356  * @param handle
11357  *   Pointer to mlx5_flow_handle.
11358  *
11359  * @return
11360  *   1 while a reference on it exists, 0 when freed.
11361  */
11362 static int
11363 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
11364                                   struct mlx5_flow_handle *handle)
11365 {
11366         struct mlx5_priv *priv = dev->data->dev_private;
11367         struct mlx5_flow_tbl_data_entry *tbl_data;
11368
11369         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
11370                              handle->rix_jump);
11371         if (!tbl_data)
11372                 return 0;
11373         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
11374 }
11375
11376 void
11377 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
11378                          struct mlx5_hlist_entry *entry)
11379 {
11380         struct mlx5_flow_dv_modify_hdr_resource *res =
11381                 container_of(entry, typeof(*res), entry);
11382
11383         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
11384         mlx5_free(entry);
11385 }
11386
11387 /**
11388  * Release a modify-header resource.
11389  *
11390  * @param dev
11391  *   Pointer to Ethernet device.
11392  * @param handle
11393  *   Pointer to mlx5_flow_handle.
11394  *
11395  * @return
11396  *   1 while a reference on it exists, 0 when freed.
11397  */
11398 static int
11399 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
11400                                     struct mlx5_flow_handle *handle)
11401 {
11402         struct mlx5_priv *priv = dev->data->dev_private;
11403         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
11404
11405         MLX5_ASSERT(entry->action);
11406         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
11407 }
11408
11409 void
11410 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
11411                           struct mlx5_cache_entry *entry)
11412 {
11413         struct mlx5_dev_ctx_shared *sh = list->ctx;
11414         struct mlx5_flow_dv_port_id_action_resource *cache =
11415                         container_of(entry, typeof(*cache), entry);
11416
11417         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11418         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
11419 }
11420
11421 /**
11422  * Release port ID action resource.
11423  *
11424  * @param dev
11425  *   Pointer to Ethernet device.
11426  * @param port_id
11427  *   Port ID action resource index.
11428  *
11429  * @return
11430  *   1 while a reference on it exists, 0 when freed.
11431  */
11432 static int
11433 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
11434                                         uint32_t port_id)
11435 {
11436         struct mlx5_priv *priv = dev->data->dev_private;
11437         struct mlx5_flow_dv_port_id_action_resource *cache;
11438
11439         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
11440         if (!cache)
11441                 return 0;
11442         MLX5_ASSERT(cache->action);
11443         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
11444                                      &cache->entry);
11445 }
11446
11447 /**
11448  * Release shared RSS action resource.
11449  *
11450  * @param dev
11451  *   Pointer to Ethernet device.
11452  * @param srss
11453  *   Shared RSS action index.
11454  */
11455 static void
11456 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
11457 {
11458         struct mlx5_priv *priv = dev->data->dev_private;
11459         struct mlx5_shared_action_rss *shared_rss;
11460
11461         shared_rss = mlx5_ipool_get
11462                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
11463         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
11464 }
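
/*
 * Hedged counterpart sketch: taking a reference on a shared RSS action
 * uses the matching relaxed atomic increment. This helper is
 * hypothetical; the real acquire path lives with the shared action
 * attach logic elsewhere in the driver.
 */
static __rte_unused void
flow_dv_shared_rss_acquire_sketch(struct mlx5_shared_action_rss *shared_rss)
{
	__atomic_add_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
}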
11465
11466 void
11467 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
11468                             struct mlx5_cache_entry *entry)
11469 {
11470         struct mlx5_dev_ctx_shared *sh = list->ctx;
11471         struct mlx5_flow_dv_push_vlan_action_resource *cache =
11472                         container_of(entry, typeof(*cache), entry);
11473
11474         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11475         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
11476 }
11477
11478 /**
11479  * Release push vlan action resource.
11480  *
11481  * @param dev
11482  *   Pointer to Ethernet device.
11483  * @param handle
11484  *   Pointer to mlx5_flow_handle.
11485  *
11486  * @return
11487  *   1 while a reference on it exists, 0 when freed.
11488  */
11489 static int
11490 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
11491                                           struct mlx5_flow_handle *handle)
11492 {
11493         struct mlx5_priv *priv = dev->data->dev_private;
11494         struct mlx5_flow_dv_push_vlan_action_resource *cache;
11495         uint32_t idx = handle->dvh.rix_push_vlan;
11496
11497         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
11498         if (!cache)
11499                 return 0;
11500         MLX5_ASSERT(cache->action);
11501         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
11502                                      &cache->entry);
11503 }
11504
11505 /**
11506  * Release the fate resource.
11507  *
11508  * @param dev
11509  *   Pointer to Ethernet device.
11510  * @param handle
11511  *   Pointer to mlx5_flow_handle.
11512  */
11513 static void
11514 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
11515                                struct mlx5_flow_handle *handle)
11516 {
11517         if (!handle->rix_fate)
11518                 return;
11519         switch (handle->fate_action) {
11520         case MLX5_FLOW_FATE_QUEUE:
11521                 mlx5_hrxq_release(dev, handle->rix_hrxq);
11522                 break;
11523         case MLX5_FLOW_FATE_JUMP:
11524                 flow_dv_jump_tbl_resource_release(dev, handle);
11525                 break;
11526         case MLX5_FLOW_FATE_PORT_ID:
11527                 flow_dv_port_id_action_resource_release(dev,
11528                                 handle->rix_port_id_action);
11529                 break;
11530         default:
11531                 DRV_LOG(DEBUG, "Incorrect fate action: %d", handle->fate_action);
11532                 break;
11533         }
11534         handle->rix_fate = 0;
11535 }
11536
11537 void
11538 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
11539                          struct mlx5_cache_entry *entry)
11540 {
11541         struct mlx5_flow_dv_sample_resource *cache_resource =
11542                         container_of(entry, typeof(*cache_resource), entry);
11543         struct rte_eth_dev *dev = cache_resource->dev;
11544         struct mlx5_priv *priv = dev->data->dev_private;
11545
11546         if (cache_resource->verbs_action)
11547                 claim_zero(mlx5_flow_os_destroy_flow_action
11548                                 (cache_resource->verbs_action));
11549         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11550                 if (cache_resource->default_miss)
11551                         claim_zero(mlx5_flow_os_destroy_flow_action
11552                           (cache_resource->default_miss));
11553         }
11554         if (cache_resource->normal_path_tbl)
11555                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11556                         cache_resource->normal_path_tbl);
11557         flow_dv_sample_sub_actions_release(dev,
11558                                 &cache_resource->sample_idx);
11559         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11560                         cache_resource->idx);
11561         DRV_LOG(DEBUG, "sample resource %p: removed",
11562                 (void *)cache_resource);
11563 }
11564
11565 /**
11566  * Release a sample resource.
11567  *
11568  * @param dev
11569  *   Pointer to Ethernet device.
11570  * @param handle
11571  *   Pointer to mlx5_flow_handle.
11572  *
11573  * @return
11574  *   1 while a reference on it exists, 0 when freed.
11575  */
11576 static int
11577 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11578                                      struct mlx5_flow_handle *handle)
11579 {
11580         struct mlx5_priv *priv = dev->data->dev_private;
11581         struct mlx5_flow_dv_sample_resource *cache_resource;
11582
11583         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11584                          handle->dvh.rix_sample);
11585         if (!cache_resource)
11586                 return 0;
11587         MLX5_ASSERT(cache_resource->verbs_action);
11588         return mlx5_cache_unregister(&priv->sh->sample_action_list,
11589                                      &cache_resource->entry);
11590 }
11591
11592 void
11593 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
11594                              struct mlx5_cache_entry *entry)
11595 {
11596         struct mlx5_flow_dv_dest_array_resource *cache_resource =
11597                         container_of(entry, typeof(*cache_resource), entry);
11598         struct rte_eth_dev *dev = cache_resource->dev;
11599         struct mlx5_priv *priv = dev->data->dev_private;
11600         uint32_t i = 0;
11601
11602         MLX5_ASSERT(cache_resource->action);
11603         if (cache_resource->action)
11604                 claim_zero(mlx5_flow_os_destroy_flow_action
11605                                         (cache_resource->action));
11606         for (; i < cache_resource->num_of_dest; i++)
11607                 flow_dv_sample_sub_actions_release(dev,
11608                                 &cache_resource->sample_idx[i]);
11609         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11610                         cache_resource->idx);
11611         DRV_LOG(DEBUG, "destination array resource %p: removed",
11612                 (void *)cache_resource);
11613 }
11614
11615 /**
11616  * Release a destination array resource.
11617  *
11618  * @param dev
11619  *   Pointer to Ethernet device.
11620  * @param handle
11621  *   Pointer to mlx5_flow_handle.
11622  *
11623  * @return
11624  *   1 while a reference on it exists, 0 when freed.
11625  */
11626 static int
11627 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11628                                     struct mlx5_flow_handle *handle)
11629 {
11630         struct mlx5_priv *priv = dev->data->dev_private;
11631         struct mlx5_flow_dv_dest_array_resource *cache;
11632
11633         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11634                                handle->dvh.rix_dest_array);
11635         if (!cache)
11636                 return 0;
11637         MLX5_ASSERT(cache->action);
11638         return mlx5_cache_unregister(&priv->sh->dest_array_list,
11639                                      &cache->entry);
11640 }
11641
11642 static void
11643 flow_dv_geneve_tlv_option_resource_release(struct rte_eth_dev *dev)
11644 {
11645         struct mlx5_priv *priv = dev->data->dev_private;
11646         struct mlx5_dev_ctx_shared *sh = priv->sh;
11647         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource =
11648                                 sh->geneve_tlv_option_resource;
11649         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
11650         if (geneve_opt_resource) {
11651                 if (!(__atomic_sub_fetch(&geneve_opt_resource->refcnt, 1,
11652                                          __ATOMIC_RELAXED))) {
11653                         claim_zero(mlx5_devx_cmd_destroy
11654                                         (geneve_opt_resource->obj));
11655                         mlx5_free(sh->geneve_tlv_option_resource);
11656                         sh->geneve_tlv_option_resource = NULL;
11657                 }
11658         }
11659         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
11660 }
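
/*
 * Hedged counterpart sketch of the refcounted-singleton acquire pattern
 * paired with the release above; the DevX object creation path is
 * elided and the helper is hypothetical.
 */
static __rte_unused int
flow_dv_geneve_tlv_option_ref_sketch(struct mlx5_dev_ctx_shared *sh)
{
	int ret = 0;

	rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
	if (sh->geneve_tlv_option_resource)
		__atomic_add_fetch(&sh->geneve_tlv_option_resource->refcnt,
				   1, __ATOMIC_RELAXED);
	else
		ret = -ENOTSUP; /* Creation path elided in this sketch. */
	rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
	return ret;
}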
11661
11662 /**
11663  * Remove the flow from the NIC but keep it in memory.
11664  * Lock free (the mutex should be acquired by the caller).
11665  *
11666  * @param[in] dev
11667  *   Pointer to Ethernet device.
11668  * @param[in, out] flow
11669  *   Pointer to flow structure.
11670  */
11671 static void
11672 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11673 {
11674         struct mlx5_flow_handle *dh;
11675         uint32_t handle_idx;
11676         struct mlx5_priv *priv = dev->data->dev_private;
11677
11678         if (!flow)
11679                 return;
11680         handle_idx = flow->dev_handles;
11681         while (handle_idx) {
11682                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11683                                     handle_idx);
11684                 if (!dh)
11685                         return;
11686                 if (dh->drv_flow) {
11687                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
11688                         dh->drv_flow = NULL;
11689                 }
11690                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
11691                         flow_dv_fate_resource_release(dev, dh);
11692                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11693                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11694                 handle_idx = dh->next.next;
11695         }
11696 }
11697
11698 /**
11699  * Remove the flow from the NIC and the memory.
11700  * Lock free (the mutex should be acquired by the caller).
11701  *
11702  * @param[in] dev
11703  *   Pointer to the Ethernet device structure.
11704  * @param[in, out] flow
11705  *   Pointer to flow structure.
11706  */
11707 static void
11708 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11709 {
11710         struct mlx5_flow_handle *dev_handle;
11711         struct mlx5_priv *priv = dev->data->dev_private;
11712         uint32_t srss = 0;
11713
11714         if (!flow)
11715                 return;
11716         flow_dv_remove(dev, flow);
11717         if (flow->counter) {
11718                 flow_dv_counter_free(dev, flow->counter);
11719                 flow->counter = 0;
11720         }
11721         if (flow->meter) {
11722                 struct mlx5_flow_meter *fm;
11723
11724                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
11725                                     flow->meter);
11726                 if (fm)
11727                         mlx5_flow_meter_detach(fm);
11728                 flow->meter = 0;
11729         }
11730         if (flow->age)
11731                 flow_dv_aso_age_release(dev, flow->age);
11732         if (flow->geneve_tlv_option) {
11733                 flow_dv_geneve_tlv_option_resource_release(dev);
11734                 flow->geneve_tlv_option = 0;
11735         }
11736         while (flow->dev_handles) {
11737                 uint32_t tmp_idx = flow->dev_handles;
11738
11739                 dev_handle = mlx5_ipool_get(priv->sh->ipool
11740                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
11741                 if (!dev_handle)
11742                         return;
11743                 flow->dev_handles = dev_handle->next.next;
11744                 if (dev_handle->dvh.matcher)
11745                         flow_dv_matcher_release(dev, dev_handle);
11746                 if (dev_handle->dvh.rix_sample)
11747                         flow_dv_sample_resource_release(dev, dev_handle);
11748                 if (dev_handle->dvh.rix_dest_array)
11749                         flow_dv_dest_array_resource_release(dev, dev_handle);
11750                 if (dev_handle->dvh.rix_encap_decap)
11751                         flow_dv_encap_decap_resource_release(dev,
11752                                 dev_handle->dvh.rix_encap_decap);
11753                 if (dev_handle->dvh.modify_hdr)
11754                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
11755                 if (dev_handle->dvh.rix_push_vlan)
11756                         flow_dv_push_vlan_action_resource_release(dev,
11757                                                                   dev_handle);
11758                 if (dev_handle->dvh.rix_tag)
11759                         flow_dv_tag_release(dev,
11760                                             dev_handle->dvh.rix_tag);
11761                 if (dev_handle->fate_action != MLX5_FLOW_FATE_SHARED_RSS)
11762                         flow_dv_fate_resource_release(dev, dev_handle);
11763                 else if (!srss)
11764                         srss = dev_handle->rix_srss;
11765                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11766                            tmp_idx);
11767         }
11768         if (srss)
11769                 flow_dv_shared_rss_action_release(dev, srss);
11770 }
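
/*
 * Hedged application-side sketch: the public rte_flow_destroy() call
 * that reaches flow_dv_destroy() above through the driver flow ops.
 */
static __rte_unused int
flow_dv_destroy_usage_sketch(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error error;

	return rte_flow_destroy(port_id, flow, &error);
}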
11771
11772 /**
11773  * Release array of hash RX queue objects.
11774  * Helper function.
11775  *
11776  * @param[in] dev
11777  *   Pointer to the Ethernet device structure.
11778  * @param[in, out] hrxqs
11779  *   Array of hash RX queue objects.
11780  *
11781  * @return
11782  *   Total number of references to hash RX queue objects in *hrxqs* array
11783  *   after this operation.
11784  */
11785 static int
11786 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11787                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11788 {
11789         size_t i;
11790         int remaining = 0;
11791
11792         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11793                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11794
11795                 if (!ret)
11796                         (*hrxqs)[i] = 0;
11797                 remaining += ret;
11798         }
11799         return remaining;
11800 }
11801
11802 /**
11803  * Release all hash RX queue objects representing shared RSS action.
11804  *
11805  * @param[in] dev
11806  *   Pointer to the Ethernet device structure.
11807  * @param[in, out] action
11808  *   Shared RSS action to remove hash RX queue objects from.
11809  *
11810  * @return
11811  *   Total number of references to hash RX queue objects stored in *action*
11812  *   after this operation.
11813  *   Expected to be 0 if no external references are held.
11814  */
11815 static int
11816 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11817                                  struct mlx5_shared_action_rss *action)
11818 {
11819         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11820                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11821 }
11822
11823 /**
11824  * Set up the shared RSS action.
11825  * Prepare set of hash RX queue objects sufficient to handle all valid
11826  * hash_fields combinations (see enum ibv_rx_hash_fields).
11827  *
11828  * @param[in] dev
11829  *   Pointer to the Ethernet device structure.
11830  * @param[in] action_idx
11831  *   Shared RSS action ipool index.
11832  * @param[in, out] action
11833  *   Partially initialized shared RSS action.
11834  * @param[out] error
11835  *   Perform verbose error reporting if not NULL. Initialized in case of
11836  *   error only.
11837  *
11838  * @return
11839  *   0 on success, otherwise negative errno value.
11840  */
11841 static int
11842 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11843                            uint32_t action_idx,
11844                            struct mlx5_shared_action_rss *action,
11845                            struct rte_flow_error *error)
11846 {
11847         struct mlx5_flow_rss_desc rss_desc = { 0 };
11848         size_t i;
11849         int err;
11850
11851         if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
11852                 return rte_flow_error_set(error, rte_errno,
11853                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11854                                           "cannot setup indirection table");
11855         }
11856         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11857         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11858         rss_desc.const_q = action->origin.queue;
11859         rss_desc.queue_num = action->origin.queue_num;
11860         /* Set non-zero value to indicate a shared RSS. */
11861         rss_desc.shared_rss = action_idx;
11862         rss_desc.ind_tbl = action->ind_tbl;
11863         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11864                 uint32_t hrxq_idx;
11865                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11866                 int tunnel;
11867
11868                 for (tunnel = 0; tunnel < 2; tunnel++) {
11869                         rss_desc.tunnel = tunnel;
11870                         rss_desc.hash_fields = hash_fields;
11871                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11872                         if (!hrxq_idx) {
11873                                 rte_flow_error_set
11874                                         (error, rte_errno,
11875                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11876                                          "cannot get hash queue");
11877                                 goto error_hrxq_new;
11878                         }
11879                         err = __flow_dv_action_rss_hrxq_set
11880                                 (action, hash_fields, tunnel, hrxq_idx);
11881                         MLX5_ASSERT(!err);
11882                 }
11883         }
11884         return 0;
11885 error_hrxq_new:
11886         err = rte_errno;
11887         __flow_dv_action_rss_hrxqs_release(dev, action);
11888         if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
11889                 action->ind_tbl = NULL;
11890         rte_errno = err;
11891         return -rte_errno;
11892 }
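/*
 * Illustrative note (not part of the upstream code): the setup above fills a
 * small matrix of hash RX queue indices, one entry per (hash_fields, tunnel)
 * pair, so later flow creation can resolve a queue object in O(1).
 * Conceptually:
 *
 *     uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];         (non-tunnel)
 *     uint32_t hrxq_tunnel[MLX5_RSS_HASH_FIELDS_LEN];  (tunnel)
 *     hrxq_idx = (tunnel ? hrxq_tunnel : hrxq)[i], where
 *     mlx5_rss_hash_fields[i] == requested hash_fields.
 */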
11893
11894 /**
11895  * Create shared RSS action.
11896  *
11897  * @param[in] dev
11898  *   Pointer to the Ethernet device structure.
11899  * @param[in] conf
11900  *   Shared action configuration.
11901  * @param[in] rss
11902  *   RSS action specification used to create shared action.
11903  * @param[out] error
11904  *   Perform verbose error reporting if not NULL. Initialized in case of
11905  *   error only.
11906  *
11907  * @return
11908  *   A valid shared action ID in case of success, 0 otherwise and
11909  *   rte_errno is set.
11910  */
11911 static uint32_t
11912 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11913                             const struct rte_flow_shared_action_conf *conf,
11914                             const struct rte_flow_action_rss *rss,
11915                             struct rte_flow_error *error)
11916 {
11917         struct mlx5_priv *priv = dev->data->dev_private;
11918         struct mlx5_shared_action_rss *shared_action = NULL;
11919         void *queue = NULL;
11920         struct rte_flow_action_rss *origin;
11921         const uint8_t *rss_key;
11922         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11923         uint32_t idx;
11924
11925         RTE_SET_USED(conf);
11926         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11927                             0, SOCKET_ID_ANY);
11928         shared_action = mlx5_ipool_zmalloc
11929                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
11930         if (!shared_action || !queue) {
11931                 rte_flow_error_set(error, ENOMEM,
11932                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11933                                    "cannot allocate resource memory");
11934                 goto error_rss_init;
11935         }
11936         if (idx >= (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
11937                 rte_flow_error_set(error, E2BIG,
11938                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11939                                    "rss action number out of range");
11940                 goto error_rss_init;
11941         }
11942         shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
11943                                              sizeof(*shared_action->ind_tbl),
11944                                              0, SOCKET_ID_ANY);
11945         if (!shared_action->ind_tbl) {
11946                 rte_flow_error_set(error, ENOMEM,
11947                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11948                                    "cannot allocate resource memory");
11949                 goto error_rss_init;
11950         }
11951         memcpy(queue, rss->queue, queue_size);
11952         shared_action->ind_tbl->queues = queue;
11953         shared_action->ind_tbl->queues_n = rss->queue_num;
11954         origin = &shared_action->origin;
11955         origin->func = rss->func;
11956         origin->level = rss->level;
11957         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11958         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11959         /* NULL RSS key indicates default RSS key. */
11960         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11961         memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11962         origin->key = &shared_action->key[0];
11963         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11964         origin->queue = queue;
11965         origin->queue_num = rss->queue_num;
11966         if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
11967                 goto error_rss_init;
11968         rte_spinlock_init(&shared_action->action_rss_sl);
11969         __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
11970         rte_spinlock_lock(&priv->shared_act_sl);
11971         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11972                      &priv->rss_shared_actions, idx, shared_action, next);
11973         rte_spinlock_unlock(&priv->shared_act_sl);
11974         return idx;
11975 error_rss_init:
11976         if (shared_action) {
11977                 if (shared_action->ind_tbl)
11978                         mlx5_free(shared_action->ind_tbl);
11979                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11980                                 idx);
11981         }
11982         if (queue)
11983                 mlx5_free(queue);
11984         return 0;
11985 }
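/*
 * Hedged usage sketch (application side, not part of the PMD): the create
 * path above is reached through the generic rte_flow API, e.g.:
 *
 *     uint16_t queues[2] = { 0, 1 };
 *     struct rte_flow_action_rss rss = {
 *             .types = ETH_RSS_IP,
 *             .queue = queues,
 *             .queue_num = 2,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &rss,
 *     };
 *     struct rte_flow_shared_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_error error;
 *     struct rte_flow_shared_action *sa =
 *             rte_flow_shared_action_create(port_id, &conf, &action, &error);
 *
 * A NULL key or zero types is then normalized to the defaults shown above.
 */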
11986
11987 /**
11988  * Destroy the shared RSS action.
11989  * Release related hash RX queue objects.
11990  *
11991  * @param[in] dev
11992  *   Pointer to the Ethernet device structure.
11993  * @param[in] idx
11994  *   The shared RSS action object ID to be removed.
11995  * @param[out] error
11996  *   Perform verbose error reporting if not NULL. Initialized in case of
11997  *   error only.
11998  *
11999  * @return
12000  *   0 on success, otherwise negative errno value.
12001  */
12002 static int
12003 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
12004                              struct rte_flow_error *error)
12005 {
12006         struct mlx5_priv *priv = dev->data->dev_private;
12007         struct mlx5_shared_action_rss *shared_rss =
12008             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12009         uint32_t old_refcnt = 1;
12010         int remaining;
12011         uint16_t *queue = NULL;
12012
12013         if (!shared_rss)
12014                 return rte_flow_error_set(error, EINVAL,
12015                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12016                                           "invalid shared action");
12017         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
12018         if (remaining)
12019                 return rte_flow_error_set(error, EBUSY,
12020                                           RTE_FLOW_ERROR_TYPE_ACTION,
12021                                           NULL,
12022                                           "shared rss hrxq has references");
12023         queue = shared_rss->ind_tbl->queues;
12024         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
12025         if (remaining)
12026                 return rte_flow_error_set(error, EBUSY,
12027                                           RTE_FLOW_ERROR_TYPE_ACTION,
12028                                           NULL,
12029                                           "shared rss indirection table has"
12030                                           " references");
12031         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
12032                                          0, 0, __ATOMIC_ACQUIRE,
12033                                          __ATOMIC_RELAXED))
12034                 return rte_flow_error_set(error, EBUSY,
12035                                           RTE_FLOW_ERROR_TYPE_ACTION,
12036                                           NULL,
12037                                           "shared rss has references");
12038         mlx5_free(queue);
12039         rte_spinlock_lock(&priv->shared_act_sl);
12040         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12041                      &priv->rss_shared_actions, idx, shared_rss, next);
12042         rte_spinlock_unlock(&priv->shared_act_sl);
12043         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
12044                         idx);
12045         return 0;
12046 }
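/*
 * Teardown ordering note (sketch): the hash RX queues are released first,
 * then the indirection table, and only then the 1 -> 0 refcnt CAS is tried;
 * any remaining reference at an earlier stage aborts the release with EBUSY,
 * so a shared RSS action still in use is never freed.
 */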
12047
12048 /**
12049  * Create a shared action, lock free
12050  * (mutex should be acquired by the caller).
12051  * Dispatcher for action type specific call.
12052  *
12053  * @param[in] dev
12054  *   Pointer to the Ethernet device structure.
12055  * @param[in] conf
12056  *   Shared action configuration.
12057  * @param[in] action
12058  *   Action specification used to create shared action.
12059  * @param[out] error
12060  *   Perform verbose error reporting if not NULL. Initialized in case of
12061  *   error only.
12062  *
12063  * @return
12064  *   A valid shared action handle in case of success, NULL otherwise and
12065  *   rte_errno is set.
12066  */
12067 static struct rte_flow_shared_action *
12068 flow_dv_action_create(struct rte_eth_dev *dev,
12069                       const struct rte_flow_shared_action_conf *conf,
12070                       const struct rte_flow_action *action,
12071                       struct rte_flow_error *err)
12072 {
12073         uint32_t idx = 0;
12074         uint32_t ret = 0;
12075
12076         switch (action->type) {
12077         case RTE_FLOW_ACTION_TYPE_RSS:
12078                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
12079                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
12080                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12081                 break;
12082         case RTE_FLOW_ACTION_TYPE_AGE:
12083                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
12084                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
12085                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
12086                 if (ret) {
12087                         struct mlx5_aso_age_action *aso_age =
12088                                               flow_aso_age_get_by_idx(dev, ret);
12089
12090                         if (!aso_age->age_params.context)
12091                                 aso_age->age_params.context =
12092                                                          (void *)(uintptr_t)idx;
12093                 }
12094                 break;
12095         default:
12096                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
12097                                    NULL, "action type not supported");
12098                 break;
12099         }
12100         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
12101 }
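/*
 * Handle layout note (derived from the code above): the returned opaque
 * pointer is not a real pointer but a 32-bit value packing the action type
 * above MLX5_SHARED_ACTION_TYPE_OFFSET and the ipool index below it:
 *
 *     idx  = (type << MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
 *     type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
 *     ret  = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 *
 * flow_dv_action_destroy()/update()/query() below undo this packing.
 */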
12102
12103 /**
12104  * Destroy the shared action.
12105  * Release action-related resources on the NIC and in memory.
12106  * Lock free (mutex should be acquired by the caller).
12107  * Dispatcher for action type specific call.
12108  *
12109  * @param[in] dev
12110  *   Pointer to the Ethernet device structure.
12111  * @param[in] action
12112  *   The shared action object to be removed.
12113  * @param[out] error
12114  *   Perform verbose error reporting if not NULL. Initialized in case of
12115  *   error only.
12116  *
12117  * @return
12118  *   0 on success, otherwise negative errno value.
12119  */
12120 static int
12121 flow_dv_action_destroy(struct rte_eth_dev *dev,
12122                        struct rte_flow_shared_action *action,
12123                        struct rte_flow_error *error)
12124 {
12125         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12126         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12127         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12128         int ret;
12129
12130         switch (type) {
12131         case MLX5_SHARED_ACTION_TYPE_RSS:
12132                 return __flow_dv_action_rss_release(dev, idx, error);
12133         case MLX5_SHARED_ACTION_TYPE_AGE:
12134                 ret = flow_dv_aso_age_release(dev, idx);
12135                 if (ret)
12136                         /*
12137                          * In this case, the last flow holding the
12138                          * reference will actually release the age action.
12139                          */
12140                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
12141                                 " released with references %d.", idx, ret);
12142                 return 0;
12143         default:
12144                 return rte_flow_error_set(error, ENOTSUP,
12145                                           RTE_FLOW_ERROR_TYPE_ACTION,
12146                                           NULL,
12147                                           "action type not supported");
12148         }
12149 }
12150
12151 /**
12152  * Update the shared RSS action configuration in place.
12153  *
12154  * @param[in] dev
12155  *   Pointer to the Ethernet device structure.
12156  * @param[in] idx
12157  *   The shared RSS action object ID to be updated.
12158  * @param[in] action_conf
12159  *   RSS action specification used to modify *shared_rss*.
12160  * @param[out] error
12161  *   Perform verbose error reporting if not NULL. Initialized in case of
12162  *   error only.
12163  *
12164  * @return
12165  *   0 on success, otherwise negative errno value.
12166  * @note: currently only update of the RSS queues is supported.
12167  */
12168 static int
12169 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
12170                             const struct rte_flow_action_rss *action_conf,
12171                             struct rte_flow_error *error)
12172 {
12173         struct mlx5_priv *priv = dev->data->dev_private;
12174         struct mlx5_shared_action_rss *shared_rss =
12175             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
12176         int ret = 0;
12177         void *queue = NULL;
12178         uint16_t *queue_old = NULL;
12179         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
12180
12181         if (!shared_rss)
12182                 return rte_flow_error_set(error, EINVAL,
12183                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12184                                           "invalid shared action to update");
12185         queue = mlx5_malloc(MLX5_MEM_ZERO,
12186                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
12187                             0, SOCKET_ID_ANY);
12188         if (!queue)
12189                 return rte_flow_error_set(error, ENOMEM,
12190                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12191                                           NULL,
12192                                           "cannot allocate resource memory");
12193         memcpy(queue, action_conf->queue, queue_size);
12194         MLX5_ASSERT(shared_rss->ind_tbl);
12195         rte_spinlock_lock(&shared_rss->action_rss_sl);
12196         queue_old = shared_rss->ind_tbl->queues;
12197         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
12198                                         queue, action_conf->queue_num, true);
12199         if (ret) {
12200                 mlx5_free(queue);
12201                 ret = rte_flow_error_set(error, rte_errno,
12202                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
12203                                           "cannot update indirection table");
12204         } else {
12205                 mlx5_free(queue_old);
12206                 shared_rss->origin.queue = queue;
12207                 shared_rss->origin.queue_num = action_conf->queue_num;
12208         }
12209         rte_spinlock_unlock(&shared_rss->action_rss_sl);
12210         return ret;
12211 }
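/*
 * Hedged usage sketch (application side): queue-set updates reach the
 * function above through the generic API, replacing the indirection table
 * contents without touching already installed flows:
 *
 *     uint16_t new_queues[4] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss rss_conf = {
 *             .queue = new_queues,
 *             .queue_num = 4,
 *     };
 *     struct rte_flow_action update = {
 *             .type = RTE_FLOW_ACTION_TYPE_RSS,
 *             .conf = &rss_conf,
 *     };
 *     struct rte_flow_error error;
 *     int rc = rte_flow_shared_action_update(port_id, sa, &update, &error);
 */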
12212
12213 /**
12214  * Update the shared action configuration in place, lock free
12215  * (mutex should be acquired by the caller).
12216  *
12217  * @param[in] dev
12218  *   Pointer to the Ethernet device structure.
12219  * @param[in] action
12220  *   The shared action object to be updated.
12221  * @param[in] action_conf
12222  *   Action specification used to modify *action*.
12223  *   *action_conf* should be of type correlating with type of the *action*,
12224  *   otherwise considered as invalid.
12225  * @param[out] error
12226  *   Perform verbose error reporting if not NULL. Initialized in case of
12227  *   error only.
12228  *
12229  * @return
12230  *   0 on success, otherwise negative errno value.
12231  */
12232 static int
12233 flow_dv_action_update(struct rte_eth_dev *dev,
12234                         struct rte_flow_shared_action *action,
12235                         const void *action_conf,
12236                         struct rte_flow_error *err)
12237 {
12238         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12239         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12240         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12241
12242         switch (type) {
12243         case MLX5_SHARED_ACTION_TYPE_RSS:
12244                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
12245         default:
12246                 return rte_flow_error_set(err, ENOTSUP,
12247                                           RTE_FLOW_ERROR_TYPE_ACTION,
12248                                           NULL,
12249                                           "action type update not supported");
12250         }
12251 }
12252
12253 static int
12254 flow_dv_action_query(struct rte_eth_dev *dev,
12255                      const struct rte_flow_shared_action *action, void *data,
12256                      struct rte_flow_error *error)
12257 {
12258         struct mlx5_age_param *age_param;
12259         struct rte_flow_query_age *resp;
12260         uint32_t act_idx = (uint32_t)(uintptr_t)action;
12261         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
12262         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
12263
12264         switch (type) {
12265         case MLX5_SHARED_ACTION_TYPE_AGE:
12266                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
12267                 resp = data;
12268                 resp->aged = __atomic_load_n(&age_param->state,
12269                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
12270                                                                           1 : 0;
12271                 resp->sec_since_last_hit_valid = !resp->aged;
12272                 if (resp->sec_since_last_hit_valid)
12273                         resp->sec_since_last_hit = __atomic_load_n
12274                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
12275                 return 0;
12276         default:
12277                 return rte_flow_error_set(error, ENOTSUP,
12278                                           RTE_FLOW_ERROR_TYPE_ACTION,
12279                                           NULL,
12280                                           "action type query not supported");
12281         }
12282 }
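/*
 * Hedged usage sketch (application side): the aging state of a shared AGE
 * action is read back through the generic query API:
 *
 *     struct rte_flow_query_age age = { 0 };
 *     struct rte_flow_error error;
 *     if (rte_flow_shared_action_query(port_id, sa, &age, &error) == 0 &&
 *         age.aged)
 *             printf("shared AGE action timed out\n");
 */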
12283
12284 /**
12285  * Query a DV flow rule for its statistics via DevX.
12286  *
12287  * @param[in] dev
12288  *   Pointer to Ethernet device.
12289  * @param[in] flow
12290  *   Pointer to the sub flow.
12291  * @param[out] data
12292  *   Data retrieved by the query.
12293  * @param[out] error
12294  *   Perform verbose error reporting if not NULL.
12295  *
12296  * @return
12297  *   0 on success, a negative errno value otherwise and rte_errno is set.
12298  */
12299 static int
12300 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
12301                     void *data, struct rte_flow_error *error)
12302 {
12303         struct mlx5_priv *priv = dev->data->dev_private;
12304         struct rte_flow_query_count *qc = data;
12305
12306         if (!priv->config.devx)
12307                 return rte_flow_error_set(error, ENOTSUP,
12308                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12309                                           NULL,
12310                                           "counters are not supported");
12311         if (flow->counter) {
12312                 uint64_t pkts, bytes;
12313                 struct mlx5_flow_counter *cnt;
12314                 int err;
12315
12316                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
12317                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
12318                                            &bytes);
12319
12320                 if (err)
12321                         return rte_flow_error_set(error, -err,
12322                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12323                                         NULL, "cannot read counters");
12324                 qc->hits_set = 1;
12325                 qc->bytes_set = 1;
12326                 qc->hits = pkts - cnt->hits;
12327                 qc->bytes = bytes - cnt->bytes;
12328                 if (qc->reset) {
12329                         cnt->hits = pkts;
12330                         cnt->bytes = bytes;
12331                 }
12332                 return 0;
12333         }
12334         return rte_flow_error_set(error, EINVAL,
12335                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12336                                   NULL,
12337                                   "counters are not available");
12338 }
12339
12340 /**
12341  * Query a flow rule AGE action for aging information.
12342  *
12343  * @param[in] dev
12344  *   Pointer to Ethernet device.
12345  * @param[in] flow
12346  *   Pointer to the sub flow.
12347  * @param[out] data
12348  *   Data retrieved by the query.
12349  * @param[out] error
12350  *   Perform verbose error reporting if not NULL.
12351  *
12352  * @return
12353  *   0 on success, a negative errno value otherwise and rte_errno is set.
12354  */
12355 static int
12356 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
12357                   void *data, struct rte_flow_error *error)
12358 {
12359         struct rte_flow_query_age *resp = data;
12360         struct mlx5_age_param *age_param;
12361
12362         if (flow->age) {
12363                 struct mlx5_aso_age_action *act =
12364                                      flow_aso_age_get_by_idx(dev, flow->age);
12365
12366                 age_param = &act->age_params;
12367         } else if (flow->counter) {
12368                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
12369
12370                 if (!age_param || !age_param->timeout)
12371                         return rte_flow_error_set
12372                                         (error, EINVAL,
12373                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12374                                          NULL, "cannot read age data");
12375         } else {
12376                 return rte_flow_error_set(error, EINVAL,
12377                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12378                                           NULL, "age data not available");
12379         }
12380         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
12381                                      AGE_TMOUT ? 1 : 0;
12382         resp->sec_since_last_hit_valid = !resp->aged;
12383         if (resp->sec_since_last_hit_valid)
12384                 resp->sec_since_last_hit = __atomic_load_n
12385                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
12386         return 0;
12387 }
12388
12389 /**
12390  * Query a flow.
12391  *
12392  * @see rte_flow_query()
12393  * @see rte_flow_ops
12394  */
12395 static int
12396 flow_dv_query(struct rte_eth_dev *dev,
12397               struct rte_flow *flow __rte_unused,
12398               const struct rte_flow_action *actions __rte_unused,
12399               void *data __rte_unused,
12400               struct rte_flow_error *error __rte_unused)
12401 {
12402         int ret = -EINVAL;
12403
12404         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
12405                 switch (actions->type) {
12406                 case RTE_FLOW_ACTION_TYPE_VOID:
12407                         break;
12408                 case RTE_FLOW_ACTION_TYPE_COUNT:
12409                         ret = flow_dv_query_count(dev, flow, data, error);
12410                         break;
12411                 case RTE_FLOW_ACTION_TYPE_AGE:
12412                         ret = flow_dv_query_age(dev, flow, data, error);
12413                         break;
12414                 default:
12415                         return rte_flow_error_set(error, ENOTSUP,
12416                                                   RTE_FLOW_ERROR_TYPE_ACTION,
12417                                                   actions,
12418                                                   "action not supported");
12419                 }
12420         }
12421         return ret;
12422 }
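/*
 * Hedged usage sketch (application side): the dispatcher above walks an
 * END-terminated action array, so a counter query looks like:
 *
 *     struct rte_flow_query_count qc = { .reset = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error error;
 *     if (rte_flow_query(port_id, flow, actions, &qc, &error) == 0 &&
 *         qc.hits_set)
 *             printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                    qc.hits, qc.bytes);
 */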
12423
12424 /**
12425  * Destroy the meter table set.
12426  * Lock free (mutex should be acquired by the caller).
12427  *
12428  * @param[in] dev
12429  *   Pointer to Ethernet device.
12430  * @param[in] tbl
12431  *   Pointer to the meter table set.
12432  *
12433  * @return
12434  *   Always 0.
12435  */
12436 static int
12437 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
12438                         struct mlx5_meter_domains_infos *tbl)
12439 {
12440         struct mlx5_priv *priv = dev->data->dev_private;
12441         struct mlx5_meter_domains_infos *mtd =
12442                                 (struct mlx5_meter_domains_infos *)tbl;
12443
12444         if (!mtd || !priv->config.dv_flow_en)
12445                 return 0;
12446         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
12447                 claim_zero(mlx5_flow_os_destroy_flow
12448                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
12449         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
12450                 claim_zero(mlx5_flow_os_destroy_flow
12451                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
12452         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
12453                 claim_zero(mlx5_flow_os_destroy_flow
12454                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
12455         if (mtd->egress.color_matcher)
12456                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12457                            (mtd->egress.color_matcher));
12458         if (mtd->egress.any_matcher)
12459                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12460                            (mtd->egress.any_matcher));
12461         if (mtd->egress.tbl)
12462                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
12463         if (mtd->egress.sfx_tbl)
12464                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
12465         if (mtd->ingress.color_matcher)
12466                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12467                            (mtd->ingress.color_matcher));
12468         if (mtd->ingress.any_matcher)
12469                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12470                            (mtd->ingress.any_matcher));
12471         if (mtd->ingress.tbl)
12472                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
12473         if (mtd->ingress.sfx_tbl)
12474                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12475                                              mtd->ingress.sfx_tbl);
12476         if (mtd->transfer.color_matcher)
12477                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12478                            (mtd->transfer.color_matcher));
12479         if (mtd->transfer.any_matcher)
12480                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12481                            (mtd->transfer.any_matcher));
12482         if (mtd->transfer.tbl)
12483                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
12484         if (mtd->transfer.sfx_tbl)
12485                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12486                                              mtd->transfer.sfx_tbl);
12487         if (mtd->drop_actn)
12488                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
12489         mlx5_free(mtd);
12490         return 0;
12491 }
12492
12493 /* Number of meter flow actions, count and jump or count and drop. */
12494 #define METER_ACTIONS 2
12495
12496 /**
12497  * Create the meter table and suffix table for the specified domain.
12498  *
12499  * @param[in] dev
12500  *   Pointer to Ethernet device.
12501  * @param[in,out] mtb
12502  *   Pointer to DV meter table set.
12503  * @param[in] egress
12504  *   Table attribute, nonzero to prepare the egress domain tables.
12505  * @param[in] transfer
12506  *   Table attribute, nonzero to prepare the FDB (transfer) domain tables.
12507  * @param[in] color_reg_c_idx
12508  *   Reg C index for color match.
12509  *
12510  * @return
12511  *   0 on success, -1 otherwise and rte_errno is set.
12512  */
12513 static int
12514 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
12515                            struct mlx5_meter_domains_infos *mtb,
12516                            uint8_t egress, uint8_t transfer,
12517                            uint32_t color_reg_c_idx)
12518 {
12519         struct mlx5_priv *priv = dev->data->dev_private;
12520         struct mlx5_dev_ctx_shared *sh = priv->sh;
12521         struct mlx5_flow_dv_match_params mask = {
12522                 .size = sizeof(mask.buf),
12523         };
12524         struct mlx5_flow_dv_match_params value = {
12525                 .size = sizeof(value.buf),
12526         };
12527         struct mlx5dv_flow_matcher_attr dv_attr = {
12528                 .type = IBV_FLOW_ATTR_NORMAL,
12529                 .priority = 0,
12530                 .match_criteria_enable = 0,
12531                 .match_mask = (void *)&mask,
12532         };
12533         void *actions[METER_ACTIONS];
12534         struct mlx5_meter_domain_info *dtb;
12535         struct rte_flow_error error;
12536         int i = 0;
12537         int ret;
12538
12539         if (transfer)
12540                 dtb = &mtb->transfer;
12541         else if (egress)
12542                 dtb = &mtb->egress;
12543         else
12544                 dtb = &mtb->ingress;
12545         /* Create the meter table with METER level. */
12546         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
12547                                             egress, transfer, false, NULL, 0,
12548                                             0, &error);
12549         if (!dtb->tbl) {
12550                 DRV_LOG(ERR, "Failed to create meter policer table.");
12551                 return -1;
12552         }
12553         /* Create the meter suffix table with SUFFIX level. */
12554         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
12555                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
12556                                             egress, transfer, false, NULL, 0,
12557                                             0, &error);
12558         if (!dtb->sfx_tbl) {
12559                 DRV_LOG(ERR, "Failed to create meter suffix table.");
12560                 return -1;
12561         }
12562         /* Create matchers, Any and Color. */
12563         dv_attr.priority = 3;
12564         dv_attr.match_criteria_enable = 0;
12565         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12566                                                &dtb->any_matcher);
12567         if (ret) {
12568                 DRV_LOG(ERR, "Failed to create meter"
12569                              " policer default matcher.");
12570                 goto error_exit;
12571         }
12572         dv_attr.priority = 0;
12573         dv_attr.match_criteria_enable =
12574                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
12575         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
12576                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
12577         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12578                                                &dtb->color_matcher);
12579         if (ret) {
12580                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
12581                 goto error_exit;
12582         }
12583         if (mtb->count_actns[RTE_MTR_DROPPED])
12584                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
12585         actions[i++] = mtb->drop_actn;
12586         /* Default rule: lowest priority, match any, actions: drop. */
12587         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
12588                                        actions,
12589                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
12590         if (ret) {
12591                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
12592                 goto error_exit;
12593         }
12594         return 0;
12595 error_exit:
12596         return -1;
12597 }
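/*
 * Resulting per-domain layout (sketch derived from the code above):
 *
 *   METER level table:
 *     priority 0: color matcher on the color register; the per-color
 *                 policer rules are installed later by
 *                 flow_dv_create_policer_forward_rule().
 *     priority 3: any matcher with the default [count +] drop rule.
 *   SUFFIX level table:
 *     target of the per-color jump action for packets passing the meter.
 */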
12598
12599 /**
12600  * Create the needed meter and suffix tables.
12601  * Lock free (mutex should be acquired by the caller).
12602  *
12603  * @param[in] dev
12604  *   Pointer to Ethernet device.
12605  * @param[in] fm
12606  *   Pointer to the flow meter.
12607  *
12608  * @return
12609  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12610  */
12611 static struct mlx5_meter_domains_infos *
12612 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
12613                        const struct mlx5_flow_meter *fm)
12614 {
12615         struct mlx5_priv *priv = dev->data->dev_private;
12616         struct mlx5_meter_domains_infos *mtb;
12617         int ret;
12618         int i;
12619
12620         if (!priv->mtr_en) {
12621                 rte_errno = ENOTSUP;
12622                 return NULL;
12623         }
12624         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
12625         if (!mtb) {
12626                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
12627                 return NULL;
12628         }
12629         /* Create meter count actions. */
12630         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
12631                 struct mlx5_flow_counter *cnt;
12632                 if (!fm->policer_stats.cnt[i])
12633                         continue;
12634                 cnt = flow_dv_counter_get_by_idx(dev,
12635                       fm->policer_stats.cnt[i], NULL);
12636                 mtb->count_actns[i] = cnt->action;
12637         }
12638         /* Create drop action. */
12639         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
12640         if (ret) {
12641                 DRV_LOG(ERR, "Failed to create drop action.");
12642                 goto error_exit;
12643         }
12644         /* Egress meter table. */
12645         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
12646         if (ret) {
12647                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
12648                 goto error_exit;
12649         }
12650         /* Ingress meter table. */
12651         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
12652         if (ret) {
12653                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
12654                 goto error_exit;
12655         }
12656         /* FDB meter table. */
12657         if (priv->config.dv_esw_en) {
12658                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
12659                                                  priv->mtr_color_reg);
12660                 if (ret) {
12661                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
12662                         goto error_exit;
12663                 }
12664         }
12665         return mtb;
12666 error_exit:
12667         flow_dv_destroy_mtr_tbl(dev, mtb);
12668         return NULL;
12669 }
12670
12671 /**
12672  * Destroy domain policer rule.
12673  *
12674  * @param[in] dt
12675  *   Pointer to domain table.
12676  */
12677 static void
12678 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12679 {
12680         int i;
12681
12682         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12683                 if (dt->policer_rules[i]) {
12684                         claim_zero(mlx5_flow_os_destroy_flow
12685                                    (dt->policer_rules[i]));
12686                         dt->policer_rules[i] = NULL;
12687                 }
12688         }
12689         if (dt->jump_actn) {
12690                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12691                 dt->jump_actn = NULL;
12692         }
12693 }
12694
12695 /**
12696  * Destroy policer rules.
12697  *
12698  * @param[in] dev
12699  *   Pointer to Ethernet device.
12700  * @param[in] fm
12701  *   Pointer to flow meter structure.
12702  * @param[in] attr
12703  *   Pointer to flow attributes.
12704  *
12705  * @return
12706  *   Always 0.
12707  */
12708 static int
12709 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12710                               const struct mlx5_flow_meter *fm,
12711                               const struct rte_flow_attr *attr)
12712 {
12713         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12714
12715         if (!mtb)
12716                 return 0;
12717         if (attr->egress)
12718                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
12719         if (attr->ingress)
12720                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12721         if (attr->transfer)
12722                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12723         return 0;
12724 }
12725
12726 /**
12727  * Create the meter policer rule for the specified domain.
12728  *
12729  * @param[in] fm
12730  *   Pointer to flow meter structure.
12731  * @param[in] dtb
12732  *   Pointer to the DV meter domain table.
12733  * @param[in] mtr_reg_c
12734  *   Color match REG_C.
12735  *
12736  * @return
12737  *   0 on success, -1 otherwise.
12738  */
12739 static int
12740 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
12741                                     struct mlx5_meter_domain_info *dtb,
12742                                     uint8_t mtr_reg_c)
12743 {
12744         struct mlx5_flow_dv_match_params matcher = {
12745                 .size = sizeof(matcher.buf),
12746         };
12747         struct mlx5_flow_dv_match_params value = {
12748                 .size = sizeof(value.buf),
12749         };
12750         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12751         void *actions[METER_ACTIONS];
12752         int i;
12753         int ret = 0;
12754
12755         /* Create jump action. */
12756         if (!dtb->jump_actn)
12757                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12758                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
12759         if (ret) {
12760                 DRV_LOG(ERR, "Failed to create policer jump action.");
12761                 goto error;
12762         }
12763         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12764                 int j = 0;
12765
12766                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
12767                                        rte_col_2_mlx5_col(i), UINT8_MAX);
12768                 if (mtb->count_actns[i])
12769                         actions[j++] = mtb->count_actns[i];
12770                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
12771                         actions[j++] = mtb->drop_actn;
12772                 else
12773                         actions[j++] = dtb->jump_actn;
12774                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
12775                                                (void *)&value, j, actions,
12776                                                &dtb->policer_rules[i]);
12777                 if (ret) {
12778                         DRV_LOG(ERR, "Failed to create policer rule.");
12779                         goto error;
12780                 }
12781         }
12782         return 0;
12783 error:
12784         rte_errno = errno;
12785         return -1;
12786 }
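/*
 * Per-color dispatch created above (sketch): for each color i matched on
 * the color register (green, yellow, red), the installed rule executes
 *
 *     [count_actns[i]] + (fm->action[i] == MTR_POLICER_ACTION_DROP ?
 *                         drop action : jump to the suffix table)
 *
 * while unmatched packets fall through to the table's default drop rule.
 */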
12787
12788 /**
12789  * Create policer rules.
12790  *
12791  * @param[in] dev
12792  *   Pointer to Ethernet device.
12793  * @param[in] fm
12794  *   Pointer to flow meter structure.
12795  * @param[in] attr
12796  *   Pointer to flow attributes.
12797  *
12798  * @return
12799  *   0 on success, -1 otherwise.
12800  */
12801 static int
12802 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12803                              struct mlx5_flow_meter *fm,
12804                              const struct rte_flow_attr *attr)
12805 {
12806         struct mlx5_priv *priv = dev->data->dev_private;
12807         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12808         int ret;
12809
12810         if (attr->egress) {
12811                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12812                                                 priv->mtr_color_reg);
12813                 if (ret) {
12814                         DRV_LOG(ERR, "Failed to create egress policer.");
12815                         goto error;
12816                 }
12817         }
12818         if (attr->ingress) {
12819                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12820                                                 priv->mtr_color_reg);
12821                 if (ret) {
12822                         DRV_LOG(ERR, "Failed to create ingress policer.");
12823                         goto error;
12824                 }
12825         }
12826         if (attr->transfer) {
12827                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12828                                                 priv->mtr_color_reg);
12829                 if (ret) {
12830                         DRV_LOG(ERR, "Failed to create transfer policer.");
12831                         goto error;
12832                 }
12833         }
12834         return 0;
12835 error:
12836         flow_dv_destroy_policer_rules(dev, fm, attr);
12837         return -1;
12838 }
12839
12840 /**
12841  * Validate the batch counter support in root table.
12842  *
12843  * Create a simple flow with an invalid counter and a drop action on the root
12844  * table to check whether a batch counter with an offset is supported there.
12845  *
12846  * @param[in] dev
12847  *   Pointer to rte_eth_dev structure.
12848  *
12849  * @return
12850  *   0 on success, a negative errno value otherwise and rte_errno is set.
12851  */
12852 int
12853 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12854 {
12855         struct mlx5_priv *priv = dev->data->dev_private;
12856         struct mlx5_dev_ctx_shared *sh = priv->sh;
12857         struct mlx5_flow_dv_match_params mask = {
12858                 .size = sizeof(mask.buf),
12859         };
12860         struct mlx5_flow_dv_match_params value = {
12861                 .size = sizeof(value.buf),
12862         };
12863         struct mlx5dv_flow_matcher_attr dv_attr = {
12864                 .type = IBV_FLOW_ATTR_NORMAL,
12865                 .priority = 0,
12866                 .match_criteria_enable = 0,
12867                 .match_mask = (void *)&mask,
12868         };
12869         void *actions[2] = { 0 };
12870         struct mlx5_flow_tbl_resource *tbl = NULL;
12871         struct mlx5_devx_obj *dcs = NULL;
12872         void *matcher = NULL;
12873         void *flow = NULL;
12874         int ret = -1;
12875
12876         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12877         if (!tbl)
12878                 goto err;
12879         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12880         if (!dcs)
12881                 goto err;
12882         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12883                                                     &actions[0]);
12884         if (ret)
12885                 goto err;
12886         actions[1] = priv->drop_queue.hrxq->action;
12887         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12888         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12889                                                &matcher);
12890         if (ret)
12891                 goto err;
12892         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12893                                        actions, &flow);
12894 err:
12895         /*
12896          * If a batch counter with an offset is not supported, the driver
12897          * will not validate the invalid offset value and flow creation will
12898          * succeed, meaning batch counters are unsupported in the root table.
12899          *
12900          * Otherwise, if flow creation fails, the counter offset is supported.
12901          */
12902         if (flow) {
12903                 DRV_LOG(INFO, "Batch counter is not supported in root "
12904                               "table. Switch to fallback mode.");
12905                 rte_errno = ENOTSUP;
12906                 ret = -rte_errno;
12907                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12908         } else {
12909                 /* Check the matcher to ensure validation failed at flow creation. */
12910                 if (!matcher || errno != EINVAL)
12911                         DRV_LOG(ERR, "Unexpected error in counter offset "
12912                                      "support detection");
12913                 ret = 0;
12914         }
12915         if (actions[0])
12916                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
12917         if (matcher)
12918                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12919         if (tbl)
12920                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12921         if (dcs)
12922                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12923         return ret;
12924 }
12925
12926 /**
12927  * Query a devx counter.
12928  *
12929  * @param[in] dev
12930  *   Pointer to the Ethernet device structure.
12931  * @param[in] counter
12932  *   Index to the flow counter.
12933  * @param[in] clear
12934  *   Set to clear the counter statistics.
12935  * @param[out] pkts
12936  *   The statistics value of packets.
12937  * @param[out] bytes
12938  *   The statistics value of bytes.
12939  *
12940  * @return
12941  *   0 on success, otherwise return -1.
12942  */
12943 static int
12944 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12945                       uint64_t *pkts, uint64_t *bytes)
12946 {
12947         struct mlx5_priv *priv = dev->data->dev_private;
12948         struct mlx5_flow_counter *cnt;
12949         uint64_t inn_pkts, inn_bytes;
12950         int ret;
12951
12952         if (!priv->config.devx)
12953                 return -1;
12954
12955         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12956         if (ret)
12957                 return -1;
12958         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12959         *pkts = inn_pkts - cnt->hits;
12960         *bytes = inn_bytes - cnt->bytes;
12961         if (clear) {
12962                 cnt->hits = inn_pkts;
12963                 cnt->bytes = inn_bytes;
12964         }
12965         return 0;
12966 }
12967
12968 /**
12969  * Get aged-out flows.
12970  *
12971  * @param[in] dev
12972  *   Pointer to the Ethernet device structure.
12973  * @param[in] context
12974  *   The address of an array of pointers to the aged-out flow contexts.
12975  * @param[in] nb_contexts
12976  *   The length of context array pointers.
12977  * @param[out] error
12978  *   Perform verbose error reporting if not NULL. Initialized in case of
12979  *   error only.
12980  *
12981  * @return
12982  *   The number of contexts retrieved on success, otherwise a negative
12983  *   errno value. If nb_contexts is 0, return the number of all aged
12984  *   contexts. If nb_contexts is not 0, return the number of aged flows
12985  *   reported in the context array.
12987  */
12988 static int
12989 flow_get_aged_flows(struct rte_eth_dev *dev,
12990                     void **context,
12991                     uint32_t nb_contexts,
12992                     struct rte_flow_error *error)
12993 {
12994         struct mlx5_priv *priv = dev->data->dev_private;
12995         struct mlx5_age_info *age_info;
12996         struct mlx5_age_param *age_param;
12997         struct mlx5_flow_counter *counter;
12998         struct mlx5_aso_age_action *act;
12999         int nb_flows = 0;
13000
13001         if (nb_contexts && !context)
13002                 return rte_flow_error_set(error, EINVAL,
13003                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13004                                           NULL, "empty context");
13005         age_info = GET_PORT_AGE_INFO(priv);
13006         rte_spinlock_lock(&age_info->aged_sl);
13007         LIST_FOREACH(act, &age_info->aged_aso, next) {
13008                 nb_flows++;
13009                 if (nb_contexts) {
13010                         context[nb_flows - 1] =
13011                                                 act->age_params.context;
13012                         if (!(--nb_contexts))
13013                                 break;
13014                 }
13015         }
13016         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
13017                 nb_flows++;
13018                 if (nb_contexts) {
13019                         age_param = MLX5_CNT_TO_AGE(counter);
13020                         context[nb_flows - 1] = age_param->context;
13021                         if (!(--nb_contexts))
13022                                 break;
13023                 }
13024         }
13025         rte_spinlock_unlock(&age_info->aged_sl);
13026         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
13027         return nb_flows;
13028 }
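/*
 * Hedged usage sketch (application side): this is typically reached via
 * rte_flow_get_aged_flows() from an RTE_ETH_EVENT_FLOW_AGED event callback,
 * first with nb_contexts == 0 to size the array:
 *
 *     struct rte_flow_error error;
 *     int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *     if (n > 0) {
 *             void **ctx = calloc(n, sizeof(*ctx));
 *
 *             n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *             (destroy or refresh the aged flows via their contexts)
 *             free(ctx);
 *     }
 */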
13029
13030 /*
13031  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
13032  */
13033 static uint32_t
13034 flow_dv_counter_allocate(struct rte_eth_dev *dev)
13035 {
13036         return flow_dv_counter_alloc(dev, 0);
13037 }
13038
13039 /**
13040  * Validate shared action.
13041  * Dispatcher for action type specific validation.
13042  *
13043  * @param[in] dev
13044  *   Pointer to the Ethernet device structure.
13045  * @param[in] conf
13046  *   Shared action configuration.
13047  * @param[in] action
13048  *   The shared action object to validate.
13049  * @param[out] error
13050  *   Perform verbose error reporting if not NULL. Initialized in case of
13051  *   error only.
13052  *
13053  * @return
13054  *   0 on success, otherwise negative errno value.
13055  */
13056 static int
13057 flow_dv_action_validate(struct rte_eth_dev *dev,
13058                         const struct rte_flow_shared_action_conf *conf,
13059                         const struct rte_flow_action *action,
13060                         struct rte_flow_error *err)
13061 {
13062         struct mlx5_priv *priv = dev->data->dev_private;
13063
13064         RTE_SET_USED(conf);
13065         switch (action->type) {
13066         case RTE_FLOW_ACTION_TYPE_RSS:
13067                 return mlx5_validate_action_rss(dev, action, err);
13068         case RTE_FLOW_ACTION_TYPE_AGE:
13069                 if (!priv->sh->aso_age_mng)
13070                         return rte_flow_error_set(err, ENOTSUP,
13071                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
13072                                                 NULL,
13073                                              "shared age action not supported");
13074                 return flow_dv_validate_action_age(0, action, dev, err);
13075         default:
13076                 return rte_flow_error_set(err, ENOTSUP,
13077                                           RTE_FLOW_ERROR_TYPE_ACTION,
13078                                           NULL,
13079                                           "action type not supported");
13080         }
13081 }
13082
13083 static int
13084 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
13085 {
13086         struct mlx5_priv *priv = dev->data->dev_private;
13087         int ret = 0;
13088
13089         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
13090                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->rx_domain,
13091                                                 flags);
13092                 if (ret != 0)
13093                         return ret;
13094         }
13095         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
13096                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->tx_domain, flags);
13097                 if (ret != 0)
13098                         return ret;
13099         }
13100         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
13101                 ret = mlx5_os_flow_dr_sync_domain(priv->sh->fdb_domain, flags);
13102                 if (ret != 0)
13103                         return ret;
13104         }
13105         return 0;
13106 }
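/*
 * Hedged usage sketch (application side): this op backs the PMD-specific
 * rte_pmd_mlx5_sync_flow() API (see rte_pmd_mlx5.h), e.g.:
 *
 *     int rc = rte_pmd_mlx5_sync_flow(port_id,
 *                                     MLX5_DOMAIN_BIT_NIC_RX |
 *                                     MLX5_DOMAIN_BIT_NIC_TX |
 *                                     MLX5_DOMAIN_BIT_FDB);
 *
 * which waits until queued flow rule changes in the selected domains are
 * committed to the HW.
 */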
13107
13108 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
13109         .validate = flow_dv_validate,
13110         .prepare = flow_dv_prepare,
13111         .translate = flow_dv_translate,
13112         .apply = flow_dv_apply,
13113         .remove = flow_dv_remove,
13114         .destroy = flow_dv_destroy,
13115         .query = flow_dv_query,
13116         .create_mtr_tbls = flow_dv_create_mtr_tbl,
13117         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
13118         .create_policer_rules = flow_dv_create_policer_rules,
13119         .destroy_policer_rules = flow_dv_destroy_policer_rules,
13120         .counter_alloc = flow_dv_counter_allocate,
13121         .counter_free = flow_dv_counter_free,
13122         .counter_query = flow_dv_counter_query,
13123         .get_aged_flows = flow_get_aged_flows,
13124         .action_validate = flow_dv_action_validate,
13125         .action_create = flow_dv_action_create,
13126         .action_destroy = flow_dv_action_destroy,
13127         .action_update = flow_dv_action_update,
13128         .action_query = flow_dv_action_query,
13129         .sync_domain = flow_dv_sync_domain,
13130 };
13131
13132 #endif /* defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) */
13133