net/mlx5: wrap adjust flow priority per OS
drivers/net/mlx5/mlx5_flow_dv.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>
#include <rte_eal_paging.h>
#include <rte_mpls.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "rte_pmd_mlx5.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
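/*
 * Note: the anonymous bitfield and the 32-bit "attr" member alias each
 * other, so a single write such as attr->attr = 0 clears all of the
 * flags at once (used below when a tunnel header resets the outer-layer
 * attributes).
 */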

static int
flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
                                      uint32_t encap_decap_idx);

static int
flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
                                        uint32_t port_id);
static void
flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() avoids multiple L3/L4 layer cases other than tunnel
 * mode. For tunnel mode, the items to be modified are the outermost ones.
92  *
93  * @param[in] item
94  *   Pointer to item specification.
95  * @param[out] attr
96  *   Pointer to flow attributes structure.
97  * @param[in] dev_flow
98  *   Pointer to the sub flow.
99  * @param[in] tunnel_decap
100  *   Whether action is after tunnel decapsulation.
101  */
102 static void
103 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
104                   struct mlx5_flow *dev_flow, bool tunnel_decap)
105 {
106         uint64_t layers = dev_flow->handle->layers;
107
        /*
         * If layers is already initialized, this dev_flow is a suffix flow
         * and the layer flags were set by the prefix flow. Use the layer
         * flags from the prefix flow, as the suffix flow may not carry the
         * user-defined items once the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
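/*
 * For example (illustrative), a pattern of ETH / IPV4 / UDP items with no
 * prefix-flow layers leaves the loop with attr->ipv4 = 1, attr->udp = 1
 * and attr->valid = 1, so later modify-header conversions pick the IPv4
 * and UDP field tables.
 */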

/**
 * Convert rte_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_color value to convert.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/* Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;
        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
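/*
 * TCI layout reminder: PCP occupies the top 3 bits (shift 13) and VID the
 * low 12 bits. E.g. (illustrative) PCP 5 with VID 100 yields
 * vlan_tci = (5 << 13) | 100 = 0xa064.
 */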

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
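/*
 * E.g. (illustrative) data = {0x12, 0x34, 0x56} with size = 3 takes the
 * 3-byte branch: the first two bytes give 0x1234, which is shifted left
 * by 8 and OR-ed with 0x56, returning 0x123456.
 */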

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field
 * description and the item mask. Data bit offset and width of each action
 * are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should likewise be presented in big-endian format.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
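/*
 * E.g. (illustrative) a 4-byte field with mask 0x00fff000: rte_bsf32()
 * gives off_b = 12, __builtin_clz() reports 8 leading zero bits, so
 * size_b = 32 - 12 - 8 = 12, i.e. a 12-bit modification at bit offset 12.
 */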

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times; each addition of
                 * UINT32_MAX decrements Y by 1 (mod 2^32).
                 */
                value *= UINT32_MAX;
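                /*
                 * E.g. X = 3: value = 3 * UINT32_MAX = 3 * 2^32 - 3, whose
                 * low 32 bits are 2^32 - 3; adding that to Y wraps around
                 * to Y - 3.
                 */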
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment.
                 * To simulate decrementing Y by X using increments,
                 * add UINT32_MAX to Y X times; each addition of
                 * UINT32_MAX decrements Y by 1 (mod 2^32).
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NON] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NON);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NON);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
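/*
 * E.g. (illustrative) with dv_regc0_mask == 0x00ffff00: rte_bsf32() == 8
 * and rte_fls_u32() == 24, so copying to reg_c[0] on a little-endian host
 * uses dst offset 8 and mask = 0x00ffff00 << (32 - 24) = 0xffff0000,
 * while copying from reg_c[0] uses dst offset 32 - (24 - 8) = 16.
 */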

/**
 * Convert MARK action to DV specification. This routine is used
 * only in extensive metadata mode and requires a metadata register
 * to be handled. In legacy mode the hardware tag resource is engaged
 * instead.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg != REG_NON);
        /*
         * In the datapath code there are no endianness conversions
         * for performance reasons; all pattern conversions are done
         * in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bit offset in IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte-aligned from bit 0
         * to bit 5, to stay compatible with IPv4. No shifting is needed
         * in the IPv6 case since rdma-core requires a byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
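/*
 * With the standard DPDK mask definitions, RTE_IPV4_HDR_DSCP_MASK (0xfc)
 * >> 2 and RTE_IPV6_HDR_DSCP_MASK (0x0fc00000) >> 22 both reduce to 0x3f,
 * placing the 6 DSCP bits at bit offsets 0-5 as rdma-core expects.
 */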
1322
1323 /**
1324  * Validate MARK item.
1325  *
1326  * @param[in] dev
1327  *   Pointer to the rte_eth_dev structure.
1328  * @param[in] item
1329  *   Item specification.
1330  * @param[in] attr
1331  *   Attributes of flow that includes this item.
1332  * @param[out] error
1333  *   Pointer to error structure.
1334  *
1335  * @return
1336  *   0 on success, a negative errno value otherwise and rte_errno is set.
1337  */
1338 static int
1339 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1340                            const struct rte_flow_item *item,
1341                            const struct rte_flow_attr *attr __rte_unused,
1342                            struct rte_flow_error *error)
1343 {
1344         struct mlx5_priv *priv = dev->data->dev_private;
1345         struct mlx5_dev_config *config = &priv->config;
1346         const struct rte_flow_item_mark *spec = item->spec;
1347         const struct rte_flow_item_mark *mask = item->mask;
1348         const struct rte_flow_item_mark nic_mask = {
1349                 .id = priv->sh->dv_mark_mask,
1350         };
1351         int ret;
1352
1353         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1354                 return rte_flow_error_set(error, ENOTSUP,
1355                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1356                                           "extended metadata feature"
1357                                           " isn't enabled");
1358         if (!mlx5_flow_ext_mreg_supported(dev))
1359                 return rte_flow_error_set(error, ENOTSUP,
1360                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1361                                           "extended metadata register"
1362                                           " isn't supported");
1363         if (!nic_mask.id)
1364                 return rte_flow_error_set(error, ENOTSUP,
1365                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1366                                           "extended metadata register"
1367                                           " isn't available");
1368         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1369         if (ret < 0)
1370                 return ret;
1371         if (!spec)
1372                 return rte_flow_error_set(error, EINVAL,
1373                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1374                                           item->spec,
1375                                           "data cannot be empty");
1376         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1377                 return rte_flow_error_set(error, EINVAL,
1378                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1379                                           &spec->id,
1380                                           "mark id exceeds the limit");
1381         if (!mask)
1382                 mask = &nic_mask;
1383         if (!mask->id)
1384                 return rte_flow_error_set(error, EINVAL,
1385                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1386                                         "mask cannot be zero");
1387
1388         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1389                                         (const uint8_t *)&nic_mask,
1390                                         sizeof(struct rte_flow_item_mark),
1391                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1392         if (ret < 0)
1393                 return ret;
1394         return 0;
1395 }
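
/*
 * Usage sketch with hypothetical values: a MARK pattern item that passes
 * the validation above - non-empty spec, id below MLX5_FLOW_MARK_MAX and
 * a non-zero mask (assuming the device mark mask covers it).
 */
static const struct rte_flow_item_mark flow_dv_example_mark_spec = {
        .id = 0xcafe,
};
static const struct rte_flow_item_mark flow_dv_example_mark_mask = {
        .id = 0xffffff,
};
static const struct rte_flow_item flow_dv_example_mark_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_MARK,
        .spec = &flow_dv_example_mark_spec,
        .mask = &flow_dv_example_mark_mask,
};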
1396
1397 /**
1398  * Validate META item.
1399  *
1400  * @param[in] dev
1401  *   Pointer to the rte_eth_dev structure.
1402  * @param[in] item
1403  *   Item specification.
1404  * @param[in] attr
1405  *   Attributes of flow that includes this item.
1406  * @param[out] error
1407  *   Pointer to error structure.
1408  *
1409  * @return
1410  *   0 on success, a negative errno value otherwise and rte_errno is set.
1411  */
1412 static int
1413 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1414                            const struct rte_flow_item *item,
1415                            const struct rte_flow_attr *attr,
1416                            struct rte_flow_error *error)
1417 {
1418         struct mlx5_priv *priv = dev->data->dev_private;
1419         struct mlx5_dev_config *config = &priv->config;
1420         const struct rte_flow_item_meta *spec = item->spec;
1421         const struct rte_flow_item_meta *mask = item->mask;
1422         struct rte_flow_item_meta nic_mask = {
1423                 .data = UINT32_MAX
1424         };
1425         int reg;
1426         int ret;
1427
1428         if (!spec)
1429                 return rte_flow_error_set(error, EINVAL,
1430                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1431                                           item->spec,
1432                                           "data cannot be empty");
1433         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1434                 if (!mlx5_flow_ext_mreg_supported(dev))
1435                         return rte_flow_error_set(error, ENOTSUP,
1436                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1437                                           "extended metadata register"
1438                                           " isn't supported");
1439                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1440                 if (reg < 0)
1441                         return reg;
1442                 if (reg == REG_NON)
1443                         return rte_flow_error_set(error, ENOTSUP,
1444                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1445                                         "unavailable extended metadata register");
1446                 if (reg == REG_B)
1447                         return rte_flow_error_set(error, ENOTSUP,
1448                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1449                                           "match on reg_b "
1450                                           "isn't supported");
1451                 if (reg != REG_A)
1452                         nic_mask.data = priv->sh->dv_meta_mask;
1453         } else if (attr->transfer) {
1454                 return rte_flow_error_set(error, ENOTSUP,
1455                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1456                                         "extended metadata feature "
1457                                         "should be enabled when "
1458                                         "meta item is requested "
1459                                         "with e-switch mode");
1460         }
1461         if (!mask)
1462                 mask = &rte_flow_item_meta_mask;
1463         if (!mask->data)
1464                 return rte_flow_error_set(error, EINVAL,
1465                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1466                                         "mask cannot be zero");
1467
1468         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1469                                         (const uint8_t *)&nic_mask,
1470                                         sizeof(struct rte_flow_item_meta),
1471                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1472         return ret;
1473 }
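
/*
 * Usage sketch with hypothetical values: a META pattern item with the
 * non-empty spec and non-zero mask required by the checks above.
 */
static const struct rte_flow_item_meta flow_dv_example_meta_spec = {
        .data = 0x1234,
};
static const struct rte_flow_item_meta flow_dv_example_meta_mask = {
        .data = UINT32_MAX,
};
static const struct rte_flow_item flow_dv_example_meta_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_META,
        .spec = &flow_dv_example_meta_spec,
        .mask = &flow_dv_example_meta_mask,
};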
1474
1475 /**
1476  * Validate TAG item.
1477  *
1478  * @param[in] dev
1479  *   Pointer to the rte_eth_dev structure.
1480  * @param[in] item
1481  *   Item specification.
1482  * @param[in] attr
1483  *   Attributes of flow that includes this item.
1484  * @param[out] error
1485  *   Pointer to error structure.
1486  *
1487  * @return
1488  *   0 on success, a negative errno value otherwise and rte_errno is set.
1489  */
1490 static int
1491 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1492                           const struct rte_flow_item *item,
1493                           const struct rte_flow_attr *attr __rte_unused,
1494                           struct rte_flow_error *error)
1495 {
1496         const struct rte_flow_item_tag *spec = item->spec;
1497         const struct rte_flow_item_tag *mask = item->mask;
1498         const struct rte_flow_item_tag nic_mask = {
1499                 .data = RTE_BE32(UINT32_MAX),
1500                 .index = 0xff,
1501         };
1502         int ret;
1503
1504         if (!mlx5_flow_ext_mreg_supported(dev))
1505                 return rte_flow_error_set(error, ENOTSUP,
1506                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1507                                           "extensive metadata register"
1508                                           " isn't supported");
1509         if (!spec)
1510                 return rte_flow_error_set(error, EINVAL,
1511                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1512                                           item->spec,
1513                                           "data cannot be empty");
1514         if (!mask)
1515                 mask = &rte_flow_item_tag_mask;
1516         if (!mask->data)
1517                 return rte_flow_error_set(error, EINVAL,
1518                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1519                                         "mask cannot be zero");
1520
1521         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1522                                         (const uint8_t *)&nic_mask,
1523                                         sizeof(struct rte_flow_item_tag),
1524                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1525         if (ret < 0)
1526                 return ret;
1527         if (mask->index != 0xff)
1528                 return rte_flow_error_set(error, EINVAL,
1529                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1530                                           "partial mask for tag index"
1531                                           " is not supported");
1532         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1533         if (ret < 0)
1534                 return ret;
1535         MLX5_ASSERT(ret != REG_NON);
1536         return 0;
1537 }
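
/*
 * Usage sketch with hypothetical values: a TAG pattern item. Note the
 * index mask must be the full 0xff, as enforced above.
 */
static const struct rte_flow_item_tag flow_dv_example_tag_spec = {
        .data = 1,
        .index = 0,
};
static const struct rte_flow_item_tag flow_dv_example_tag_mask = {
        .data = UINT32_MAX,
        .index = 0xff,
};
static const struct rte_flow_item flow_dv_example_tag_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_TAG,
        .spec = &flow_dv_example_tag_spec,
        .mask = &flow_dv_example_tag_mask,
};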
1538
1539 /**
1540  * Validate port_id item.
1541  *
1542  * @param[in] dev
1543  *   Pointer to the rte_eth_dev structure.
1544  * @param[in] item
1545  *   Item specification.
1546  * @param[in] attr
1547  *   Attributes of flow that includes this item.
1548  * @param[in] item_flags
1549  *   Bit-fields that holds the items detected until now.
1550  * @param[out] error
1551  *   Pointer to error structure.
1552  *
1553  * @return
1554  *   0 on success, a negative errno value otherwise and rte_errno is set.
1555  */
1556 static int
1557 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1558                               const struct rte_flow_item *item,
1559                               const struct rte_flow_attr *attr,
1560                               uint64_t item_flags,
1561                               struct rte_flow_error *error)
1562 {
1563         const struct rte_flow_item_port_id *spec = item->spec;
1564         const struct rte_flow_item_port_id *mask = item->mask;
1565         const struct rte_flow_item_port_id switch_mask = {
1566                         .id = 0xffffffff,
1567         };
1568         struct mlx5_priv *esw_priv;
1569         struct mlx5_priv *dev_priv;
1570         int ret;
1571
1572         if (!attr->transfer)
1573                 return rte_flow_error_set(error, EINVAL,
1574                                           RTE_FLOW_ERROR_TYPE_ITEM,
1575                                           NULL,
1576                                           "match on port id is valid only"
1577                                           " when transfer flag is enabled");
1578         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1579                 return rte_flow_error_set(error, ENOTSUP,
1580                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1581                                           "multiple source ports are not"
1582                                           " supported");
1583         if (!mask)
1584                 mask = &switch_mask;
1585         if (mask->id != 0xffffffff)
1586                 return rte_flow_error_set(error, ENOTSUP,
1587                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1588                                            mask,
1589                                            "no support for partial mask on"
1590                                            " \"id\" field");
1591         ret = mlx5_flow_item_acceptable
1592                                 (item, (const uint8_t *)mask,
1593                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1594                                  sizeof(struct rte_flow_item_port_id),
1595                                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1596         if (ret)
1597                 return ret;
1598         if (!spec)
1599                 return 0;
1600         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1601         if (!esw_priv)
1602                 return rte_flow_error_set(error, rte_errno,
1603                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1604                                           "failed to obtain E-Switch info for"
1605                                           " port");
1606         dev_priv = mlx5_dev_to_eswitch_info(dev);
1607         if (!dev_priv)
1608                 return rte_flow_error_set(error, rte_errno,
1609                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1610                                           NULL,
1611                                           "failed to obtain E-Switch info");
1612         if (esw_priv->domain_id != dev_priv->domain_id)
1613                 return rte_flow_error_set(error, EINVAL,
1614                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1615                                           "cannot match on a port from a"
1616                                           " different E-Switch");
1617         return 0;
1618 }
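
/*
 * Usage sketch with a hypothetical port: matching traffic from DPDK port
 * 0 in a transfer (E-Switch) rule. Leaving the mask NULL falls back to
 * the full-match "id" mask required above.
 */
static const struct rte_flow_item_port_id flow_dv_example_port_id_spec = {
        .id = 0,
};
static const struct rte_flow_item flow_dv_example_port_id_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
        .spec = &flow_dv_example_port_id_spec,
};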
1619
1620 /**
1621  * Validate VLAN item.
1622  *
1623  * @param[in] item
1624  *   Item specification.
1625  * @param[in] item_flags
1626  *   Bit-fields that holds the items detected until now.
1627  * @param[in] dev
1628  *   Ethernet device flow is being created on.
1629  * @param[out] error
1630  *   Pointer to error structure.
1631  *
1632  * @return
1633  *   0 on success, a negative errno value otherwise and rte_errno is set.
1634  */
1635 static int
1636 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1637                            uint64_t item_flags,
1638                            struct rte_eth_dev *dev,
1639                            struct rte_flow_error *error)
1640 {
1641         const struct rte_flow_item_vlan *mask = item->mask;
1642         const struct rte_flow_item_vlan nic_mask = {
1643                 .tci = RTE_BE16(UINT16_MAX),
1644                 .inner_type = RTE_BE16(UINT16_MAX),
1645                 .has_more_vlan = 1,
1646         };
1647         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1648         int ret;
1649         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1650                                         MLX5_FLOW_LAYER_INNER_L4) :
1651                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1652                                         MLX5_FLOW_LAYER_OUTER_L4);
1653         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1654                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1655
1656         if (item_flags & vlanm)
1657                 return rte_flow_error_set(error, EINVAL,
1658                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1659                                           "multiple VLAN layers not supported");
1660         else if ((item_flags & l34m) != 0)
1661                 return rte_flow_error_set(error, EINVAL,
1662                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1663                                           "VLAN cannot follow L3/L4 layer");
1664         if (!mask)
1665                 mask = &rte_flow_item_vlan_mask;
1666         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1667                                         (const uint8_t *)&nic_mask,
1668                                         sizeof(struct rte_flow_item_vlan),
1669                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1670         if (ret)
1671                 return ret;
1672         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1673                 struct mlx5_priv *priv = dev->data->dev_private;
1674
1675                 if (priv->vmwa_context) {
1676                         /*
1677                          * A non-NULL context means we have a virtual machine
1678                          * and SR-IOV enabled; we have to create a VLAN interface
1679                          * to make the hypervisor set up the E-Switch vport
1680                          * context correctly. We avoid creating multiple VLAN
1681                          * interfaces, so we cannot support a VLAN tag mask.
1682                          */
1683                         return rte_flow_error_set(error, EINVAL,
1684                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1685                                                   item,
1686                                                   "VLAN tag mask is not"
1687                                                   " supported in virtual"
1688                                                   " environment");
1689                 }
1690         }
1691         return 0;
1692 }
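
/*
 * Usage sketch with hypothetical values: an outer VLAN item with the full
 * 0x0fff VID mask. Per the check above, this is the only TCI mask form
 * usable when a VM workaround context is active (SR-IOV virtual
 * environment).
 */
static const struct rte_flow_item_vlan flow_dv_example_vlan_spec = {
        .tci = RTE_BE16(100),
};
static const struct rte_flow_item_vlan flow_dv_example_vlan_mask = {
        .tci = RTE_BE16(0x0fff),
};
static const struct rte_flow_item flow_dv_example_vlan_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_VLAN,
        .spec = &flow_dv_example_vlan_spec,
        .mask = &flow_dv_example_vlan_mask,
};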
1693
1694 /*
1695  * GTP flags are contained in 1 byte of the format:
1696  * -------------------------------------------
1697  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1698  * |-----------------------------------------|
1699  * | value | Version | PT | Res | E | S | PN |
1700  * -------------------------------------------
1701  *
1702  * Matching is supported only for GTP flags E, S, PN.
1703  */
1704 #define MLX5_GTP_FLAGS_MASK     0x07
1705
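/*
 * Illustrative sketch: matching on the GTP E flag plus TEID stays within
 * the E/S/PN bits permitted by MLX5_GTP_FLAGS_MASK (values here are
 * hypothetical).
 */
static const struct rte_flow_item_gtp flow_dv_example_gtp_spec = {
        .v_pt_rsv_flags = 0x04, /* E (extension header) flag. */
        .teid = RTE_BE32(1234),
};
static const struct rte_flow_item_gtp flow_dv_example_gtp_mask = {
        .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
        .teid = RTE_BE32(0xffffffff),
};
static const struct rte_flow_item flow_dv_example_gtp_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_GTP,
        .spec = &flow_dv_example_gtp_spec,
        .mask = &flow_dv_example_gtp_mask,
};
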
1706 /**
1707  * Validate GTP item.
1708  *
1709  * @param[in] dev
1710  *   Pointer to the rte_eth_dev structure.
1711  * @param[in] item
1712  *   Item specification.
1713  * @param[in] item_flags
1714  *   Bit-fields that holds the items detected until now.
1715  * @param[out] error
1716  *   Pointer to error structure.
1717  *
1718  * @return
1719  *   0 on success, a negative errno value otherwise and rte_errno is set.
1720  */
1721 static int
1722 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1723                           const struct rte_flow_item *item,
1724                           uint64_t item_flags,
1725                           struct rte_flow_error *error)
1726 {
1727         struct mlx5_priv *priv = dev->data->dev_private;
1728         const struct rte_flow_item_gtp *spec = item->spec;
1729         const struct rte_flow_item_gtp *mask = item->mask;
1730         const struct rte_flow_item_gtp nic_mask = {
1731                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1732                 .msg_type = 0xff,
1733                 .teid = RTE_BE32(0xffffffff),
1734         };
1735
1736         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1737                 return rte_flow_error_set(error, ENOTSUP,
1738                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1739                                           "GTP support is not enabled");
1740         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1741                 return rte_flow_error_set(error, ENOTSUP,
1742                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1743                                           "multiple tunnel layers not"
1744                                           " supported");
1745         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1746                 return rte_flow_error_set(error, EINVAL,
1747                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1748                                           "no outer UDP layer found");
1749         if (!mask)
1750                 mask = &rte_flow_item_gtp_mask;
1751         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1752                 return rte_flow_error_set(error, ENOTSUP,
1753                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1754                                           "Match is supported for GTP"
1755                                           " flags only");
1756         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1757                                          (const uint8_t *)&nic_mask,
1758                                          sizeof(struct rte_flow_item_gtp),
1759                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1760 }
1761
1762 /**
1763  * Validate IPV4 item.
1764  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
1765  * add specific validation of fragment_offset field,
1766  * add specific validation of the fragment_offset field.
1767  * @param[in] item
1768  *   Item specification.
1769  * @param[in] item_flags
1770  *   Bit-fields that holds the items detected until now.
1771  * @param[out] error
1772  *   Pointer to error structure.
1773  *
1774  * @return
1775  *   0 on success, a negative errno value otherwise and rte_errno is set.
1776  */
1777 static int
1778 flow_dv_validate_item_ipv4(const struct rte_flow_item *item,
1779                            uint64_t item_flags,
1780                            uint64_t last_item,
1781                            uint16_t ether_type,
1782                            struct rte_flow_error *error)
1783 {
1784         int ret;
1785         const struct rte_flow_item_ipv4 *spec = item->spec;
1786         const struct rte_flow_item_ipv4 *last = item->last;
1787         const struct rte_flow_item_ipv4 *mask = item->mask;
1788         rte_be16_t fragment_offset_spec = 0;
1789         rte_be16_t fragment_offset_last = 0;
1790         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
1791                 .hdr = {
1792                         .src_addr = RTE_BE32(0xffffffff),
1793                         .dst_addr = RTE_BE32(0xffffffff),
1794                         .type_of_service = 0xff,
1795                         .fragment_offset = RTE_BE16(0xffff),
1796                         .next_proto_id = 0xff,
1797                         .time_to_live = 0xff,
1798                 },
1799         };
1800
1801         ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
1802                                            ether_type, &nic_ipv4_mask,
1803                                            MLX5_ITEM_RANGE_ACCEPTED, error);
1804         if (ret < 0)
1805                 return ret;
1806         if (spec && mask)
1807                 fragment_offset_spec = spec->hdr.fragment_offset &
1808                                        mask->hdr.fragment_offset;
1809         if (!fragment_offset_spec)
1810                 return 0;
1811         /*
1812          * spec and mask are valid, enforce using full mask to make sure the
1813          * complete value is used correctly.
1814          */
1815         if ((mask->hdr.fragment_offset & RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1816                         != RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1817                 return rte_flow_error_set(error, EINVAL,
1818                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1819                                           item, "must use full mask for"
1820                                           " fragment_offset");
1821         /*
1822          * Match on fragment_offset 0x2000 means MF is 1 and frag-offset is 0,
1823          * indicating this is the 1st fragment of a fragmented packet.
1824          * This is not yet supported in MLX5, return appropriate error message.
1825          */
1826         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG))
1827                 return rte_flow_error_set(error, ENOTSUP,
1828                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1829                                           "match on first fragment not "
1830                                           "supported");
1831         if (fragment_offset_spec && !last)
1832                 return rte_flow_error_set(error, ENOTSUP,
1833                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1834                                           "specified value not supported");
1835         /* spec and last are valid, validate the specified range. */
1836         fragment_offset_last = last->hdr.fragment_offset &
1837                                mask->hdr.fragment_offset;
1838         /*
1839          * Match on fragment_offset spec 0x2001 and last 0x3fff
1840          * means MF is 1 and frag-offset is > 0.
1841          * This packet is the 2nd fragment or later, excluding the last.
1842          * This is not yet supported in MLX5, return appropriate
1843          * error message.
1844          */
1845         if (fragment_offset_spec == RTE_BE16(RTE_IPV4_HDR_MF_FLAG + 1) &&
1846             fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK))
1847                 return rte_flow_error_set(error, ENOTSUP,
1848                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1849                                           last, "match on following "
1850                                           "fragments not supported");
1851         /*
1852          * Match on fragment_offset spec 0x0001 and last 0x1fff
1853          * means MF is 0 and frag-offset is > 0.
1854          * This packet is the last fragment of a fragmented packet.
1855          * This is not yet supported in MLX5, return appropriate
1856          * error message.
1857          */
1858         if (fragment_offset_spec == RTE_BE16(1) &&
1859             fragment_offset_last == RTE_BE16(RTE_IPV4_HDR_OFFSET_MASK))
1860                 return rte_flow_error_set(error, ENOTSUP,
1861                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1862                                           last, "match on last "
1863                                           "fragment not supported");
1864         /*
1865          * Match on fragment_offset spec 0x0001 and last 0x3fff
1866          * means MF and/or frag-offset is not 0.
1867          * This is a fragmented packet.
1868          * Other range values are invalid and rejected.
1869          */
1870         if (!(fragment_offset_spec == RTE_BE16(1) &&
1871               fragment_offset_last == RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK)))
1872                 return rte_flow_error_set(error, ENOTSUP,
1873                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1874                                           "specified range not supported");
1875         return 0;
1876 }
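
/*
 * Usage sketch: the only fragment_offset range match accepted above -
 * "any fragmented packet", i.e. MF and/or frag-offset non-zero.
 * MLX5_IPV4_FRAG_OFFSET_MASK comes from the driver headers and covers
 * both the MF bit and the offset bits.
 */
static const struct rte_flow_item_ipv4 flow_dv_example_ipv4_frag_spec = {
        .hdr = { .fragment_offset = RTE_BE16(1) },
};
static const struct rte_flow_item_ipv4 flow_dv_example_ipv4_frag_last = {
        .hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
static const struct rte_flow_item_ipv4 flow_dv_example_ipv4_frag_mask = {
        .hdr = { .fragment_offset = RTE_BE16(MLX5_IPV4_FRAG_OFFSET_MASK) },
};
static const struct rte_flow_item flow_dv_example_ipv4_frag_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_IPV4,
        .spec = &flow_dv_example_ipv4_frag_spec,
        .last = &flow_dv_example_ipv4_frag_last,
        .mask = &flow_dv_example_ipv4_frag_mask,
};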
1877
1878 /**
1879  * Validate IPV6 fragment extension item.
1880  *
1881  * @param[in] item
1882  *   Item specification.
1883  * @param[in] item_flags
1884  *   Bit-fields that holds the items detected until now.
1885  * @param[out] error
1886  *   Pointer to error structure.
1887  *
1888  * @return
1889  *   0 on success, a negative errno value otherwise and rte_errno is set.
1890  */
1891 static int
1892 flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
1893                                     uint64_t item_flags,
1894                                     struct rte_flow_error *error)
1895 {
1896         const struct rte_flow_item_ipv6_frag_ext *spec = item->spec;
1897         const struct rte_flow_item_ipv6_frag_ext *last = item->last;
1898         const struct rte_flow_item_ipv6_frag_ext *mask = item->mask;
1899         rte_be16_t frag_data_spec = 0;
1900         rte_be16_t frag_data_last = 0;
1901         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1902         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1903                                       MLX5_FLOW_LAYER_OUTER_L4;
1904         int ret = 0;
1905         struct rte_flow_item_ipv6_frag_ext nic_mask = {
1906                 .hdr = {
1907                         .next_header = 0xff,
1908                         .frag_data = RTE_BE16(0xffff),
1909                 },
1910         };
1911
1912         if (item_flags & l4m)
1913                 return rte_flow_error_set(error, EINVAL,
1914                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1915                                           "ipv6 fragment extension item cannot "
1916                                           "follow L4 item.");
1917         if ((tunnel && !(item_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
1918             (!tunnel && !(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
1919                 return rte_flow_error_set(error, EINVAL,
1920                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1921                                           "ipv6 fragment extension item must "
1922                                           "follow ipv6 item");
1923         if (spec && mask)
1924                 frag_data_spec = spec->hdr.frag_data & mask->hdr.frag_data;
1925         if (!frag_data_spec)
1926                 return 0;
1927         /*
1928          * spec and mask are valid, enforce using full mask to make sure the
1929          * complete value is used correctly.
1930          */
1931         if ((mask->hdr.frag_data & RTE_BE16(RTE_IPV6_FRAG_USED_MASK)) !=
1932                                 RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1933                 return rte_flow_error_set(error, EINVAL,
1934                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1935                                           item, "must use full mask for"
1936                                           " frag_data");
1937         /*
1938          * Match on frag_data 0x00001 means M is 1 and frag-offset is 0.
1939          * This is 1st fragment of fragmented packet.
1940          * Match on frag_data 0x0001 means M is 1 and frag-offset is 0.
1941          * This is the 1st fragment of a fragmented packet.
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "match on first fragment not "
1945                                           "supported");
1946         if (frag_data_spec && !last)
1947                 return rte_flow_error_set(error, EINVAL,
1948                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1949                                           "specified value not supported");
1950         ret = mlx5_flow_item_acceptable
1951                                 (item, (const uint8_t *)mask,
1952                                  (const uint8_t *)&nic_mask,
1953                                  sizeof(struct rte_flow_item_ipv6_frag_ext),
1954                                  MLX5_ITEM_RANGE_ACCEPTED, error);
1955         if (ret)
1956                 return ret;
1957         /* spec and last are valid, validate the specified range. */
1958         frag_data_last = last->hdr.frag_data & mask->hdr.frag_data;
1959         /*
1960          * Match on frag_data spec 0x0009 and last 0xfff9
1961          * means M is 1 and frag-offset is > 0.
1962          * This packet is the 2nd fragment or later, excluding the last.
1963          * This is not yet supported in MLX5, return appropriate
1964          * error message.
1965          */
1966         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN |
1967                                        RTE_IPV6_EHDR_MF_MASK) &&
1968             frag_data_last == RTE_BE16(RTE_IPV6_FRAG_USED_MASK))
1969                 return rte_flow_error_set(error, ENOTSUP,
1970                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1971                                           last, "match on following "
1972                                           "fragments not supported");
1973         /*
1974          * Match on frag_data spec 0x0008 and last 0xfff8
1975          * means M is 0 and frag-offset is > 0.
1976          * This packet is the last fragment of a fragmented packet.
1977          * This is not yet supported in MLX5, return appropriate
1978          * error message.
1979          */
1980         if (frag_data_spec == RTE_BE16(RTE_IPV6_EHDR_FO_ALIGN) &&
1981             frag_data_last == RTE_BE16(RTE_IPV6_EHDR_FO_MASK))
1982                 return rte_flow_error_set(error, ENOTSUP,
1983                                           RTE_FLOW_ERROR_TYPE_ITEM_LAST,
1984                                           last, "match on last "
1985                                           "fragment not supported");
1986         /* Other range values are invalid and rejected. */
1987         return rte_flow_error_set(error, EINVAL,
1988                                   RTE_FLOW_ERROR_TYPE_ITEM_LAST, last,
1989                                   "specified range not supported");
1990 }
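
/*
 * Usage sketch: per the early return above, an IPv6 fragment extension
 * item that does not match on frag_data passes validation, e.g. matching
 * only on the presence of the header and its next_header field
 * (hypothetical value).
 */
static const struct rte_flow_item_ipv6_frag_ext flow_dv_example_frag_ext_spec = {
        .hdr = { .next_header = 17 /* UDP */ },
};
static const struct rte_flow_item_ipv6_frag_ext flow_dv_example_frag_ext_mask = {
        .hdr = { .next_header = 0xff },
};
static const struct rte_flow_item flow_dv_example_frag_ext_item __rte_unused = {
        .type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
        .spec = &flow_dv_example_frag_ext_spec,
        .mask = &flow_dv_example_frag_ext_mask,
};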
1991
1992 /**
1993  * Validate the pop VLAN action.
1994  *
1995  * @param[in] dev
1996  *   Pointer to the rte_eth_dev structure.
1997  * @param[in] action_flags
1998  *   Holds the actions detected until now.
1999  * @param[in] action
2000  *   Pointer to the pop vlan action.
2001  * @param[in] item_flags
2002  *   The items found in this flow rule.
2003  * @param[in] attr
2004  *   Pointer to flow attributes.
2005  * @param[out] error
2006  *   Pointer to error structure.
2007  *
2008  * @return
2009  *   0 on success, a negative errno value otherwise and rte_errno is set.
2010  */
2011 static int
2012 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
2013                                  uint64_t action_flags,
2014                                  const struct rte_flow_action *action,
2015                                  uint64_t item_flags,
2016                                  const struct rte_flow_attr *attr,
2017                                  struct rte_flow_error *error)
2018 {
2019         const struct mlx5_priv *priv = dev->data->dev_private;
2020
2023         if (!priv->sh->pop_vlan_action)
2024                 return rte_flow_error_set(error, ENOTSUP,
2025                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2026                                           NULL,
2027                                           "pop vlan action is not supported");
2028         if (attr->egress)
2029                 return rte_flow_error_set(error, ENOTSUP,
2030                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2031                                           NULL,
2032                                           "pop vlan action not supported for "
2033                                           "egress");
2034         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
2035                 return rte_flow_error_set(error, ENOTSUP,
2036                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2037                                           "no support for multiple VLAN "
2038                                           "actions");
2039         /* Pop VLAN with preceding Decap requires inner header with VLAN. */
2040         if ((action_flags & MLX5_FLOW_ACTION_DECAP) &&
2041             !(item_flags & MLX5_FLOW_LAYER_INNER_VLAN))
2042                 return rte_flow_error_set(error, ENOTSUP,
2043                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2044                                           NULL,
2045                                           "cannot pop vlan after decap without "
2046                                           "match on inner vlan in the flow");
2047         /* Pop VLAN without preceding Decap requires outer header with VLAN. */
2048         if (!(action_flags & MLX5_FLOW_ACTION_DECAP) &&
2049             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2050                 return rte_flow_error_set(error, ENOTSUP,
2051                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2052                                           NULL,
2053                                           "cannot pop vlan without a "
2054                                           "match on (outer) vlan in the flow");
2055         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2056                 return rte_flow_error_set(error, EINVAL,
2057                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2058                                           "wrong action order, port_id should "
2059                                           "be after pop VLAN action");
2060         if (!attr->transfer && priv->representor)
2061                 return rte_flow_error_set(error, ENOTSUP,
2062                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2063                                           "pop vlan action for VF representor "
2064                                           "not supported on NIC table");
2065         return 0;
2066 }
2067
2068 /**
2069  * Get VLAN default info from vlan match info.
2070  *
2071  * @param[in] items
2072  *   The list of item specifications.
2073  * @param[out] vlan
2074  *   Pointer to the VLAN info to fill.
2078  */
2079 static void
2080 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
2081                                   struct rte_vlan_hdr *vlan)
2082 {
2083         const struct rte_flow_item_vlan nic_mask = {
2084                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
2085                                 MLX5DV_FLOW_VLAN_VID_MASK),
2086                 .inner_type = RTE_BE16(0xffff),
2087         };
2088
2089         if (items == NULL)
2090                 return;
2091         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2092                 int type = items->type;
2093
2094                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
2095                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
2096                         break;
2097         }
2098         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
2099                 const struct rte_flow_item_vlan *vlan_m = items->mask;
2100                 const struct rte_flow_item_vlan *vlan_v = items->spec;
2101
2102                 /* If VLAN item in pattern doesn't contain data, return here. */
2103                 if (!vlan_v)
2104                         return;
2105                 if (!vlan_m)
2106                         vlan_m = &nic_mask;
2107                 /* Only full match values are accepted */
2108                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
2109                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
2110                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
2111                         vlan->vlan_tci |=
2112                                 rte_be_to_cpu_16(vlan_v->tci &
2113                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
2114                 }
2115                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
2116                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
2117                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
2118                         vlan->vlan_tci |=
2119                                 rte_be_to_cpu_16(vlan_v->tci &
2120                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
2121                 }
2122                 if (vlan_m->inner_type == nic_mask.inner_type)
2123                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
2124                                                            vlan_m->inner_type);
2125         }
2126 }
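
/*
 * Minimal sketch of the TCI decomposition performed above (helper names
 * are illustrative, not driver API). For TCI 0xa064:
 * PCP = (0xa064 & 0xe000) >> 13 = 5, VID = 0xa064 & 0x0fff = 100.
 */
static inline uint16_t
flow_dv_example_vlan_vid(rte_be16_t tci)
{
        return rte_be_to_cpu_16(tci) & MLX5DV_FLOW_VLAN_VID_MASK;
}

static inline uint8_t
flow_dv_example_vlan_pcp(rte_be16_t tci)
{
        return (rte_be_to_cpu_16(tci) & MLX5DV_FLOW_VLAN_PCP_MASK) >>
               MLX5DV_FLOW_VLAN_PCP_SHIFT;
}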
2127
2128 /**
2129  * Validate the push VLAN action.
2130  *
2131  * @param[in] dev
2132  *   Pointer to the rte_eth_dev structure.
2133  * @param[in] action_flags
2134  *   Holds the actions detected until now.
2135  * @param[in] vlan_m
2136  *   VLAN match mask from the flow pattern, or NULL if no VLAN item found.
2137  * @param[in] action
2138  *   Pointer to the action structure.
2139  * @param[in] attr
2140  *   Pointer to flow attributes
2141  * @param[out] error
2142  *   Pointer to error structure.
2143  *
2144  * @return
2145  *   0 on success, a negative errno value otherwise and rte_errno is set.
2146  */
2147 static int
2148 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
2149                                   uint64_t action_flags,
2150                                   const struct rte_flow_item_vlan *vlan_m,
2151                                   const struct rte_flow_action *action,
2152                                   const struct rte_flow_attr *attr,
2153                                   struct rte_flow_error *error)
2154 {
2155         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
2156         const struct mlx5_priv *priv = dev->data->dev_private;
2157
2158         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
2159             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
2160                 return rte_flow_error_set(error, EINVAL,
2161                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2162                                           "invalid vlan ethertype");
2163         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2164                 return rte_flow_error_set(error, EINVAL,
2165                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2166                                           "wrong action order, port_id should "
2167                                           "be after push VLAN");
2168         if (!attr->transfer && priv->representor)
2169                 return rte_flow_error_set(error, ENOTSUP,
2170                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2171                                           "push vlan action for VF representor "
2172                                           "not supported on NIC table");
2173         if (vlan_m &&
2174             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
2175             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
2176                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
2177             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
2178             !(mlx5_flow_find_action
2179                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
2180                 return rte_flow_error_set(error, EINVAL,
2181                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2182                                           "not full match mask on VLAN PCP and "
2183                                           "there is no of_set_vlan_pcp action, "
2184                                           "push VLAN action cannot figure out "
2185                                           "PCP value");
2186         if (vlan_m &&
2187             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
2188             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
2189                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
2190             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
2191             !(mlx5_flow_find_action
2192                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
2193                 return rte_flow_error_set(error, EINVAL,
2194                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2195                                           "not full match mask on VLAN VID and "
2196                                           "there is no of_set_vlan_vid action, "
2197                                           "push VLAN action cannot figure out "
2198                                           "VID value");
2200         return 0;
2201 }
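
/*
 * Usage sketch (hypothetical): pushing a plain 802.1Q tag. The ethertype
 * must be VLAN or QinQ; PCP/VID come either from a fully masked VLAN item
 * in the pattern or from the of_set_vlan_pcp/of_set_vlan_vid actions
 * checked above.
 */
static const struct rte_flow_action_of_push_vlan
flow_dv_example_push_vlan __rte_unused = {
        .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};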
2202
2203 /**
2204  * Validate the set VLAN PCP.
2205  *
2206  * @param[in] action_flags
2207  *   Holds the actions detected until now.
2208  * @param[in] actions
2209  *   Pointer to the list of actions remaining in the flow rule.
2210  * @param[out] error
2211  *   Pointer to error structure.
2212  *
2213  * @return
2214  *   0 on success, a negative errno value otherwise and rte_errno is set.
2215  */
2216 static int
2217 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2218                                      const struct rte_flow_action actions[],
2219                                      struct rte_flow_error *error)
2220 {
2221         const struct rte_flow_action *action = actions;
2222         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2223
2224         if (conf->vlan_pcp > 7)
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2227                                           "VLAN PCP value is too big");
2228         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2229                 return rte_flow_error_set(error, ENOTSUP,
2230                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2231                                           "set VLAN PCP action must follow "
2232                                           "the push VLAN action");
2233         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2234                 return rte_flow_error_set(error, ENOTSUP,
2235                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2236                                           "Multiple VLAN PCP modifications are "
2237                                           "not supported");
2238         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2239                 return rte_flow_error_set(error, EINVAL,
2240                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2241                                           "wrong action order, port_id should "
2242                                           "be after set VLAN PCP");
2243         return 0;
2244 }
2245
2246 /**
2247  * Validate the set VLAN VID.
2248  *
2249  * @param[in] item_flags
2250  *   Holds the items detected in this rule.
2251  * @param[in] action_flags
2252  *   Holds the actions detected until now.
2253  * @param[in] actions
2254  *   Pointer to the list of actions remaining in the flow rule.
2255  * @param[out] error
2256  *   Pointer to error structure.
2257  *
2258  * @return
2259  *   0 on success, a negative errno value otherwise and rte_errno is set.
2260  */
2261 static int
2262 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2263                                      uint64_t action_flags,
2264                                      const struct rte_flow_action actions[],
2265                                      struct rte_flow_error *error)
2266 {
2267         const struct rte_flow_action *action = actions;
2268         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2269
2270         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2271                 return rte_flow_error_set(error, EINVAL,
2272                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2273                                           "VLAN VID value is too big");
2274         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2275             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2276                 return rte_flow_error_set(error, ENOTSUP,
2277                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2278                                           "set VLAN VID action must follow push"
2279                                           " VLAN action or match on VLAN item");
2280         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2281                 return rte_flow_error_set(error, ENOTSUP,
2282                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2283                                           "Multiple VLAN VID modifications are "
2284                                           "not supported");
2285         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2286                 return rte_flow_error_set(error, EINVAL,
2287                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2288                                           "wrong action order, port_id should "
2289                                           "be after set VLAN VID");
2290         return 0;
2291 }
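
/*
 * Usage sketch (hypothetical values) within the bounds enforced above:
 * PCP at most 7, VID in network order and at most 0xFFE.
 */
static const struct rte_flow_action_of_set_vlan_pcp
flow_dv_example_set_pcp __rte_unused = {
        .vlan_pcp = 3,
};
static const struct rte_flow_action_of_set_vlan_vid
flow_dv_example_set_vid __rte_unused = {
        .vlan_vid = RTE_BE16(100),
};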
2292
2293 /**
2294  * Validate the FLAG action.
2295  *
2296  * @param[in] dev
2297  *   Pointer to the rte_eth_dev structure.
2298  * @param[in] action_flags
2299  *   Holds the actions detected until now.
2300  * @param[in] attr
2301  *   Pointer to flow attributes
2302  * @param[out] error
2303  *   Pointer to error structure.
2304  *
2305  * @return
2306  *   0 on success, a negative errno value otherwise and rte_errno is set.
2307  */
2308 static int
2309 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2310                              uint64_t action_flags,
2311                              const struct rte_flow_attr *attr,
2312                              struct rte_flow_error *error)
2313 {
2314         struct mlx5_priv *priv = dev->data->dev_private;
2315         struct mlx5_dev_config *config = &priv->config;
2316         int ret;
2317
2318         /* Fall back if no extended metadata register support. */
2319         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2320                 return mlx5_flow_validate_action_flag(action_flags, attr,
2321                                                       error);
2322         /* Extensive metadata mode requires registers. */
2323         if (!mlx5_flow_ext_mreg_supported(dev))
2324                 return rte_flow_error_set(error, ENOTSUP,
2325                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2326                                           "no metadata registers "
2327                                           "to support flag action");
2328         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2329                 return rte_flow_error_set(error, ENOTSUP,
2330                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2331                                           "extended metadata register"
2332                                           " isn't available");
2333         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2334         if (ret < 0)
2335                 return ret;
2336         MLX5_ASSERT(ret > 0);
2337         if (action_flags & MLX5_FLOW_ACTION_MARK)
2338                 return rte_flow_error_set(error, EINVAL,
2339                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2340                                           "can't mark and flag in same flow");
2341         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2342                 return rte_flow_error_set(error, EINVAL,
2343                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2344                                           "can't have 2 flag"
2345                                           " actions in same flow");
2346         return 0;
2347 }
2348
2349 /**
2350  * Validate MARK action.
2351  *
2352  * @param[in] dev
2353  *   Pointer to the rte_eth_dev structure.
2354  * @param[in] action
2355  *   Pointer to action.
2356  * @param[in] action_flags
2357  *   Holds the actions detected until now.
2358  * @param[in] attr
2359  *   Pointer to flow attributes
2360  * @param[out] error
2361  *   Pointer to error structure.
2362  *
2363  * @return
2364  *   0 on success, a negative errno value otherwise and rte_errno is set.
2365  */
2366 static int
2367 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2368                              const struct rte_flow_action *action,
2369                              uint64_t action_flags,
2370                              const struct rte_flow_attr *attr,
2371                              struct rte_flow_error *error)
2372 {
2373         struct mlx5_priv *priv = dev->data->dev_private;
2374         struct mlx5_dev_config *config = &priv->config;
2375         const struct rte_flow_action_mark *mark = action->conf;
2376         int ret;
2377
2378         /* Fall back if no extended metadata register support. */
2379         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2380                 return mlx5_flow_validate_action_mark(action, action_flags,
2381                                                       attr, error);
2382         /* Extensive metadata mode requires registers. */
2383         if (!mlx5_flow_ext_mreg_supported(dev))
2384                 return rte_flow_error_set(error, ENOTSUP,
2385                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2386                                           "no metadata registers "
2387                                           "to support mark action");
2388         if (!priv->sh->dv_mark_mask)
2389                 return rte_flow_error_set(error, ENOTSUP,
2390                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2391                                           "extended metadata register"
2392                                           " isn't available");
2393         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2394         if (ret < 0)
2395                 return ret;
2396         MLX5_ASSERT(ret > 0);
2397         if (!mark)
2398                 return rte_flow_error_set(error, EINVAL,
2399                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2400                                           "configuration cannot be null");
2401         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2402                 return rte_flow_error_set(error, EINVAL,
2403                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2404                                           &mark->id,
2405                                           "mark id exceeds the limit");
2406         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2407                 return rte_flow_error_set(error, EINVAL,
2408                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2409                                           "can't flag and mark in same flow");
2410         if (action_flags & MLX5_FLOW_ACTION_MARK)
2411                 return rte_flow_error_set(error, EINVAL,
2412                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2413                                           "can't have 2 mark actions in same"
2414                                           " flow");
2415         return 0;
2416 }
2417
2418 /**
2419  * Validate SET_META action.
2420  *
2421  * @param[in] dev
2422  *   Pointer to the rte_eth_dev structure.
2423  * @param[in] action
2424  *   Pointer to the action structure.
2425  * @param[in] action_flags
2426  *   Holds the actions detected until now.
2427  * @param[in] attr
2428  *   Pointer to flow attributes
2429  * @param[out] error
2430  *   Pointer to error structure.
2431  *
2432  * @return
2433  *   0 on success, a negative errno value otherwise and rte_errno is set.
2434  */
2435 static int
2436 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2437                                  const struct rte_flow_action *action,
2438                                  uint64_t action_flags __rte_unused,
2439                                  const struct rte_flow_attr *attr,
2440                                  struct rte_flow_error *error)
2441 {
2442         const struct rte_flow_action_set_meta *conf;
2443         uint32_t nic_mask = UINT32_MAX;
2444         int reg;
2445
2446         if (!mlx5_flow_ext_mreg_supported(dev))
2447                 return rte_flow_error_set(error, ENOTSUP,
2448                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2449                                           "extended metadata register"
2450                                           " isn't supported");
2451         reg = flow_dv_get_metadata_reg(dev, attr, error);
2452         if (reg < 0)
2453                 return reg;
2454         if (reg == REG_NON)
2455                 return rte_flow_error_set(error, ENOTSUP,
2456                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2457                                           "unavailable extended metadata register");
2458         if (reg != REG_A && reg != REG_B) {
2459                 struct mlx5_priv *priv = dev->data->dev_private;
2460
2461                 nic_mask = priv->sh->dv_meta_mask;
2462         }
2463         if (!(action->conf))
2464                 return rte_flow_error_set(error, EINVAL,
2465                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2466                                           "configuration cannot be null");
2467         conf = (const struct rte_flow_action_set_meta *)action->conf;
2468         if (!conf->mask)
2469                 return rte_flow_error_set(error, EINVAL,
2470                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2471                                           "zero mask doesn't have any effect");
2472         if (conf->mask & ~nic_mask)
2473                 return rte_flow_error_set(error, EINVAL,
2474                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2475                                           "metadata must be within reg C0");
2476         return 0;
2477 }
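
/*
 * Usage sketch (hypothetical values): a SET_META configuration with a
 * non-zero mask. When metadata lives in reg C0 the mask must also fit
 * the per-device meta mask checked above.
 */
static const struct rte_flow_action_set_meta
flow_dv_example_set_meta __rte_unused = {
        .data = 0x1234,
        .mask = 0xffff,
};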
2478
2479 /**
2480  * Validate SET_TAG action.
2481  *
2482  * @param[in] dev
2483  *   Pointer to the rte_eth_dev structure.
2484  * @param[in] action
2485  *   Pointer to the action structure.
2486  * @param[in] action_flags
2487  *   Holds the actions detected until now.
2488  * @param[in] attr
2489  *   Pointer to flow attributes
2490  * @param[out] error
2491  *   Pointer to error structure.
2492  *
2493  * @return
2494  *   0 on success, a negative errno value otherwise and rte_errno is set.
2495  */
2496 static int
2497 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2498                                 const struct rte_flow_action *action,
2499                                 uint64_t action_flags,
2500                                 const struct rte_flow_attr *attr,
2501                                 struct rte_flow_error *error)
2502 {
2503         const struct rte_flow_action_set_tag *conf;
2504         const uint64_t terminal_action_flags =
2505                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2506                 MLX5_FLOW_ACTION_RSS;
2507         int ret;
2508
2509         if (!mlx5_flow_ext_mreg_supported(dev))
2510                 return rte_flow_error_set(error, ENOTSUP,
2511                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2512                                           "extensive metadata register"
2513                                           " isn't supported");
2514         if (!(action->conf))
2515                 return rte_flow_error_set(error, EINVAL,
2516                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2517                                           "configuration cannot be null");
2518         conf = (const struct rte_flow_action_set_tag *)action->conf;
2519         if (!conf->mask)
2520                 return rte_flow_error_set(error, EINVAL,
2521                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2522                                           "zero mask doesn't have any effect");
2523         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2524         if (ret < 0)
2525                 return ret;
2526         if (!attr->transfer && attr->ingress &&
2527             (action_flags & terminal_action_flags))
2528                 return rte_flow_error_set(error, EINVAL,
2529                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2530                                           "set_tag has no effect"
2531                                           " with terminal actions");
2532         return 0;
2533 }
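
/*
 * A minimal usage sketch for the checks above (illustrative; the index
 * and values are assumptions). The tag register index is translated via
 * mlx5_flow_get_reg_id(), and on a non-transfer ingress flow SET_TAG is
 * rejected once a fate action (DROP/QUEUE/RSS) was already seen.
 *
 *	static const struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	static const struct rte_flow_action_set_tag tag_conf = {
 *		.index = 0,		// application tag register index
 *		.data = 0xbeef,
 *		.mask = 0xffff,		// zero mask is rejected above
 *	};
 *	static const struct rte_flow_action tag_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &tag_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */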
2534
2535 /**
2536  * Validate count action.
2537  *
2538  * @param[in] dev
2539  *   Pointer to rte_eth_dev structure.
2540  * @param[out] error
2541  *   Pointer to error structure.
2542  *
2543  * @return
2544  *   0 on success, a negative errno value otherwise and rte_errno is set.
2545  */
2546 static int
2547 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2548                               struct rte_flow_error *error)
2549 {
2550         struct mlx5_priv *priv = dev->data->dev_private;
2551
2552         if (!priv->config.devx)
2553                 goto notsup_err;
2554 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2555         return 0;
2556 #endif
2557 notsup_err:
2558         return rte_flow_error_set
2559                       (error, ENOTSUP,
2560                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2561                        NULL,
2562                        "count action not supported");
2563 }
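
/*
 * Sketch of how a counter validated here is typically read back through
 * the generic rte_flow API (illustrative; port_id and flow are
 * assumptions). The action needs no mandatory configuration at this
 * point, but DevX counter support is required by the checks above.
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action count_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *	uint64_t hits = 0;
 *
 *	if (!rte_flow_query(port_id, flow, &count_act, &qc, &err) &&
 *	    qc.hits_set)
 *		hits = qc.hits;	// bytes in qc.bytes when qc.bytes_set
 */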
2564
2565 /**
2566  * Validate the L2 encap action.
2567  *
2568  * @param[in] dev
2569  *   Pointer to the rte_eth_dev structure.
2570  * @param[in] action_flags
2571  *   Holds the actions detected until now.
2572  * @param[in] action
2573  *   Pointer to the action structure.
2574  * @param[in] attr
2575  *   Pointer to flow attributes.
2576  * @param[out] error
2577  *   Pointer to error structure.
2578  *
2579  * @return
2580  *   0 on success, a negative errno value otherwise and rte_errno is set.
2581  */
2582 static int
2583 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2584                                  uint64_t action_flags,
2585                                  const struct rte_flow_action *action,
2586                                  const struct rte_flow_attr *attr,
2587                                  struct rte_flow_error *error)
2588 {
2589         const struct mlx5_priv *priv = dev->data->dev_private;
2590
2591         if (!(action->conf))
2592                 return rte_flow_error_set(error, EINVAL,
2593                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2594                                           "configuration cannot be null");
2595         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2596                 return rte_flow_error_set(error, EINVAL,
2597                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2598                                           "can only have a single encap action "
2599                                           "in a flow");
2600         if (!attr->transfer && priv->representor)
2601                 return rte_flow_error_set(error, ENOTSUP,
2602                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2603                                           "encap action for VF representor "
2604                                           "not supported on NIC table");
2605         return 0;
2606 }
2607
2608 /**
2609  * Validate a decap action.
2610  *
2611  * @param[in] dev
2612  *   Pointer to the rte_eth_dev structure.
2613  * @param[in] action_flags
2614  *   Holds the actions detected until now.
2615  * @param[in] attr
2616  *   Pointer to flow attributes
2617  * @param[out] error
2618  *   Pointer to error structure.
2619  *
2620  * @return
2621  *   0 on success, a negative errno value otherwise and rte_errno is set.
2622  */
2623 static int
2624 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2625                               uint64_t action_flags,
2626                               const struct rte_flow_attr *attr,
2627                               struct rte_flow_error *error)
2628 {
2629         const struct mlx5_priv *priv = dev->data->dev_private;
2630
2631         if (priv->config.hca_attr.scatter_fcs_w_decap_disable &&
2632             !priv->config.decap_en)
2633                 return rte_flow_error_set(error, ENOTSUP,
2634                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2635                                           "decap is not enabled");
2636         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2637                 return rte_flow_error_set(error, ENOTSUP,
2638                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2639                                           action_flags &
2640                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2641                                           "have a single decap action" : "decap "
2642                                           "after encap is not supported");
2643         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2644                 return rte_flow_error_set(error, EINVAL,
2645                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2646                                           "can't have decap action after"
2647                                           " modify action");
2648         if (attr->egress)
2649                 return rte_flow_error_set(error, ENOTSUP,
2650                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2651                                           NULL,
2652                                           "decap action not supported for "
2653                                           "egress");
2654         if (!attr->transfer && priv->representor)
2655                 return rte_flow_error_set(error, ENOTSUP,
2656                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2657                                           "decap action for VF representor "
2658                                           "not supported on NIC table");
2659         return 0;
2660 }
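
/*
 * A minimal usage sketch for the constraints above (illustrative): a
 * plain VXLAN decap on an ingress NIC flow. The same action under an
 * egress attribute, or placed after encap/modify-header actions, is
 * rejected by the checks above.
 *
 *	static const struct rte_flow_attr decap_attr = { .ingress = 1 };
 *	static const struct rte_flow_action_queue q = { .index = 0 };
 *	static const struct rte_flow_action decap_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */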
2661
2662 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2663
2664 /**
2665  * Validate the raw encap and decap actions.
2666  *
2667  * @param[in] dev
2668  *   Pointer to the rte_eth_dev structure.
2669  * @param[in] decap
2670  *   Pointer to the decap action.
2671  * @param[in] encap
2672  *   Pointer to the encap action.
2673  * @param[in] attr
2674  *   Pointer to flow attributes
2675  * @param[in, out] action_flags
2676  *   Holds the actions detected until now.
2677  * @param[out] actions_n
2678  *   Pointer to the actions counter.
2679  * @param[out] error
2680  *   Pointer to error structure.
2681  *
2682  * @return
2683  *   0 on success, a negative errno value otherwise and rte_errno is set.
2684  */
2685 static int
2686 flow_dv_validate_action_raw_encap_decap
2687         (struct rte_eth_dev *dev,
2688          const struct rte_flow_action_raw_decap *decap,
2689          const struct rte_flow_action_raw_encap *encap,
2690          const struct rte_flow_attr *attr, uint64_t *action_flags,
2691          int *actions_n, struct rte_flow_error *error)
2692 {
2693         const struct mlx5_priv *priv = dev->data->dev_private;
2694         int ret;
2695
2696         if (encap && (!encap->size || !encap->data))
2697                 return rte_flow_error_set(error, EINVAL,
2698                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2699                                           "raw encap data cannot be empty");
2700         if (decap && encap) {
2701                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2702                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2703                         /* L3 encap. */
2704                         decap = NULL;
2705                 else if (encap->size <=
2706                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2707                            decap->size >
2708                            MLX5_ENCAPSULATION_DECISION_SIZE)
2709                         /* L3 decap. */
2710                         encap = NULL;
2711                 else if (encap->size >
2712                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2713                            decap->size >
2714                            MLX5_ENCAPSULATION_DECISION_SIZE)
2715                         /* 2 L2 actions: encap and decap. */
2716                         ;
2717                 else
2718                         return rte_flow_error_set(error,
2719                                 ENOTSUP,
2720                                 RTE_FLOW_ERROR_TYPE_ACTION,
2721                                 NULL, "unsupported combination:"
2722                                 " raw decap and raw encap sizes"
2723                                 " are both too small");
2724         }
2725         if (decap) {
2726                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2727                                                     error);
2728                 if (ret < 0)
2729                         return ret;
2730                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2731                 ++(*actions_n);
2732         }
2733         if (encap) {
2734                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2735                         return rte_flow_error_set(error, ENOTSUP,
2736                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2737                                                   NULL,
2738                                                   "too small raw encap size");
2739                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2740                         return rte_flow_error_set(error, EINVAL,
2741                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2742                                                   NULL,
2743                                                   "more than one encap action");
2744                 if (!attr->transfer && priv->representor)
2745                         return rte_flow_error_set
2746                                         (error, ENOTSUP,
2747                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2748                                          "encap action for VF representor "
2749                                          "not supported on NIC table");
2750                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2751                 ++(*actions_n);
2752         }
2753         return 0;
2754 }
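
/*
 * Sketch of the size-based classification above (illustrative; the
 * header bytes are assumptions). A raw decap no larger than
 * MLX5_ENCAPSULATION_DECISION_SIZE paired with a larger raw encap is
 * collapsed into a single L2-to-L3 tunnel reformat: strip the packet's
 * own L2 header, then push the full tunnel stack.
 *
 *	uint8_t tunnel_hdr[64];			// eth+ipv4+udp+vxlan bytes
 *	struct rte_flow_action_raw_decap decap = {
 *		.size = sizeof(struct rte_ether_hdr),	// <= decision size
 *	};
 *	struct rte_flow_action_raw_encap encap = {
 *		.data = tunnel_hdr,
 *		.size = sizeof(tunnel_hdr),		// > decision size
 *	};
 *	struct rte_flow_action raw_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */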
2755
2756 /**
2757  * Match encap_decap resource.
2758  *
2759  * @param list
2760  *   Pointer to the hash list.
2761  * @param entry
2762  *   Pointer to the existing resource entry object.
2763  * @param key
2764  *   Key of the new entry.
2765  * @param cb_ctx
2766  *   Pointer to the context holding the new encap_decap resource.
2767  *
2768  * @return
2769  *   0 on matching, non-zero otherwise.
2770  */
2771 int
2772 flow_dv_encap_decap_match_cb(struct mlx5_hlist *list __rte_unused,
2773                              struct mlx5_hlist_entry *entry,
2774                              uint64_t key __rte_unused, void *cb_ctx)
2775 {
2776         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2777         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2778         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2779
2780         cache_resource = container_of(entry,
2781                                       struct mlx5_flow_dv_encap_decap_resource,
2782                                       entry);
2783         if (resource->reformat_type == cache_resource->reformat_type &&
2784             resource->ft_type == cache_resource->ft_type &&
2785             resource->flags == cache_resource->flags &&
2786             resource->size == cache_resource->size &&
2787             !memcmp((const void *)resource->buf,
2788                     (const void *)cache_resource->buf,
2789                     resource->size))
2790                 return 0;
2791         return -1;
2792 }
2793
2794 /**
2795  * Allocate encap_decap resource.
2796  *
2797  * @param list
2798  *   Pointer to the hash list.
2799  * @param key
2800  *   Key of the new entry.
2801  * @param cb_ctx
2802  *   Pointer to the context holding the new encap_decap resource.
2803  *
2804  * @return
2805  *   Pointer to the created entry, NULL otherwise and rte_errno is set.
2806  */
2807 struct mlx5_hlist_entry *
2808 flow_dv_encap_decap_create_cb(struct mlx5_hlist *list,
2809                               uint64_t key __rte_unused,
2810                               void *cb_ctx)
2811 {
2812         struct mlx5_dev_ctx_shared *sh = list->ctx;
2813         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2814         struct mlx5dv_dr_domain *domain;
2815         struct mlx5_flow_dv_encap_decap_resource *resource = ctx->data;
2816         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2817         uint32_t idx;
2818         int ret;
2819
2820         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2821                 domain = sh->fdb_domain;
2822         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2823                 domain = sh->rx_domain;
2824         else
2825                 domain = sh->tx_domain;
2826         /* Register new encap/decap resource. */
2827         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2828                                        &idx);
2829         if (!cache_resource) {
2830                 rte_flow_error_set(ctx->error, ENOMEM,
2831                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2832                                    "cannot allocate resource memory");
2833                 return NULL;
2834         }
2835         *cache_resource = *resource;
2836         cache_resource->idx = idx;
2837         ret = mlx5_flow_os_create_flow_action_packet_reformat
2838                                         (sh->ctx, domain, cache_resource,
2839                                          &cache_resource->action);
2840         if (ret) {
2841                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
2842                 rte_flow_error_set(ctx->error, ENOMEM,
2843                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2844                                    NULL, "cannot create action");
2845                 return NULL;
2846         }
2847
2848         return &cache_resource->entry;
2849 }
2850
2851 /**
2852  * Find existing encap/decap resource or create and register a new one.
2853  *
2854  * @param[in, out] dev
2855  *   Pointer to rte_eth_dev structure.
2856  * @param[in, out] resource
2857  *   Pointer to encap/decap resource.
2858  * @param[in, out] dev_flow
2859  *   Pointer to the dev_flow.
2860  * @param[out] error
2861  *   Pointer to error structure.
2862  *
2863  * @return
2864  *   0 on success, otherwise -errno and rte_errno is set.
2865  */
2866 static int
2867 flow_dv_encap_decap_resource_register
2868                         (struct rte_eth_dev *dev,
2869                          struct mlx5_flow_dv_encap_decap_resource *resource,
2870                          struct mlx5_flow *dev_flow,
2871                          struct rte_flow_error *error)
2872 {
2873         struct mlx5_priv *priv = dev->data->dev_private;
2874         struct mlx5_dev_ctx_shared *sh = priv->sh;
2875         struct mlx5_hlist_entry *entry;
2876         union {
2877                 struct {
2878                         uint32_t ft_type:8;
2879                         uint32_t refmt_type:8;
2880                         /*
2881                          * Header reformat actions can be shared between
2882                          * non-root tables. One bit to indicate non-root
2883                          * table or not.
2884                          */
2885                         uint32_t is_root:1;
2886                         uint32_t reserve:15;
2887                 };
2888                 uint32_t v32;
2889         } encap_decap_key = {
2890                 {
2891                         .ft_type = resource->ft_type,
2892                         .refmt_type = resource->reformat_type,
2893                         .is_root = !!dev_flow->dv.group,
2894                         .reserve = 0,
2895                 }
2896         };
2897         struct mlx5_flow_cb_ctx ctx = {
2898                 .error = error,
2899                 .data = resource,
2900         };
2901         uint64_t key64;
2902
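        /*
         * Group 0 flows target the root table; mlx5dv requires the
         * ROOT_LEVEL flag for reformat actions created there, and
         * such actions cannot be shared with non-root tables.
         */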
2903         resource->flags = dev_flow->dv.group ? 0 : 1;
2904         key64 = __rte_raw_cksum(&encap_decap_key.v32,
2905                                  sizeof(encap_decap_key.v32), 0);
2906         if (resource->reformat_type !=
2907             MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 &&
2908             resource->size)
2909                 key64 = __rte_raw_cksum(resource->buf, resource->size, key64);
2910         entry = mlx5_hlist_register(sh->encaps_decaps, key64, &ctx);
2911         if (!entry)
2912                 return -rte_errno;
2913         resource = container_of(entry, typeof(*resource), entry);
2914         dev_flow->dv.encap_decap = resource;
2915         dev_flow->handle->dvh.rix_encap_decap = resource->idx;
2916         return 0;
2917 }
2918
2919 /**
2920  * Find existing table jump resource or create and register a new one.
2921  *
2922  * @param[in, out] dev
2923  *   Pointer to rte_eth_dev structure.
2924  * @param[in, out] tbl
2925  *   Pointer to flow table resource.
2926  * @param[in, out] dev_flow
2927  *   Pointer to the dev_flow.
2928  * @param[out] error
2929  *   Pointer to error structure.
2930  *
2931  * @return
2932  *   0 on success, otherwise -errno and rte_errno is set.
2933  */
2934 static int
2935 flow_dv_jump_tbl_resource_register
2936                         (struct rte_eth_dev *dev __rte_unused,
2937                          struct mlx5_flow_tbl_resource *tbl,
2938                          struct mlx5_flow *dev_flow,
2939                          struct rte_flow_error *error __rte_unused)
2940 {
2941         struct mlx5_flow_tbl_data_entry *tbl_data =
2942                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2943
2944         MLX5_ASSERT(tbl);
2945         MLX5_ASSERT(tbl_data->jump.action);
2946         dev_flow->handle->rix_jump = tbl_data->idx;
2947         dev_flow->dv.jump = &tbl_data->jump;
2948         return 0;
2949 }
2950
2951 int
2952 flow_dv_port_id_match_cb(struct mlx5_cache_list *list __rte_unused,
2953                          struct mlx5_cache_entry *entry, void *cb_ctx)
2954 {
2955         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2956         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2957         struct mlx5_flow_dv_port_id_action_resource *res =
2958                         container_of(entry, typeof(*res), entry);
2959
2960         return ref->port_id != res->port_id;
2961 }
2962
2963 struct mlx5_cache_entry *
2964 flow_dv_port_id_create_cb(struct mlx5_cache_list *list,
2965                           struct mlx5_cache_entry *entry __rte_unused,
2966                           void *cb_ctx)
2967 {
2968         struct mlx5_dev_ctx_shared *sh = list->ctx;
2969         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
2970         struct mlx5_flow_dv_port_id_action_resource *ref = ctx->data;
2971         struct mlx5_flow_dv_port_id_action_resource *cache;
2972         uint32_t idx;
2973         int ret;
2974
2975         /* Register new port id action resource. */
2976         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID], &idx);
2977         if (!cache) {
2978                 rte_flow_error_set(ctx->error, ENOMEM,
2979                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2980                                    "cannot allocate port_id action cache memory");
2981                 return NULL;
2982         }
2983         *cache = *ref;
2984         ret = mlx5_flow_os_create_flow_action_dest_port(sh->fdb_domain,
2985                                                         ref->port_id,
2986                                                         &cache->action);
2987         if (ret) {
2988                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], idx);
2989                 rte_flow_error_set(ctx->error, ENOMEM,
2990                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2991                                    "cannot create action");
2992                 return NULL;
2993         }
2994         return &cache->entry;
2995 }
2996
2997 /**
2998  * Find existing table port ID resource or create and register a new one.
2999  *
3000  * @param[in, out] dev
3001  *   Pointer to rte_eth_dev structure.
3002  * @param[in, out] resource
3003  *   Pointer to port ID action resource.
3004  * @param[in, out] dev_flow
3005  *   Pointer to the dev_flow.
3006  * @param[out] error
3007  *   Pointer to error structure.
3008  *
3009  * @return
3010  *   0 on success, otherwise -errno and rte_errno is set.
3011  */
3012 static int
3013 flow_dv_port_id_action_resource_register
3014                         (struct rte_eth_dev *dev,
3015                          struct mlx5_flow_dv_port_id_action_resource *resource,
3016                          struct mlx5_flow *dev_flow,
3017                          struct rte_flow_error *error)
3018 {
3019         struct mlx5_priv *priv = dev->data->dev_private;
3020         struct mlx5_cache_entry *entry;
3021         struct mlx5_flow_dv_port_id_action_resource *cache;
3022         struct mlx5_flow_cb_ctx ctx = {
3023                 .error = error,
3024                 .data = resource,
3025         };
3026
3027         entry = mlx5_cache_register(&priv->sh->port_id_action_list, &ctx);
3028         if (!entry)
3029                 return -rte_errno;
3030         cache = container_of(entry, typeof(*cache), entry);
3031         dev_flow->dv.port_id_action = cache;
3032         dev_flow->handle->rix_port_id_action = cache->idx;
3033         return 0;
3034 }
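
/*
 * Usage sketch for the port_id action resource above (illustrative;
 * peer_port is an assumption): on a transfer flow, PORT_ID steers
 * matching packets to another switch port, and equal port IDs reuse a
 * single cached mlx5dv action through the list above.
 *
 *	static const struct rte_flow_attr xfer_attr = {
 *		.transfer = 1,
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_action_port_id pid = { .id = peer_port };
 *	const struct rte_flow_action fwd_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */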
3035
3036 int
3037 flow_dv_push_vlan_match_cb(struct mlx5_cache_list *list __rte_unused,
3038                          struct mlx5_cache_entry *entry, void *cb_ctx)
3039 {
3040         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3041         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3042         struct mlx5_flow_dv_push_vlan_action_resource *res =
3043                         container_of(entry, typeof(*res), entry);
3044
3045         return ref->vlan_tag != res->vlan_tag || ref->ft_type != res->ft_type;
3046 }
3047
3048 struct mlx5_cache_entry *
3049 flow_dv_push_vlan_create_cb(struct mlx5_cache_list *list,
3050                           struct mlx5_cache_entry *entry __rte_unused,
3051                           void *cb_ctx)
3052 {
3053         struct mlx5_dev_ctx_shared *sh = list->ctx;
3054         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3055         struct mlx5_flow_dv_push_vlan_action_resource *ref = ctx->data;
3056         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3057         struct mlx5dv_dr_domain *domain;
3058         uint32_t idx;
3059         int ret;
3060
3061         /* Register new push VLAN action resource. */
3062         cache = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN], &idx);
3063         if (!cache) {
3064                 rte_flow_error_set(ctx->error, ENOMEM,
3065                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3066                                    "cannot allocate push_vlan action cache memory");
3067                 return NULL;
3068         }
3069         *cache = *ref;
3070         if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3071                 domain = sh->fdb_domain;
3072         else if (ref->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
3073                 domain = sh->rx_domain;
3074         else
3075                 domain = sh->tx_domain;
3076         ret = mlx5_flow_os_create_flow_action_push_vlan(domain, ref->vlan_tag,
3077                                                         &cache->action);
3078         if (ret) {
3079                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
3080                 rte_flow_error_set(ctx->error, ENOMEM,
3081                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3082                                    "cannot create push vlan action");
3083                 return NULL;
3084         }
3085         return &cache->entry;
3086 }
3087
3088 /**
3089  * Find existing push vlan resource or create and register a new one.
3090  *
3091  * @param[in, out] dev
3092  *   Pointer to rte_eth_dev structure.
3093  * @param[in, out] resource
3094  *   Pointer to push VLAN action resource.
3095  * @param[in, out] dev_flow
3096  *   Pointer to the dev_flow.
3097  * @param[out] error
3098  *   Pointer to error structure.
3099  *
3100  * @return
3101  *   0 on success, otherwise -errno and rte_errno is set.
3102  */
3103 static int
3104 flow_dv_push_vlan_action_resource_register
3105                        (struct rte_eth_dev *dev,
3106                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
3107                         struct mlx5_flow *dev_flow,
3108                         struct rte_flow_error *error)
3109 {
3110         struct mlx5_priv *priv = dev->data->dev_private;
3111         struct mlx5_flow_dv_push_vlan_action_resource *cache;
3112         struct mlx5_cache_entry *entry;
3113         struct mlx5_flow_cb_ctx ctx = {
3114                 .error = error,
3115                 .data = resource,
3116         };
3117
3118         entry = mlx5_cache_register(&priv->sh->push_vlan_action_list, &ctx);
3119         if (!entry)
3120                 return -rte_errno;
3121         cache = container_of(entry, typeof(*cache), entry);
3122
3123         dev_flow->handle->dvh.rix_push_vlan = cache->idx;
3124         dev_flow->dv.push_vlan_res = cache;
3125         return 0;
3126 }
3127
3128 /**
3129  * Get the header size of a specific rte_flow_item_type.
3130  *
3131  * @param[in] item_type
3132  *   Tested rte_flow_item_type.
3133  *
3134  * @return
3135  *   Size of the item type header, 0 if void or irrelevant.
3136  */
3137 static size_t
3138 flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
3139 {
3140         size_t retval;
3141
3142         switch (item_type) {
3143         case RTE_FLOW_ITEM_TYPE_ETH:
3144                 retval = sizeof(struct rte_ether_hdr);
3145                 break;
3146         case RTE_FLOW_ITEM_TYPE_VLAN:
3147                 retval = sizeof(struct rte_vlan_hdr);
3148                 break;
3149         case RTE_FLOW_ITEM_TYPE_IPV4:
3150                 retval = sizeof(struct rte_ipv4_hdr);
3151                 break;
3152         case RTE_FLOW_ITEM_TYPE_IPV6:
3153                 retval = sizeof(struct rte_ipv6_hdr);
3154                 break;
3155         case RTE_FLOW_ITEM_TYPE_UDP:
3156                 retval = sizeof(struct rte_udp_hdr);
3157                 break;
3158         case RTE_FLOW_ITEM_TYPE_TCP:
3159                 retval = sizeof(struct rte_tcp_hdr);
3160                 break;
3161         case RTE_FLOW_ITEM_TYPE_VXLAN:
3162         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3163                 retval = sizeof(struct rte_vxlan_hdr);
3164                 break;
3165         case RTE_FLOW_ITEM_TYPE_GRE:
3166         case RTE_FLOW_ITEM_TYPE_NVGRE:
3167                 retval = sizeof(struct rte_gre_hdr);
3168                 break;
3169         case RTE_FLOW_ITEM_TYPE_MPLS:
3170                 retval = sizeof(struct rte_mpls_hdr);
3171                 break;
3172         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
3173         default:
3174                 retval = 0;
3175                 break;
3176         }
3177         return retval;
3178 }
3179
3180 #define MLX5_ENCAP_IPV4_VERSION         0x40
3181 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
3182 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
3183 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
3184 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
3185 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
3186 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
3187
3188 /**
3189  * Convert the encap action data from list of rte_flow_item to raw buffer
3190  *
3191  * @param[in] items
3192  *   Pointer to rte_flow_item objects list.
3193  * @param[out] buf
3194  *   Pointer to the output buffer.
3195  * @param[out] size
3196  *   Pointer to the output buffer size.
3197  * @param[out] error
3198  *   Pointer to the error structure.
3199  *
3200  * @return
3201  *   0 on success, a negative errno value otherwise and rte_errno is set.
3202  */
3203 static int
3204 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
3205                            size_t *size, struct rte_flow_error *error)
3206 {
3207         struct rte_ether_hdr *eth = NULL;
3208         struct rte_vlan_hdr *vlan = NULL;
3209         struct rte_ipv4_hdr *ipv4 = NULL;
3210         struct rte_ipv6_hdr *ipv6 = NULL;
3211         struct rte_udp_hdr *udp = NULL;
3212         struct rte_vxlan_hdr *vxlan = NULL;
3213         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
3214         struct rte_gre_hdr *gre = NULL;
3215         size_t len;
3216         size_t temp_size = 0;
3217
3218         if (!items)
3219                 return rte_flow_error_set(error, EINVAL,
3220                                           RTE_FLOW_ERROR_TYPE_ACTION,
3221                                           NULL, "invalid empty data");
3222         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3223                 len = flow_dv_get_item_hdr_len(items->type);
3224                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
3225                         return rte_flow_error_set(error, EINVAL,
3226                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3227                                                   (void *)items->type,
3228                                                   "items total size is too big"
3229                                                   " for encap action");
3230                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
3231                 switch (items->type) {
3232                 case RTE_FLOW_ITEM_TYPE_ETH:
3233                         eth = (struct rte_ether_hdr *)&buf[temp_size];
3234                         break;
3235                 case RTE_FLOW_ITEM_TYPE_VLAN:
3236                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
3237                         if (!eth)
3238                                 return rte_flow_error_set(error, EINVAL,
3239                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3240                                                 (void *)items->type,
3241                                                 "eth header not found");
3242                         if (!eth->ether_type)
3243                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
3244                         break;
3245                 case RTE_FLOW_ITEM_TYPE_IPV4:
3246                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
3247                         if (!vlan && !eth)
3248                                 return rte_flow_error_set(error, EINVAL,
3249                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3250                                                 (void *)items->type,
3251                                                 "neither eth nor vlan"
3252                                                 " header found");
3253                         if (vlan && !vlan->eth_proto)
3254                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3255                         else if (eth && !eth->ether_type)
3256                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3257                         if (!ipv4->version_ihl)
3258                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3259                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3260                         if (!ipv4->time_to_live)
3261                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3262                         break;
3263                 case RTE_FLOW_ITEM_TYPE_IPV6:
3264                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3265                         if (!vlan && !eth)
3266                                 return rte_flow_error_set(error, EINVAL,
3267                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3268                                                 (void *)items->type,
3269                                                 "neither eth nor vlan"
3270                                                 " header found");
3271                         if (vlan && !vlan->eth_proto)
3272                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3273                         else if (eth && !eth->ether_type)
3274                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3275                         if (!ipv6->vtc_flow)
3276                                 ipv6->vtc_flow =
3277                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3278                         if (!ipv6->hop_limits)
3279                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3280                         break;
3281                 case RTE_FLOW_ITEM_TYPE_UDP:
3282                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3283                         if (!ipv4 && !ipv6)
3284                                 return rte_flow_error_set(error, EINVAL,
3285                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3286                                                 (void *)items->type,
3287                                                 "ip header not found");
3288                         if (ipv4 && !ipv4->next_proto_id)
3289                                 ipv4->next_proto_id = IPPROTO_UDP;
3290                         else if (ipv6 && !ipv6->proto)
3291                                 ipv6->proto = IPPROTO_UDP;
3292                         break;
3293                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3294                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3295                         if (!udp)
3296                                 return rte_flow_error_set(error, EINVAL,
3297                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3298                                                 (void *)items->type,
3299                                                 "udp header not found");
3300                         if (!udp->dst_port)
3301                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3302                         if (!vxlan->vx_flags)
3303                                 vxlan->vx_flags =
3304                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3305                         break;
3306                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3307                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3308                         if (!udp)
3309                                 return rte_flow_error_set(error, EINVAL,
3310                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3311                                                 (void *)items->type,
3312                                                 "udp header not found");
3313                         if (!vxlan_gpe->proto)
3314                                 return rte_flow_error_set(error, EINVAL,
3315                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3316                                                 (void *)items->type,
3317                                                 "next protocol not found");
3318                         if (!udp->dst_port)
3319                                 udp->dst_port =
3320                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3321                         if (!vxlan_gpe->vx_flags)
3322                                 vxlan_gpe->vx_flags =
3323                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3324                         break;
3325                 case RTE_FLOW_ITEM_TYPE_GRE:
3326                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3327                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3328                         if (!gre->proto)
3329                                 return rte_flow_error_set(error, EINVAL,
3330                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3331                                                 (void *)items->type,
3332                                                 "next protocol not found");
3333                         if (!ipv4 && !ipv6)
3334                                 return rte_flow_error_set(error, EINVAL,
3335                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3336                                                 (void *)items->type,
3337                                                 "ip header not found");
3338                         if (ipv4 && !ipv4->next_proto_id)
3339                                 ipv4->next_proto_id = IPPROTO_GRE;
3340                         else if (ipv6 && !ipv6->proto)
3341                                 ipv6->proto = IPPROTO_GRE;
3342                         break;
3343                 case RTE_FLOW_ITEM_TYPE_VOID:
3344                         break;
3345                 default:
3346                         return rte_flow_error_set(error, EINVAL,
3347                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3348                                                   (void *)items->type,
3349                                                   "unsupported item type");
3350                         break;
3351                 }
3352                 temp_size += len;
3353         }
3354         *size = temp_size;
3355         return 0;
3356 }
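
/*
 * Sketch of an item list this converter accepts (illustrative; the
 * addresses and VNI are assumptions). Fields left zero are filled with
 * the defaults above: Ethernet type, IPv4 version/TTL, UDP destination
 * port 4789 and the VXLAN flags byte.
 *
 *	struct rte_flow_item_eth eth = { 0 };	// ether_type filled in
 *	struct rte_flow_item_ipv4 ip = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
 *	};
 *	struct rte_flow_item_udp udp = { 0 };	// dst port defaults
 *	struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
 *	struct rte_flow_item def[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap vx = { .definition = def };
 */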
3357
3358 static int
3359 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3360 {
3361         struct rte_ether_hdr *eth = NULL;
3362         struct rte_vlan_hdr *vlan = NULL;
3363         struct rte_ipv6_hdr *ipv6 = NULL;
3364         struct rte_udp_hdr *udp = NULL;
3365         char *next_hdr;
3366         uint16_t proto;
3367
3368         eth = (struct rte_ether_hdr *)data;
3369         next_hdr = (char *)(eth + 1);
3370         proto = RTE_BE16(eth->ether_type);
3371
3372         /* VLAN skipping */
3373         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3374                 vlan = (struct rte_vlan_hdr *)next_hdr;
3375                 proto = RTE_BE16(vlan->eth_proto);
3376                 next_hdr += sizeof(struct rte_vlan_hdr);
3377         }
3378
3379         /* HW calculates IPv4 csum. no need to proceed */
3380         if (proto == RTE_ETHER_TYPE_IPV4)
3381                 return 0;
3382
3383         /* non IPv4/IPv6 header. not supported */
3384         if (proto != RTE_ETHER_TYPE_IPV6) {
3385                 return rte_flow_error_set(error, ENOTSUP,
3386                                           RTE_FLOW_ERROR_TYPE_ACTION,
3387                                           NULL, "Cannot offload non IPv4/IPv6");
3388         }
3389
3390         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3391
3392         /* ignore non UDP */
3393         if (ipv6->proto != IPPROTO_UDP)
3394                 return 0;
3395
3396         udp = (struct rte_udp_hdr *)(ipv6 + 1);
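        /*
         * The encap UDP checksum cannot be precomputed for varying
         * payloads, so it is cleared; a zero UDP checksum over IPv6
         * is tolerated for tunnel traffic (RFC 6935).
         */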
3397         udp->dgram_cksum = 0;
3398
3399         return 0;
3400 }
3401
3402 /**
3403  * Convert L2 encap action to DV specification.
3404  *
3405  * @param[in] dev
3406  *   Pointer to rte_eth_dev structure.
3407  * @param[in] action
3408  *   Pointer to action structure.
3409  * @param[in, out] dev_flow
3410  *   Pointer to the mlx5_flow.
3411  * @param[in] transfer
3412  *   Mark if the flow is E-Switch flow.
3413  * @param[out] error
3414  *   Pointer to the error structure.
3415  *
3416  * @return
3417  *   0 on success, a negative errno value otherwise and rte_errno is set.
3418  */
3419 static int
3420 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3421                                const struct rte_flow_action *action,
3422                                struct mlx5_flow *dev_flow,
3423                                uint8_t transfer,
3424                                struct rte_flow_error *error)
3425 {
3426         const struct rte_flow_item *encap_data;
3427         const struct rte_flow_action_raw_encap *raw_encap_data;
3428         struct mlx5_flow_dv_encap_decap_resource res = {
3429                 .reformat_type =
3430                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3431                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3432                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3433         };
3434
3435         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3436                 raw_encap_data =
3437                         (const struct rte_flow_action_raw_encap *)action->conf;
3438                 res.size = raw_encap_data->size;
3439                 memcpy(res.buf, raw_encap_data->data, res.size);
3440         } else {
3441                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3442                         encap_data =
3443                                 ((const struct rte_flow_action_vxlan_encap *)
3444                                                 action->conf)->definition;
3445                 else
3446                         encap_data =
3447                                 ((const struct rte_flow_action_nvgre_encap *)
3448                                                 action->conf)->definition;
3449                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3450                                                &res.size, error))
3451                         return -rte_errno;
3452         }
3453         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3454                 return -rte_errno;
3455         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3456                 return rte_flow_error_set(error, EINVAL,
3457                                           RTE_FLOW_ERROR_TYPE_ACTION,
3458                                           NULL, "can't create L2 encap action");
3459         return 0;
3460 }
3461
3462 /**
3463  * Convert L2 decap action to DV specification.
3464  *
3465  * @param[in] dev
3466  *   Pointer to rte_eth_dev structure.
3467  * @param[in, out] dev_flow
3468  *   Pointer to the mlx5_flow.
3469  * @param[in] transfer
3470  *   Mark if the flow is E-Switch flow.
3471  * @param[out] error
3472  *   Pointer to the error structure.
3473  *
3474  * @return
3475  *   0 on success, a negative errno value otherwise and rte_errno is set.
3476  */
3477 static int
3478 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3479                                struct mlx5_flow *dev_flow,
3480                                uint8_t transfer,
3481                                struct rte_flow_error *error)
3482 {
3483         struct mlx5_flow_dv_encap_decap_resource res = {
3484                 .size = 0,
3485                 .reformat_type =
3486                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3487                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3488                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3489         };
3490
3491         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3492                 return rte_flow_error_set(error, EINVAL,
3493                                           RTE_FLOW_ERROR_TYPE_ACTION,
3494                                           NULL, "can't create L2 decap action");
3495         return 0;
3496 }
3497
3498 /**
3499  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3500  *
3501  * @param[in] dev
3502  *   Pointer to rte_eth_dev structure.
3503  * @param[in] action
3504  *   Pointer to action structure.
3505  * @param[in, out] dev_flow
3506  *   Pointer to the mlx5_flow.
3507  * @param[in] attr
3508  *   Pointer to the flow attributes.
3509  * @param[out] error
3510  *   Pointer to the error structure.
3511  *
3512  * @return
3513  *   0 on success, a negative errno value otherwise and rte_errno is set.
3514  */
3515 static int
3516 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3517                                 const struct rte_flow_action *action,
3518                                 struct mlx5_flow *dev_flow,
3519                                 const struct rte_flow_attr *attr,
3520                                 struct rte_flow_error *error)
3521 {
3522         const struct rte_flow_action_raw_encap *encap_data;
3523         struct mlx5_flow_dv_encap_decap_resource res;
3524
3525         memset(&res, 0, sizeof(res));
3526         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3527         res.size = encap_data->size;
3528         memcpy(res.buf, encap_data->data, res.size);
3529         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3530                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3531                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3532         if (attr->transfer)
3533                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3534         else
3535                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3536                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3537         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3538                 return rte_flow_error_set(error, EINVAL,
3539                                           RTE_FLOW_ERROR_TYPE_ACTION,
3540                                           NULL, "can't create encap action");
3541         return 0;
3542 }
3543
3544 /**
3545  * Create action push VLAN.
3546  *
3547  * @param[in] dev
3548  *   Pointer to rte_eth_dev structure.
3549  * @param[in] attr
3550  *   Pointer to the flow attributes.
3551  * @param[in] vlan
3552  *   Pointer to the vlan to push to the Ethernet header.
3553  * @param[in, out] dev_flow
3554  *   Pointer to the mlx5_flow.
3555  * @param[out] error
3556  *   Pointer to the error structure.
3557  *
3558  * @return
3559  *   0 on success, a negative errno value otherwise and rte_errno is set.
3560  */
3561 static int
3562 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3563                                 const struct rte_flow_attr *attr,
3564                                 const struct rte_vlan_hdr *vlan,
3565                                 struct mlx5_flow *dev_flow,
3566                                 struct rte_flow_error *error)
3567 {
3568         struct mlx5_flow_dv_push_vlan_action_resource res;
3569
3570         memset(&res, 0, sizeof(res));
3571         res.vlan_tag =
3572                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3573                                  vlan->vlan_tci);
3574         if (attr->transfer)
3575                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3576         else
3577                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3578                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3579         return flow_dv_push_vlan_action_resource_register
3580                                             (dev, &res, dev_flow, error);
3581 }
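
/*
 * The 32-bit vlan_tag above packs TPID and TCI as they appear on the
 * wire. A sketch with assumed values (TPID 0x8100, PCP 3, VID 100):
 *
 *	uint16_t tci = (3 << 13) | 100;		// PCP in the top 3 bits
 *	rte_be32_t tag = rte_cpu_to_be_32
 *			(((uint32_t)RTE_ETHER_TYPE_VLAN << 16) | tci);
 *
 * An application would supply the same fields by combining the
 * OF_PUSH_VLAN action with OF_SET_VLAN_VID/OF_SET_VLAN_PCP.
 */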
3582
3583 static int fdb_mirror;
3584
3585 /**
3586  * Validate the modify-header actions.
3587  *
3588  * @param[in] action_flags
3589  *   Holds the actions detected until now.
3590  * @param[in] action
3591  *   Pointer to the modify action.
3592  * @param[out] error
3593  *   Pointer to error structure.
3594  *
3595  * @return
3596  *   0 on success, a negative errno value otherwise and rte_errno is set.
3597  */
3598 static int
3599 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3600                                    const struct rte_flow_action *action,
3601                                    struct rte_flow_error *error)
3602 {
3603         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3604                 return rte_flow_error_set(error, EINVAL,
3605                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3606                                           NULL, "action configuration not set");
3607         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3608                 return rte_flow_error_set(error, EINVAL,
3609                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3610                                           "can't have encap action before"
3611                                           " modify action");
3612         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3613                 return rte_flow_error_set(error, EINVAL,
3614                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3615                                           "can't support sample action before"
3616                                           " modify action for E-Switch"
3617                                           " mirroring");
3618         return 0;
3619 }
3620
3621 /**
3622  * Validate the modify-header MAC address actions.
3623  *
3624  * @param[in] action_flags
3625  *   Holds the actions detected until now.
3626  * @param[in] action
3627  *   Pointer to the modify action.
3628  * @param[in] item_flags
3629  *   Holds the items detected.
3630  * @param[out] error
3631  *   Pointer to error structure.
3632  *
3633  * @return
3634  *   0 on success, a negative errno value otherwise and rte_errno is set.
3635  */
3636 static int
3637 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3638                                    const struct rte_flow_action *action,
3639                                    const uint64_t item_flags,
3640                                    struct rte_flow_error *error)
3641 {
3642         int ret = 0;
3643
3644         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3645         if (!ret) {
3646                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3647                         return rte_flow_error_set(error, EINVAL,
3648                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3649                                                   NULL,
3650                                                   "no L2 item in pattern");
3651         }
3652         return ret;
3653 }
3654
3655 /**
3656  * Validate the modify-header IPv4 address actions.
3657  *
3658  * @param[in] action_flags
3659  *   Holds the actions detected until now.
3660  * @param[in] action
3661  *   Pointer to the modify action.
3662  * @param[in] item_flags
3663  *   Holds the items detected.
3664  * @param[out] error
3665  *   Pointer to error structure.
3666  *
3667  * @return
3668  *   0 on success, a negative errno value otherwise and rte_errno is set.
3669  */
3670 static int
3671 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3672                                     const struct rte_flow_action *action,
3673                                     const uint64_t item_flags,
3674                                     struct rte_flow_error *error)
3675 {
3676         int ret = 0;
3677         uint64_t layer;
3678
3679         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3680         if (!ret) {
3681                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3682                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3683                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3684                 if (!(item_flags & layer))
3685                         return rte_flow_error_set(error, EINVAL,
3686                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3687                                                   NULL,
3688                                                   "no ipv4 item in pattern");
3689         }
3690         return ret;
3691 }
3692
3693 /**
3694  * Validate the modify-header IPv6 address actions.
3695  *
3696  * @param[in] action_flags
3697  *   Holds the actions detected until now.
3698  * @param[in] action
3699  *   Pointer to the modify action.
3700  * @param[in] item_flags
3701  *   Holds the items detected.
3702  * @param[out] error
3703  *   Pointer to error structure.
3704  *
3705  * @return
3706  *   0 on success, a negative errno value otherwise and rte_errno is set.
3707  */
3708 static int
3709 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3710                                     const struct rte_flow_action *action,
3711                                     const uint64_t item_flags,
3712                                     struct rte_flow_error *error)
3713 {
3714         int ret = 0;
3715         uint64_t layer;
3716
3717         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3718         if (!ret) {
3719                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3720                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3721                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3722                 if (!(item_flags & layer))
3723                         return rte_flow_error_set(error, EINVAL,
3724                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3725                                                   NULL,
3726                                                   "no ipv6 item in pattern");
3727         }
3728         return ret;
3729 }
3730
3731 /**
3732  * Validate the modify-header TP actions.
3733  *
3734  * @param[in] action_flags
3735  *   Holds the actions detected until now.
3736  * @param[in] action
3737  *   Pointer to the modify action.
3738  * @param[in] item_flags
3739  *   Holds the items detected.
3740  * @param[out] error
3741  *   Pointer to error structure.
3742  *
3743  * @return
3744  *   0 on success, a negative errno value otherwise and rte_errno is set.
3745  */
3746 static int
3747 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3748                                   const struct rte_flow_action *action,
3749                                   const uint64_t item_flags,
3750                                   struct rte_flow_error *error)
3751 {
3752         int ret = 0;
3753         uint64_t layer;
3754
3755         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3756         if (!ret) {
3757                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3758                                  MLX5_FLOW_LAYER_INNER_L4 :
3759                                  MLX5_FLOW_LAYER_OUTER_L4;
3760                 if (!(item_flags & layer))
3761                         return rte_flow_error_set(error, EINVAL,
3762                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3763                                                   NULL, "no transport layer "
3764                                                   "in pattern");
3765         }
3766         return ret;
3767 }
3768
3769 /**
3770  * Validate the modify-header actions of increment/decrement
3771  * TCP Sequence-number.
3772  *
3773  * @param[in] action_flags
3774  *   Holds the actions detected until now.
3775  * @param[in] action
3776  *   Pointer to the modify action.
3777  * @param[in] item_flags
3778  *   Holds the items detected.
3779  * @param[out] error
3780  *   Pointer to error structure.
3781  *
3782  * @return
3783  *   0 on success, a negative errno value otherwise and rte_errno is set.
3784  */
3785 static int
3786 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3787                                        const struct rte_flow_action *action,
3788                                        const uint64_t item_flags,
3789                                        struct rte_flow_error *error)
3790 {
3791         int ret = 0;
3792         uint64_t layer;
3793
3794         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3795         if (!ret) {
3796                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3797                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3798                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3799                 if (!(item_flags & layer))
3800                         return rte_flow_error_set(error, EINVAL,
3801                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3802                                                   NULL, "no TCP item in"
3803                                                   " pattern");
3804                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3805                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3806                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3807                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3808                         return rte_flow_error_set(error, EINVAL,
3809                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3810                                                   NULL,
3811                                                   "cannot decrease and increase"
3812                                                   " TCP sequence number"
3813                                                   " at the same time");
3814         }
3815         return ret;
3816 }
3817
3818 /**
3819  * Validate the modify-header actions of increment/decrement
3820  * TCP Acknowledgment number.
3821  *
3822  * @param[in] action_flags
3823  *   Holds the actions detected until now.
3824  * @param[in] action
3825  *   Pointer to the modify action.
3826  * @param[in] item_flags
3827  *   Holds the items detected.
3828  * @param[out] error
3829  *   Pointer to error structure.
3830  *
3831  * @return
3832  *   0 on success, a negative errno value otherwise and rte_errno is set.
3833  */
3834 static int
3835 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3836                                        const struct rte_flow_action *action,
3837                                        const uint64_t item_flags,
3838                                        struct rte_flow_error *error)
3839 {
3840         int ret = 0;
3841         uint64_t layer;
3842
3843         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3844         if (!ret) {
3845                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3846                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3847                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3848                 if (!(item_flags & layer))
3849                         return rte_flow_error_set(error, EINVAL,
3850                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3851                                                   NULL, "no TCP item in"
3852                                                   " pattern");
3853                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3854                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3855                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3856                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3857                         return rte_flow_error_set(error, EINVAL,
3858                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3859                                                   NULL,
3860                                                   "cannot decrease and increase"
3861                                                   " TCP acknowledgment number"
3862                                                   " at the same time");
3863         }
3864         return ret;
3865 }
3866
3867 /**
3868  * Validate the modify-header TTL actions.
3869  *
3870  * @param[in] action_flags
3871  *   Holds the actions detected until now.
3872  * @param[in] action
3873  *   Pointer to the modify action.
3874  * @param[in] item_flags
3875  *   Holds the items detected.
3876  * @param[out] error
3877  *   Pointer to error structure.
3878  *
3879  * @return
3880  *   0 on success, a negative errno value otherwise and rte_errno is set.
3881  */
3882 static int
3883 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3884                                    const struct rte_flow_action *action,
3885                                    const uint64_t item_flags,
3886                                    struct rte_flow_error *error)
3887 {
3888         int ret = 0;
3889         uint64_t layer;
3890
3891         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3892         if (!ret) {
3893                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3894                                  MLX5_FLOW_LAYER_INNER_L3 :
3895                                  MLX5_FLOW_LAYER_OUTER_L3;
3896                 if (!(item_flags & layer))
3897                         return rte_flow_error_set(error, EINVAL,
3898                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3899                                                   NULL,
3900                                                   "no IP protocol in pattern");
3901         }
3902         return ret;
3903 }
3904
3905 /**
3906  * Validate jump action.
3907  *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] tunnel
 *   Pointer to the tunnel offload context the rule belongs to, or NULL.
3908  * @param[in] action
3909  *   Pointer to the jump action.
3910  * @param[in] action_flags
3911  *   Holds the actions detected until now.
3912  * @param[in] attributes
3913  *   Pointer to flow attributes.
3914  * @param[in] external
3915  *   Action belongs to a flow rule created by a request external to the PMD.
3916  * @param[out] error
3917  *   Pointer to error structure.
3918  *
3919  * @return
3920  *   0 on success, a negative errno value otherwise and rte_errno is set.
3921  */
3922 static int
3923 flow_dv_validate_action_jump(struct rte_eth_dev *dev,
3924                              const struct mlx5_flow_tunnel *tunnel,
3925                              const struct rte_flow_action *action,
3926                              uint64_t action_flags,
3927                              const struct rte_flow_attr *attributes,
3928                              bool external, struct rte_flow_error *error)
3929 {
3930         uint32_t target_group, table;
3931         int ret = 0;
3932         struct flow_grp_info grp_info = {
3933                 .external = !!external,
3934                 .transfer = !!attributes->transfer,
3935                 .fdb_def_rule = 1,
3936                 .std_tbl_fix = 0
3937         };
3938         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3939                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3940                 return rte_flow_error_set(error, EINVAL,
3941                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3942                                           "can't have 2 fate actions in"
3943                                           " the same flow");
3944         if (action_flags & MLX5_FLOW_ACTION_METER)
3945                 return rte_flow_error_set(error, ENOTSUP,
3946                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3947                                           "jump with meter not supported");
3948         if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) && fdb_mirror)
3949                 return rte_flow_error_set(error, EINVAL,
3950                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3951                                           "E-Switch mirroring can't support"
3952                                           " sample and jump actions in the"
3953                                           " same flow now");
3954         if (!action->conf)
3955                 return rte_flow_error_set(error, EINVAL,
3956                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3957                                           NULL, "action configuration not set");
3958         target_group =
3959                 ((const struct rte_flow_action_jump *)action->conf)->group;
3960         ret = mlx5_flow_group_to_table(dev, tunnel, target_group, &table,
3961                                        &grp_info, error);
3962         if (ret)
3963                 return ret;
3964         if (attributes->group == target_group &&
3965             !(action_flags & (MLX5_FLOW_ACTION_TUNNEL_SET |
3966                               MLX5_FLOW_ACTION_TUNNEL_MATCH)))
3967                 return rte_flow_error_set(error, EINVAL,
3968                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3969                                           "target group must be other than"
3970                                           " the current flow group");
3971         return 0;
3972 }
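
/*
 * Illustrative sketch (not part of the driver): a JUMP action setup that
 * would pass the validation above, assuming the rule itself is created
 * in group 0 so the target group differs from the current one.
 *
 *	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, },
 *	};
 */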
3973
3974 /**
3975  * Validate the port_id action.
3976  *
3977  * @param[in] dev
3978  *   Pointer to rte_eth_dev structure.
3979  * @param[in] action_flags
3980  *   Bit-fields that holds the actions detected until now.
3981  * @param[in] action
3982  *   Port_id RTE action structure.
3983  * @param[in] attr
3984  *   Attributes of flow that includes this action.
3985  * @param[out] error
3986  *   Pointer to error structure.
3987  *
3988  * @return
3989  *   0 on success, a negative errno value otherwise and rte_errno is set.
3990  */
3991 static int
3992 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3993                                 uint64_t action_flags,
3994                                 const struct rte_flow_action *action,
3995                                 const struct rte_flow_attr *attr,
3996                                 struct rte_flow_error *error)
3997 {
3998         const struct rte_flow_action_port_id *port_id;
3999         struct mlx5_priv *act_priv;
4000         struct mlx5_priv *dev_priv;
4001         uint16_t port;
4002
4003         if (!attr->transfer)
4004                 return rte_flow_error_set(error, ENOTSUP,
4005                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4006                                           NULL,
4007                                           "port id action is valid in transfer"
4008                                           " mode only");
4009         if (!action || !action->conf)
4010                 return rte_flow_error_set(error, ENOTSUP,
4011                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4012                                           NULL,
4013                                           "port id action parameters must be"
4014                                           " specified");
4015         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
4016                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4017                 return rte_flow_error_set(error, EINVAL,
4018                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4019                                           "can have only one fate action in"
4020                                           " a flow");
4021         dev_priv = mlx5_dev_to_eswitch_info(dev);
4022         if (!dev_priv)
4023                 return rte_flow_error_set(error, rte_errno,
4024                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4025                                           NULL,
4026                                           "failed to obtain E-Switch info");
4027         port_id = action->conf;
4028         port = port_id->original ? dev->data->port_id : port_id->id;
4029         act_priv = mlx5_port_to_eswitch_info(port, false);
4030         if (!act_priv)
4031                 return rte_flow_error_set
4032                                 (error, rte_errno,
4033                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
4034                                  "failed to obtain E-Switch port id for port");
4035         if (act_priv->domain_id != dev_priv->domain_id)
4036                 return rte_flow_error_set
4037                                 (error, EINVAL,
4038                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4039                                  "port does not belong to"
4040                                  " E-Switch being configured");
4041         return 0;
4042 }
4043
4044 /**
4045  * Get the maximum number of modify header actions.
4046  *
4047  * @param dev
4048  *   Pointer to rte_eth_dev structure.
4049  * @param flags
4050  *   Flags bits to check if root level.
4051  *
4052  * @return
4053  *   Max number of modify header actions the device can support.
4054  */
4055 static inline unsigned int
4056 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
4057                               uint64_t flags)
4058 {
4059         /*
4060          * There's no way to directly query the max capacity from FW.
4061          * The maximal value on root table should be assumed to be supported.
4062          */
4063         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
4064                 return MLX5_MAX_MODIFY_NUM;
4065         else
4066                 return MLX5_ROOT_TBL_MODIFY_NUM;
4067 }
4068
4069 /**
4070  * Validate the meter action.
4071  *
4072  * @param[in] dev
4073  *   Pointer to rte_eth_dev structure.
4074  * @param[in] action_flags
4075  *   Bit-fields that holds the actions detected until now.
4076  * @param[in] action
4077  *   Pointer to the meter action.
4078  * @param[in] attr
4079  *   Attributes of flow that includes this action.
4080  * @param[out] error
4081  *   Pointer to error structure.
4082  *
4083  * @return
4084  *   0 on success, a negative errno value otherwise and rte_errno is set.
4085  */
4086 static int
4087 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
4088                                 uint64_t action_flags,
4089                                 const struct rte_flow_action *action,
4090                                 const struct rte_flow_attr *attr,
4091                                 struct rte_flow_error *error)
4092 {
4093         struct mlx5_priv *priv = dev->data->dev_private;
4094         const struct rte_flow_action_meter *am = action->conf;
4095         struct mlx5_flow_meter *fm;
4096
4097         if (!am)
4098                 return rte_flow_error_set(error, EINVAL,
4099                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4100                                           "meter action conf is NULL");
4101
4102         if (action_flags & MLX5_FLOW_ACTION_METER)
4103                 return rte_flow_error_set(error, ENOTSUP,
4104                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4105                                           "meter chaining not supported");
4106         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4107                 return rte_flow_error_set(error, ENOTSUP,
4108                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4109                                           "meter with jump not supported");
4110         if (!priv->mtr_en)
4111                 return rte_flow_error_set(error, ENOTSUP,
4112                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4113                                           NULL,
4114                                           "meter action not supported");
4115         fm = mlx5_flow_meter_find(priv, am->mtr_id);
4116         if (!fm)
4117                 return rte_flow_error_set(error, EINVAL,
4118                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4119                                           "Meter not found");
4120         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
4121               (!fm->ingress && !attr->ingress && attr->egress) ||
4122               (!fm->egress && !attr->egress && attr->ingress))))
4123                 return rte_flow_error_set(error, EINVAL,
4124                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4125                                           "Flow attributes are either invalid "
4126                                           "or have a conflict with current "
4127                                           "meter attributes");
4128         return 0;
4129 }
4130
4131 /**
4132  * Validate the age action.
4133  *
4134  * @param[in] action_flags
4135  *   Holds the actions detected until now.
4136  * @param[in] action
4137  *   Pointer to the age action.
4138  * @param[in] dev
4139  *   Pointer to the Ethernet device structure.
4140  * @param[out] error
4141  *   Pointer to error structure.
4142  *
4143  * @return
4144  *   0 on success, a negative errno value otherwise and rte_errno is set.
4145  */
4146 static int
4147 flow_dv_validate_action_age(uint64_t action_flags,
4148                             const struct rte_flow_action *action,
4149                             struct rte_eth_dev *dev,
4150                             struct rte_flow_error *error)
4151 {
4152         struct mlx5_priv *priv = dev->data->dev_private;
4153         const struct rte_flow_action_age *age = action->conf;
4154
4155         if (!priv->config.devx || (priv->sh->cmng.counter_fallback &&
4156             !priv->sh->aso_age_mng))
4157                 return rte_flow_error_set(error, ENOTSUP,
4158                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4159                                           NULL,
4160                                           "age action not supported");
4161         if (!(action->conf))
4162                 return rte_flow_error_set(error, EINVAL,
4163                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4164                                           "configuration cannot be null");
4165         if (!(age->timeout))
4166                 return rte_flow_error_set(error, EINVAL,
4167                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4168                                           "invalid timeout value 0");
4169         if (action_flags & MLX5_FLOW_ACTION_AGE)
4170                 return rte_flow_error_set(error, EINVAL,
4171                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4172                                           "duplicate age actions set");
4173         return 0;
4174 }
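
/*
 * Illustrative sketch (not part of the driver): an AGE action that
 * satisfies the checks above -- DevX available, a non-zero timeout in
 * seconds, and at most one AGE action per flow.
 *
 *	struct rte_flow_action_age age = { .timeout = 10, .context = NULL };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, },
 *	};
 */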
4175
4176 /**
4177  * Validate the modify-header IPv4 DSCP actions.
4178  *
4179  * @param[in] action_flags
4180  *   Holds the actions detected until now.
4181  * @param[in] action
4182  *   Pointer to the modify action.
4183  * @param[in] item_flags
4184  *   Holds the items detected.
4185  * @param[out] error
4186  *   Pointer to error structure.
4187  *
4188  * @return
4189  *   0 on success, a negative errno value otherwise and rte_errno is set.
4190  */
4191 static int
4192 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
4193                                          const struct rte_flow_action *action,
4194                                          const uint64_t item_flags,
4195                                          struct rte_flow_error *error)
4196 {
4197         int ret = 0;
4198
4199         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4200         if (!ret) {
4201                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
4202                         return rte_flow_error_set(error, EINVAL,
4203                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4204                                                   NULL,
4205                                                   "no ipv4 item in pattern");
4206         }
4207         return ret;
4208 }
4209
4210 /**
4211  * Validate the modify-header IPv6 DSCP actions.
4212  *
4213  * @param[in] action_flags
4214  *   Holds the actions detected until now.
4215  * @param[in] action
4216  *   Pointer to the modify action.
4217  * @param[in] item_flags
4218  *   Holds the items detected.
4219  * @param[out] error
4220  *   Pointer to error structure.
4221  *
4222  * @return
4223  *   0 on success, a negative errno value otherwise and rte_errno is set.
4224  */
4225 static int
4226 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
4227                                          const struct rte_flow_action *action,
4228                                          const uint64_t item_flags,
4229                                          struct rte_flow_error *error)
4230 {
4231         int ret = 0;
4232
4233         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
4234         if (!ret) {
4235                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
4236                         return rte_flow_error_set(error, EINVAL,
4237                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4238                                                   NULL,
4239                                                   "no ipv6 item in pattern");
4240         }
4241         return ret;
4242 }
4243
4244 /**
4245  * Match modify-header resource.
4246  *
4247  * @param list
4248  *   Pointer to the hash list.
4249  * @param entry
4250  *   Pointer to exist resource entry object.
4251  * @param key
4252  *   Key of the new entry.
4253  * @param ctx
4254  *   Pointer to new modify-header resource.
4255  *
4256  * @return
4257  *   0 on matching, non-zero otherwise.
4258  */
4259 int
4260 flow_dv_modify_match_cb(struct mlx5_hlist *list __rte_unused,
4261                         struct mlx5_hlist_entry *entry,
4262                         uint64_t key __rte_unused, void *cb_ctx)
4263 {
4264         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4265         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4266         struct mlx5_flow_dv_modify_hdr_resource *resource =
4267                         container_of(entry, typeof(*resource), entry);
4268         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4269
4270         key_len += ref->actions_num * sizeof(ref->actions[0]);
4271         return ref->actions_num != resource->actions_num ||
4272                memcmp(&ref->ft_type, &resource->ft_type, key_len);
4273 }
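
/*
 * Note on the comparison above: the key deliberately skips the hash list
 * entry and the action handle at the head of the resource and spans from
 * ->ft_type to the used tail of the flexible ->actions[] array. A rough
 * layout sketch, assuming the field order of
 * struct mlx5_flow_dv_modify_hdr_resource:
 *
 *	| entry | action | ft_type ... end of fixed part | actions[0..n-1] |
 *	                  ^<------------- compared key_len ------------->^
 *
 * Two resources therefore match only when the action count and that whole
 * byte span are identical.
 */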
4274
4275 struct mlx5_hlist_entry *
4276 flow_dv_modify_create_cb(struct mlx5_hlist *list, uint64_t key __rte_unused,
4277                          void *cb_ctx)
4278 {
4279         struct mlx5_dev_ctx_shared *sh = list->ctx;
4280         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
4281         struct mlx5dv_dr_domain *ns;
4282         struct mlx5_flow_dv_modify_hdr_resource *entry;
4283         struct mlx5_flow_dv_modify_hdr_resource *ref = ctx->data;
4284         int ret;
4285         uint32_t data_len = ref->actions_num * sizeof(ref->actions[0]);
4286         uint32_t key_len = sizeof(*ref) - offsetof(typeof(*ref), ft_type);
4287
4288         entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry) + data_len, 0,
4289                             SOCKET_ID_ANY);
4290         if (!entry) {
4291                 rte_flow_error_set(ctx->error, ENOMEM,
4292                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4293                                    "cannot allocate resource memory");
4294                 return NULL;
4295         }
4296         rte_memcpy(&entry->ft_type,
4297                    RTE_PTR_ADD(ref, offsetof(typeof(*ref), ft_type)),
4298                    key_len + data_len);
4299         if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4300                 ns = sh->fdb_domain;
4301         else if (entry->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4302                 ns = sh->tx_domain;
4303         else
4304                 ns = sh->rx_domain;
4305         ret = mlx5_flow_os_create_flow_action_modify_header
4306                                         (sh->ctx, ns, entry,
4307                                          data_len, &entry->action);
4308         if (ret) {
4309                 mlx5_free(entry);
4310                 rte_flow_error_set(ctx->error, ENOMEM,
4311                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4312                                    NULL, "cannot create modification action");
4313                 return NULL;
4314         }
4315         return &entry->entry;
4316 }
4317
4318 /**
4319  * Validate the sample action.
4320  *
4321  * @param[in] action_flags
4322  *   Holds the actions detected until now.
4323  * @param[in] action
4324  *   Pointer to the sample action.
4325  * @param[in] dev
4326  *   Pointer to the Ethernet device structure.
4327  * @param[in] attr
4328  *   Attributes of flow that includes this action.
4329  * @param[out] error
4330  *   Pointer to error structure.
4331  *
4332  * @return
4333  *   0 on success, a negative errno value otherwise and rte_errno is set.
4334  */
4335 static int
4336 flow_dv_validate_action_sample(uint64_t action_flags,
4337                                const struct rte_flow_action *action,
4338                                struct rte_eth_dev *dev,
4339                                const struct rte_flow_attr *attr,
4340                                struct rte_flow_error *error)
4341 {
4342         struct mlx5_priv *priv = dev->data->dev_private;
4343         struct mlx5_dev_config *dev_conf = &priv->config;
4344         const struct rte_flow_action_sample *sample = action->conf;
4345         const struct rte_flow_action *act;
4346         uint64_t sub_action_flags = 0;
4347         uint16_t queue_index = 0xFFFF;
4348         int actions_n = 0;
4349         int ret;
4350         fdb_mirror = 0;
4351
4352         if (!sample)
4353                 return rte_flow_error_set(error, EINVAL,
4354                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4355                                           "configuration cannot be NULL");
4356         if (sample->ratio == 0)
4357                 return rte_flow_error_set(error, EINVAL,
4358                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4359                                           "ratio value starts from 1");
4360         if (!priv->config.devx || (sample->ratio > 0 && !priv->sampler_en))
4361                 return rte_flow_error_set(error, ENOTSUP,
4362                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4363                                           NULL,
4364                                           "sample action not supported");
4365         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
4366                 return rte_flow_error_set(error, EINVAL,
4367                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4368                                           "Multiple sample actions not "
4369                                           "supported");
4370         if (action_flags & MLX5_FLOW_ACTION_METER)
4371                 return rte_flow_error_set(error, EINVAL,
4372                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4373                                           "wrong action order, meter should "
4374                                           "be after sample action");
4375         if (action_flags & MLX5_FLOW_ACTION_JUMP)
4376                 return rte_flow_error_set(error, EINVAL,
4377                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
4378                                           "wrong action order, jump should "
4379                                           "be after sample action");
4380         act = sample->actions;
4381         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++) {
4382                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4383                         return rte_flow_error_set(error, ENOTSUP,
4384                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4385                                                   act, "too many actions");
4386                 switch (act->type) {
4387                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4388                         ret = mlx5_flow_validate_action_queue(act,
4389                                                               sub_action_flags,
4390                                                               dev,
4391                                                               attr, error);
4392                         if (ret < 0)
4393                                 return ret;
4394                         queue_index = ((const struct rte_flow_action_queue *)
4395                                                         (act->conf))->index;
4396                         sub_action_flags |= MLX5_FLOW_ACTION_QUEUE;
4397                         ++actions_n;
4398                         break;
4399                 case RTE_FLOW_ACTION_TYPE_MARK:
4400                         ret = flow_dv_validate_action_mark(dev, act,
4401                                                            sub_action_flags,
4402                                                            attr, error);
4403                         if (ret < 0)
4404                                 return ret;
4405                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY)
4406                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK |
4407                                                 MLX5_FLOW_ACTION_MARK_EXT;
4408                         else
4409                                 sub_action_flags |= MLX5_FLOW_ACTION_MARK;
4410                         ++actions_n;
4411                         break;
4412                 case RTE_FLOW_ACTION_TYPE_COUNT:
4413                         ret = flow_dv_validate_action_count(dev, error);
4414                         if (ret < 0)
4415                                 return ret;
4416                         sub_action_flags |= MLX5_FLOW_ACTION_COUNT;
4417                         ++actions_n;
4418                         break;
4419                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4420                         ret = flow_dv_validate_action_port_id(dev,
4421                                                               sub_action_flags,
4422                                                               act,
4423                                                               attr,
4424                                                               error);
4425                         if (ret)
4426                                 return ret;
4427                         sub_action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4428                         ++actions_n;
4429                         break;
4430                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4431                         ret = flow_dv_validate_action_raw_encap_decap
4432                                 (dev, NULL, act->conf, attr, &sub_action_flags,
4433                                  &actions_n, error);
4434                         if (ret < 0)
4435                                 return ret;
4436                         ++actions_n;
4437                         break;
4438                 default:
4439                         return rte_flow_error_set(error, ENOTSUP,
4440                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4441                                                   NULL,
4442                                                   "optional action not "
4443                                                   "supported");
4444                 }
4445         }
4446         if (attr->ingress && !attr->transfer) {
4447                 if (!(sub_action_flags & MLX5_FLOW_ACTION_QUEUE))
4448                         return rte_flow_error_set(error, EINVAL,
4449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4450                                                   NULL,
4451                                                   "Ingress must have a dest "
4452                                                   "QUEUE for Sample");
4453         } else if (attr->egress && !attr->transfer) {
4454                 return rte_flow_error_set(error, ENOTSUP,
4455                                           RTE_FLOW_ERROR_TYPE_ACTION,
4456                                           NULL,
4457                                           "Sample only supports Ingress "
4458                                           "or E-Switch");
4459         } else if (sample->actions->type != RTE_FLOW_ACTION_TYPE_END) {
4460                 MLX5_ASSERT(attr->transfer);
4461                 if (sample->ratio > 1)
4462                         return rte_flow_error_set(error, ENOTSUP,
4463                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4464                                                   NULL,
4465                                                   "E-Switch doesn't support "
4466                                                   "any optional action "
4467                                                   "for sampling");
4468                 fdb_mirror = 1;
4469                 if (sub_action_flags & MLX5_FLOW_ACTION_QUEUE)
4470                         return rte_flow_error_set(error, ENOTSUP,
4471                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4472                                                   NULL,
4473                                                   "unsupported action QUEUE");
4474                 if (!(sub_action_flags & MLX5_FLOW_ACTION_PORT_ID))
4475                         return rte_flow_error_set(error, EINVAL,
4476                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4477                                                   NULL,
4478                                                   "E-Switch must have a dest "
4479                                                   "port for mirroring");
4480         }
4481         /* Continue validation for Xcap actions. */
4482         if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
4483             (queue_index == 0xFFFF ||
4484              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
4485                 if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
4486                      MLX5_FLOW_XCAP_ACTIONS)
4487                         return rte_flow_error_set(error, ENOTSUP,
4488                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4489                                                   NULL, "encap and decap "
4490                                                   "combination isn't "
4491                                                   "supported");
4492                 if (!attr->transfer && attr->ingress && (sub_action_flags &
4493                                                         MLX5_FLOW_ACTION_ENCAP))
4494                         return rte_flow_error_set(error, ENOTSUP,
4495                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4496                                                   NULL, "encap is not supported"
4497                                                   " for ingress traffic");
4498         }
4499         return 0;
4500 }
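
/*
 * Illustrative sketch (not part of the driver): a NIC-Rx sample action
 * that passes the validation above -- a non-zero ratio and a QUEUE fate
 * in the sub-action list, as required for plain ingress rules.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action sub_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END, },
 *	};
 *	struct rte_flow_action_sample sample = {
 *		.ratio = 2,
 *		.actions = sub_actions,
 *	};
 */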
4501
4502 /**
4503  * Find existing modify-header resource or create and register a new one.
4504  *
4505  * @param[in, out] dev
4506  *   Pointer to rte_eth_dev structure.
4507  * @param[in, out] resource
4508  *   Pointer to modify-header resource.
4509  * @param[in, out] dev_flow
4510  *   Pointer to the dev_flow.
4511  * @param[out] error
4512  *   pointer to error structure.
4513  *
4514  * @return
4515  *   0 on success, a negative errno value otherwise and rte_errno is set.
4516  */
4517 static int
4518 flow_dv_modify_hdr_resource_register
4519                         (struct rte_eth_dev *dev,
4520                          struct mlx5_flow_dv_modify_hdr_resource *resource,
4521                          struct mlx5_flow *dev_flow,
4522                          struct rte_flow_error *error)
4523 {
4524         struct mlx5_priv *priv = dev->data->dev_private;
4525         struct mlx5_dev_ctx_shared *sh = priv->sh;
4526         uint32_t key_len = sizeof(*resource) -
4527                            offsetof(typeof(*resource), ft_type) +
4528                            resource->actions_num * sizeof(resource->actions[0]);
4529         struct mlx5_hlist_entry *entry;
4530         struct mlx5_flow_cb_ctx ctx = {
4531                 .error = error,
4532                 .data = resource,
4533         };
4534         uint64_t key64;
4535
4536         resource->flags = dev_flow->dv.group ? 0 :
4537                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4538         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4539                                     resource->flags))
4540                 return rte_flow_error_set(error, EOVERFLOW,
4541                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4542                                           "too many modify header items");
4543         key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
4544         entry = mlx5_hlist_register(sh->modify_cmds, key64, &ctx);
4545         if (!entry)
4546                 return -rte_errno;
4547         resource = container_of(entry, typeof(*resource), entry);
4548         dev_flow->handle->dvh.modify_hdr = resource;
4549         return 0;
4550 }
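
/*
 * Note: the hash key is a raw checksum over exactly the byte span that
 * flow_dv_modify_match_cb() compares, which keeps lookup and comparison
 * consistent. A minimal sketch of the computation for a resource with N
 * actions:
 *
 *	key_len = sizeof(*resource) -
 *		  offsetof(typeof(*resource), ft_type) +
 *		  N * sizeof(resource->actions[0]);
 *	key64 = __rte_raw_cksum(&resource->ft_type, key_len, 0);
 *
 * Checksum collisions are harmless: they are resolved by the match
 * callback.
 */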
4551
4552 /**
4553  * Get DV flow counter by index.
4554  *
4555  * @param[in] dev
4556  *   Pointer to the Ethernet device structure.
4557  * @param[in] idx
4558  *   mlx5 flow counter index in the container.
4559  * @param[out] ppool
4560  *   mlx5 flow counter pool in the container.
4561  *
4562  * @return
4563  *   Pointer to the counter, NULL otherwise.
4564  */
4565 static struct mlx5_flow_counter *
4566 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4567                            uint32_t idx,
4568                            struct mlx5_flow_counter_pool **ppool)
4569 {
4570         struct mlx5_priv *priv = dev->data->dev_private;
4571         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4572         struct mlx5_flow_counter_pool *pool;
4573
4574         /* Decrease to original index and clear shared bit. */
4575         idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1);
4576         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cmng->n);
4577         pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL];
4578         MLX5_ASSERT(pool);
4579         if (ppool)
4580                 *ppool = pool;
4581         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4582 }
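
/*
 * Worked example of the decoding above: counter indices are 1-based and
 * may carry the shared-counter bit. Taking MLX5_COUNTERS_PER_POOL as 512
 * purely for illustration, the index (MLX5_CNT_SHARED_OFFSET | 513)
 * decodes as:
 *
 *	idx = ((MLX5_CNT_SHARED_OFFSET | 513) - 1) &
 *	      (MLX5_CNT_SHARED_OFFSET - 1);		// = 512
 *	pool = cmng->pools[512 / 512];			// second pool
 *	cnt = MLX5_POOL_GET_CNT(pool, 512 % 512);	// its first counter
 */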
4583
4584 /**
4585  * Check the devx counter belongs to the pool.
4586  *
4587  * @param[in] pool
4588  *   Pointer to the counter pool.
4589  * @param[in] id
4590  *   The counter devx ID.
4591  *
4592  * @return
4593  *   True if counter belongs to the pool, false otherwise.
4594  */
4595 static bool
4596 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4597 {
4598         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4599                    MLX5_COUNTERS_PER_POOL;
4600
4601         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4602                 return true;
4603         return false;
4604 }
4605
4606 /**
4607  * Get a pool by devx counter ID.
4608  *
4609  * @param[in] cmng
4610  *   Pointer to the counter management.
4611  * @param[in] id
4612  *   The counter devx ID.
4613  *
4614  * @return
4615  *   The counter pool pointer if it exists, NULL otherwise.
4616  */
4617 static struct mlx5_flow_counter_pool *
4618 flow_dv_find_pool_by_id(struct mlx5_flow_counter_mng *cmng, int id)
4619 {
4620         uint32_t i;
4621         struct mlx5_flow_counter_pool *pool = NULL;
4622
4623         rte_spinlock_lock(&cmng->pool_update_sl);
4624         /* Check last used pool. */
4625         if (cmng->last_pool_idx != POOL_IDX_INVALID &&
4626             flow_dv_is_counter_in_pool(cmng->pools[cmng->last_pool_idx], id)) {
4627                 pool = cmng->pools[cmng->last_pool_idx];
4628                 goto out;
4629         }
4630         /* ID out of range means no suitable pool in the container. */
4631         if (id > cmng->max_id || id < cmng->min_id)
4632                 goto out;
4633         /*
4634          * Search the container from the end, since counter IDs are
4635          * mostly allocated in increasing order and the last pool is
4636          * likely the needed one.
4637          */
4638         i = cmng->n_valid;
4639         while (i--) {
4640                 struct mlx5_flow_counter_pool *pool_tmp = cmng->pools[i];
4641
4642                 if (flow_dv_is_counter_in_pool(pool_tmp, id)) {
4643                         pool = pool_tmp;
4644                         break;
4645                 }
4646         }
4647 out:
4648         rte_spinlock_unlock(&cmng->pool_update_sl);
4649         return pool;
4650 }
4651
4652 /**
4653  * Resize a counter container.
4654  *
4655  * @param[in] dev
4656  *   Pointer to the Ethernet device structure.
4657  *
4658  * @return
4659  *   0 on success, otherwise negative errno value and rte_errno is set.
4660  */
4661 static int
4662 flow_dv_container_resize(struct rte_eth_dev *dev)
4663 {
4664         struct mlx5_priv *priv = dev->data->dev_private;
4665         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4666         void *old_pools = cmng->pools;
4667         uint32_t resize = cmng->n + MLX5_CNT_CONTAINER_RESIZE;
4668         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4669         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4670
4671         if (!pools) {
4672                 rte_errno = ENOMEM;
4673                 return -ENOMEM;
4674         }
4675         if (old_pools)
4676                 memcpy(pools, old_pools, cmng->n *
4677                                        sizeof(struct mlx5_flow_counter_pool *));
4678         cmng->n = resize;
4679         cmng->pools = pools;
4680         if (old_pools)
4681                 mlx5_free(old_pools);
4682         return 0;
4683 }
4684
4685 /**
4686  * Query a devx flow counter.
4687  *
4688  * @param[in] dev
4689  *   Pointer to the Ethernet device structure.
4690  * @param[in] counter
4691  *   Index to the flow counter.
4692  * @param[out] pkts
4693  *   The statistics value of packets.
4694  * @param[out] bytes
4695  *   The statistics value of bytes.
4696  *
4697  * @return
4698  *   0 on success, otherwise a negative errno value and rte_errno is set.
4699  */
4700 static inline int
4701 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4702                      uint64_t *bytes)
4703 {
4704         struct mlx5_priv *priv = dev->data->dev_private;
4705         struct mlx5_flow_counter_pool *pool = NULL;
4706         struct mlx5_flow_counter *cnt;
4707         int offset;
4708
4709         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4710         MLX5_ASSERT(pool);
4711         if (priv->sh->cmng.counter_fallback)
4712                 return mlx5_devx_cmd_flow_counter_query(cnt->dcs_when_active, 0,
4713                                         0, pkts, bytes, 0, NULL, NULL, 0);
4714         rte_spinlock_lock(&pool->sl);
4715         if (!pool->raw) {
4716                 *pkts = 0;
4717                 *bytes = 0;
4718         } else {
4719                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4720                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4721                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4722         }
4723         rte_spinlock_unlock(&pool->sl);
4724         return 0;
4725 }
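
/*
 * Minimal usage sketch (not part of the driver): reading a counter
 * previously returned by flow_dv_counter_alloc().
 *
 *	uint64_t pkts, bytes;
 *
 *	if (!_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes))
 *		DRV_LOG(DEBUG, "hits=%" PRIu64 " bytes=%" PRIu64,
 *			pkts, bytes);
 *
 * In batch mode the values come from the last completed asynchronous
 * query, so they may lag the hardware by up to one query cycle.
 */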
4726
4727 /**
4728  * Create and initialize a new counter pool.
4729  *
4730  * @param[in] dev
4731  *   Pointer to the Ethernet device structure.
4732  * @param[out] dcs
4733  *   The devX counter handle.
4734  * @param[in] age
4735  *   Whether the pool is for counters that were allocated for aging.
4738  *
4739  * @return
4740  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4741  */
4742 static struct mlx5_flow_counter_pool *
4743 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4744                     uint32_t age)
4745 {
4746         struct mlx5_priv *priv = dev->data->dev_private;
4747         struct mlx5_flow_counter_pool *pool;
4748         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4749         bool fallback = priv->sh->cmng.counter_fallback;
4750         uint32_t size = sizeof(*pool);
4751
4752         size += MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE;
4753         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE);
4754         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4755         if (!pool) {
4756                 rte_errno = ENOMEM;
4757                 return NULL;
4758         }
4759         pool->raw = NULL;
4760         pool->is_aged = !!age;
4761         pool->query_gen = 0;
4762         pool->min_dcs = dcs;
4763         rte_spinlock_init(&pool->sl);
4764         rte_spinlock_init(&pool->csl);
4765         TAILQ_INIT(&pool->counters[0]);
4766         TAILQ_INIT(&pool->counters[1]);
4767         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
4768         rte_spinlock_lock(&cmng->pool_update_sl);
4769         pool->index = cmng->n_valid;
4770         if (pool->index == cmng->n && flow_dv_container_resize(dev)) {
4771                 mlx5_free(pool);
4772                 rte_spinlock_unlock(&cmng->pool_update_sl);
4773                 return NULL;
4774         }
4775         cmng->pools[pool->index] = pool;
4776         cmng->n_valid++;
4777         if (unlikely(fallback)) {
4778                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4779
4780                 if (base < cmng->min_id)
4781                         cmng->min_id = base;
4782                 if (base > cmng->max_id)
4783                         cmng->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4784                 cmng->last_pool_idx = pool->index;
4785         }
4786         rte_spinlock_unlock(&cmng->pool_update_sl);
4787         return pool;
4788 }
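
/*
 * Note on the allocation above: a pool is a single contiguous block laid
 * out as
 *
 *	sizeof(*pool)
 *	+ MLX5_COUNTERS_PER_POOL * MLX5_CNT_SIZE
 *	+ (age ? MLX5_COUNTERS_PER_POOL * MLX5_AGE_SIZE : 0)
 *
 * MLX5_POOL_GET_CNT() and MLX5_CNT_TO_AGE() index into the trailing
 * arrays, so the counters and their aging parameters need no separate
 * allocations.
 */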
4789
4790 /**
4791  * Prepare a new counter and/or a new counter pool.
4792  *
4793  * @param[in] dev
4794  *   Pointer to the Ethernet device structure.
4795  * @param[out] cnt_free
4796  *   Where to put the pointer of a new counter.
4797  * @param[in] age
4798  *   Whether the pool is for counters that were allocated for aging.
4799  *
4800  * @return
4801  *   The counter pool pointer and @p cnt_free is set on success,
4802  *   NULL otherwise and rte_errno is set.
4803  */
4804 static struct mlx5_flow_counter_pool *
4805 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4806                              struct mlx5_flow_counter **cnt_free,
4807                              uint32_t age)
4808 {
4809         struct mlx5_priv *priv = dev->data->dev_private;
4810         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4811         struct mlx5_flow_counter_pool *pool;
4812         struct mlx5_counters tmp_tq;
4813         struct mlx5_devx_obj *dcs = NULL;
4814         struct mlx5_flow_counter *cnt;
4815         enum mlx5_counter_type cnt_type =
4816                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4817         bool fallback = priv->sh->cmng.counter_fallback;
4818         uint32_t i;
4819
4820         if (fallback) {
4821                 /* bulk_bitmap must be 0 for single counter allocation. */
4822                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4823                 if (!dcs)
4824                         return NULL;
4825                 pool = flow_dv_find_pool_by_id(cmng, dcs->id);
4826                 if (!pool) {
4827                         pool = flow_dv_pool_create(dev, dcs, age);
4828                         if (!pool) {
4829                                 mlx5_devx_cmd_destroy(dcs);
4830                                 return NULL;
4831                         }
4832                 }
4833                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4834                 cnt = MLX5_POOL_GET_CNT(pool, i);
4835                 cnt->pool = pool;
4836                 cnt->dcs_when_free = dcs;
4837                 *cnt_free = cnt;
4838                 return pool;
4839         }
4840         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4841         if (!dcs) {
4842                 rte_errno = ENODATA;
4843                 return NULL;
4844         }
4845         pool = flow_dv_pool_create(dev, dcs, age);
4846         if (!pool) {
4847                 mlx5_devx_cmd_destroy(dcs);
4848                 return NULL;
4849         }
4850         TAILQ_INIT(&tmp_tq);
4851         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4852                 cnt = MLX5_POOL_GET_CNT(pool, i);
4853                 cnt->pool = pool;
4854                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4855         }
4856         rte_spinlock_lock(&cmng->csl[cnt_type]);
4857         TAILQ_CONCAT(&cmng->counters[cnt_type], &tmp_tq, next);
4858         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4859         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4860         (*cnt_free)->pool = pool;
4861         return pool;
4862 }
4863
4864 /**
4865  * Allocate a flow counter.
4866  *
4867  * @param[in] dev
4868  *   Pointer to the Ethernet device structure.
4869  * @param[in] age
4870  *   Whether the counter was allocated for aging.
4871  *
4872  * @return
4873  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4874  */
4875 static uint32_t
4876 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
4877 {
4878         struct mlx5_priv *priv = dev->data->dev_private;
4879         struct mlx5_flow_counter_pool *pool = NULL;
4880         struct mlx5_flow_counter *cnt_free = NULL;
4881         bool fallback = priv->sh->cmng.counter_fallback;
4882         struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng;
4883         enum mlx5_counter_type cnt_type =
4884                         age ? MLX5_COUNTER_TYPE_AGE : MLX5_COUNTER_TYPE_ORIGIN;
4885         uint32_t cnt_idx;
4886
4887         if (!priv->config.devx) {
4888                 rte_errno = ENOTSUP;
4889                 return 0;
4890         }
4891         /* Get free counters from container. */
4892         rte_spinlock_lock(&cmng->csl[cnt_type]);
4893         cnt_free = TAILQ_FIRST(&cmng->counters[cnt_type]);
4894         if (cnt_free)
4895                 TAILQ_REMOVE(&cmng->counters[cnt_type], cnt_free, next);
4896         rte_spinlock_unlock(&cmng->csl[cnt_type]);
4897         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free, age))
4898                 goto err;
4899         pool = cnt_free->pool;
4900         if (fallback)
4901                 cnt_free->dcs_when_active = cnt_free->dcs_when_free;
4902         /* Create a DV counter action only in the first time usage. */
4903         if (!cnt_free->action) {
4904                 uint16_t offset;
4905                 struct mlx5_devx_obj *dcs;
4906                 int ret;
4907
4908                 if (!fallback) {
4909                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4910                         dcs = pool->min_dcs;
4911                 } else {
4912                         offset = 0;
4913                         dcs = cnt_free->dcs_when_free;
4914                 }
4915                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4916                                                             &cnt_free->action);
4917                 if (ret) {
4918                         rte_errno = errno;
4919                         goto err;
4920                 }
4921         }
4922         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4923                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4924         /* Update the counter reset values. */
4925         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4926                                  &cnt_free->bytes))
4927                 goto err;
4928         if (!fallback && !priv->sh->cmng.query_thread_on)
4929                 /* Start the asynchronous batch query by the host thread. */
4930                 mlx5_set_query_alarm(priv->sh);
4931         return cnt_idx;
4932 err:
4933         if (cnt_free) {
4934                 cnt_free->pool = pool;
4935                 if (fallback)
4936                         cnt_free->dcs_when_free = cnt_free->dcs_when_active;
4937                 rte_spinlock_lock(&cmng->csl[cnt_type]);
4938                 TAILQ_INSERT_TAIL(&cmng->counters[cnt_type], cnt_free, next);
4939                 rte_spinlock_unlock(&cmng->csl[cnt_type]);
4940         }
4941         return 0;
4942 }
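
/*
 * Minimal lifecycle sketch (not part of the driver) built from the
 * helpers in this file:
 *
 *	uint32_t cnt_idx = flow_dv_counter_alloc(dev, 0);
 *
 *	if (cnt_idx) {
 *		uint64_t pkts, bytes;
 *
 *		// ... attach the counter action to a rule, pass traffic ...
 *		_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes);
 *		flow_dv_counter_free(dev, cnt_idx);
 *	}
 */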
4943
4944 /**
4945  * Allocate a shared flow counter.
4946  *
4947  * @param[in] ctx
4948  *   Pointer to the shared counter configuration.
4949  * @param[out] data
4950  *   Pointer to save the allocated counter index.
4951  *
4952  * @return
4953  *   0 on success, a negative errno value otherwise and rte_errno is set.
4954  */
4956 static int32_t
4957 flow_dv_counter_alloc_shared_cb(void *ctx, union mlx5_l3t_data *data)
4958 {
4959         struct mlx5_shared_counter_conf *conf = ctx;
4960         struct rte_eth_dev *dev = conf->dev;
4961         struct mlx5_flow_counter *cnt;
4962
4963         data->dword = flow_dv_counter_alloc(dev, 0);
             /* A zero index means allocation failure, rte_errno is set. */
             if (!data->dword)
                     return -rte_errno;
4964         data->dword |= MLX5_CNT_SHARED_OFFSET;
4965         cnt = flow_dv_counter_get_by_idx(dev, data->dword, NULL);
4966         cnt->shared_info.id = conf->id;
4967         return 0;
4968 }
4969
4970 /**
4971  * Get a shared flow counter.
4972  *
4973  * @param[in] dev
4974  *   Pointer to the Ethernet device structure.
4975  * @param[in] id
4976  *   Counter identifier.
4977  *
4978  * @return
4979  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4980  */
4981 static uint32_t
4982 flow_dv_counter_get_shared(struct rte_eth_dev *dev, uint32_t id)
4983 {
4984         struct mlx5_priv *priv = dev->data->dev_private;
4985         struct mlx5_shared_counter_conf conf = {
4986                 .dev = dev,
4987                 .id = id,
4988         };
4989         union mlx5_l3t_data data = {
4990                 .dword = 0,
4991         };
4992
4993         mlx5_l3t_prepare_entry(priv->sh->cnt_id_tbl, id, &data,
4994                                flow_dv_counter_alloc_shared_cb, &conf);
4995         return data.dword;
4996 }
4997
4998 /**
4999  * Get age param from counter index.
5000  *
5001  * @param[in] dev
5002  *   Pointer to the Ethernet device structure.
5003  * @param[in] counter
5004  *   Index to the counter handler.
5005  *
5006  * @return
5007  *   The aging parameter specified for the counter index.
5008  */
5009 static struct mlx5_age_param *
5010 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
5011                                 uint32_t counter)
5012 {
5013         struct mlx5_flow_counter *cnt;
5014         struct mlx5_flow_counter_pool *pool = NULL;
5015
5016         flow_dv_counter_get_by_idx(dev, counter, &pool);
5017         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
5018         cnt = MLX5_POOL_GET_CNT(pool, counter);
5019         return MLX5_CNT_TO_AGE(cnt);
5020 }
5021
5022 /**
5023  * Remove a flow counter from aged counter list.
5024  *
5025  * @param[in] dev
5026  *   Pointer to the Ethernet device structure.
5027  * @param[in] counter
5028  *   Index to the counter handler.
5029  * @param[in] cnt
5030  *   Pointer to the counter handler.
5031  */
5032 static void
5033 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
5034                                 uint32_t counter, struct mlx5_flow_counter *cnt)
5035 {
5036         struct mlx5_age_info *age_info;
5037         struct mlx5_age_param *age_param;
5038         struct mlx5_priv *priv = dev->data->dev_private;
5039         uint16_t expected = AGE_CANDIDATE;
5040
5041         age_info = GET_PORT_AGE_INFO(priv);
5042         age_param = flow_dv_counter_idx_get_age(dev, counter);
5043         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
5044                                          AGE_FREE, false, __ATOMIC_RELAXED,
5045                                          __ATOMIC_RELAXED)) {
5046                 /*
5047                  * We need the lock even if the age timeout already expired,
5048                  * since the counter may still be in process.
5049                  */
5050                 rte_spinlock_lock(&age_info->aged_sl);
5051                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
5052                 rte_spinlock_unlock(&age_info->aged_sl);
5053                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
5054         }
5055 }
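
/*
 * Note on the release path above: a counter still in AGE_CANDIDATE state
 * is detached from aging by the compare-and-swap alone; only a counter
 * whose state already left AGE_CANDIDATE (it was moved to the aged list
 * on timeout) takes the spinlock-protected removal path.
 */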
5056
5057 /**
5058  * Release a flow counter.
5059  *
5060  * @param[in] dev
5061  *   Pointer to the Ethernet device structure.
5062  * @param[in] counter
5063  *   Index to the counter handler.
5064  */
5065 static void
5066 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
5067 {
5068         struct mlx5_priv *priv = dev->data->dev_private;
5069         struct mlx5_flow_counter_pool *pool = NULL;
5070         struct mlx5_flow_counter *cnt;
5071         enum mlx5_counter_type cnt_type;
5072
5073         if (!counter)
5074                 return;
5075         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
5076         MLX5_ASSERT(pool);
5077         if (IS_SHARED_CNT(counter) &&
5078             mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
5079                 return;
5080         if (pool->is_aged)
5081                 flow_dv_counter_remove_from_age(dev, counter, cnt);
5082         cnt->pool = pool;
5083         /*
5084          * Put the counter back to the list to be updated in non-fallback
5085          * mode. Two lists are used alternately: while one is being
5086          * queried, freed counters are added to the other one, selected
5087          * by the pool query_gen value. After the query finishes, that
5088          * counter list is appended to the global container list. The
5089          * lists are swapped when a query starts, so no lock is needed:
5090          * the query callback and the release function always operate
5091          * on different lists.
5092          */
5093         if (!priv->sh->cmng.counter_fallback) {
5094                 rte_spinlock_lock(&pool->csl);
5095                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
5096                 rte_spinlock_unlock(&pool->csl);
5097         } else {
5098                 cnt->dcs_when_free = cnt->dcs_when_active;
5099                 cnt_type = pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
5100                                            MLX5_COUNTER_TYPE_ORIGIN;
5101                 rte_spinlock_lock(&priv->sh->cmng.csl[cnt_type]);
5102                 TAILQ_INSERT_TAIL(&priv->sh->cmng.counters[cnt_type],
5103                                   cnt, next);
5104                 rte_spinlock_unlock(&priv->sh->cmng.csl[cnt_type]);
5105         }
5106 }
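
/*
 * Illustrative sketch only, never called: the allocate/release lifecycle
 * implemented above, shown for a shared counter with a hypothetical id.
 */
static __rte_unused void
flow_dv_counter_lifecycle_sketch(struct rte_eth_dev *dev)
{
        uint32_t cnt = flow_dv_counter_get_shared(dev, 7);

        if (!cnt)
                return;
        /* ... the counter would be attached to flow rules here ... */
        /*
         * Each release drops one l3t reference; the last one recycles the
         * counter to a pool (or global container) free list as above.
         */
        flow_dv_counter_free(dev, cnt);
}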
5107
5108 /**
5109  * Verify that the @p attributes will be correctly understood by the NIC.
5111  *
5112  * @param[in] dev
5113  *   Pointer to dev struct.
5114  * @param[in] tunnel
5115  *   Pointer to the tunnel offload descriptor, or NULL for non-tunnel rules.
5116  * @param[in] attributes
5117  *   Pointer to flow attributes.
5118  * @param[in] grp_info
5119  *   Pointer to the flow group translation info.
5118  * @param[out] error
5119  *   Pointer to error structure.
5120  *
5121  * @return
5122  *   - 0 on success and non-root table.
5123  *   - 1 on success and root table.
5124  *   - a negative errno value otherwise and rte_errno is set.
5125  */
5126 static int
5127 flow_dv_validate_attributes(struct rte_eth_dev *dev,
5128                             const struct mlx5_flow_tunnel *tunnel,
5129                             const struct rte_flow_attr *attributes,
5130                             const struct flow_grp_info *grp_info,
5131                             struct rte_flow_error *error)
5132 {
5133         struct mlx5_priv *priv = dev->data->dev_private;
5134         uint32_t priority_max = priv->config.flow_prio - 1;
5135         int ret = 0;
5136
5137 #ifndef HAVE_MLX5DV_DR
5138         RTE_SET_USED(tunnel);
5139         RTE_SET_USED(grp_info);
5140         if (attributes->group)
5141                 return rte_flow_error_set(error, ENOTSUP,
5142                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5143                                           NULL,
5144                                           "groups are not supported");
5145 #else
5146         uint32_t table = 0;
5147
5148         ret = mlx5_flow_group_to_table(dev, tunnel, attributes->group, &table,
5149                                        grp_info, error);
5150         if (ret)
5151                 return ret;
5152         if (!table)
5153                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
5154 #endif
5155         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
5156             attributes->priority >= priority_max)
5157                 return rte_flow_error_set(error, ENOTSUP,
5158                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
5159                                           NULL,
5160                                           "priority out of range");
5161         if (attributes->transfer) {
5162                 if (!priv->config.dv_esw_en)
5163                         return rte_flow_error_set
5164                                 (error, ENOTSUP,
5165                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5166                                  "E-Switch dr is not supported");
5167                 if (!(priv->representor || priv->master))
5168                         return rte_flow_error_set
5169                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5170                                  NULL, "E-Switch configuration can only be"
5171                                  " done by a master or a representor device");
5172                 if (attributes->egress)
5173                         return rte_flow_error_set
5174                                 (error, ENOTSUP,
5175                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
5176                                  "egress is not supported");
5177         }
5178         if (!(attributes->egress ^ attributes->ingress))
5179                 return rte_flow_error_set(error, ENOTSUP,
5180                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
5181                                           "must specify exactly one of "
5182                                           "ingress or egress");
5183         return ret;
5184 }
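
/*
 * Illustrative sketch only, never called: an attribute set the validation
 * above accepts for a non-root ingress rule when DR is available. The
 * group and priority values are hypothetical.
 */
static __rte_unused int
flow_dv_attr_sketch(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = {
                .group = 1,     /* a group > 0 maps to a non-root table */
                .priority = 0,  /* must stay below priv->config.flow_prio */
                .ingress = 1,   /* exactly one of ingress/egress */
        };
        const struct flow_grp_info grp_info = {
                .external = 1,
                .fdb_def_rule = 0,
        };

        /* Returns 0 (non-root) or 1 (root table) on success. */
        return flow_dv_validate_attributes(dev, NULL, &attr, &grp_info, error);
}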
5185
5186 /**
5187  * Internal validation function for validating both actions and items.
5188  *
5189  * @param[in] dev
5190  *   Pointer to the rte_eth_dev structure.
5191  * @param[in] attr
5192  *   Pointer to the flow attributes.
5193  * @param[in] items
5194  *   Pointer to the list of items.
5195  * @param[in] actions
5196  *   Pointer to the list of actions.
5197  * @param[in] external
5198  *   Whether this flow rule is created by a request external to the PMD.
5199  * @param[in] hairpin
5200  *   Number of hairpin TX actions, 0 means classic flow.
5201  * @param[out] error
5202  *   Pointer to the error structure.
5203  *
5204  * @return
5205  *   0 on success, a negative errno value otherwise and rte_errno is set.
5206  */
5207 static int
5208 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
5209                  const struct rte_flow_item items[],
5210                  const struct rte_flow_action actions[],
5211                  bool external, int hairpin, struct rte_flow_error *error)
5212 {
5213         int ret;
5214         uint64_t action_flags = 0;
5215         uint64_t item_flags = 0;
5216         uint64_t last_item = 0;
5217         uint8_t next_protocol = 0xff;
5218         uint16_t ether_type = 0;
5219         int actions_n = 0;
5220         uint8_t item_ipv6_proto = 0;
5221         const struct rte_flow_item *gre_item = NULL;
5222         const struct rte_flow_action_raw_decap *decap;
5223         const struct rte_flow_action_raw_encap *encap;
5224         const struct rte_flow_action_rss *rss;
5225         const struct rte_flow_item_tcp nic_tcp_mask = {
5226                 .hdr = {
5227                         .tcp_flags = 0xFF,
5228                         .src_port = RTE_BE16(UINT16_MAX),
5229                         .dst_port = RTE_BE16(UINT16_MAX),
5230                 }
5231         };
5232         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
5233                 .hdr = {
5234                         .src_addr =
5235                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5236                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5237                         .dst_addr =
5238                         "\xff\xff\xff\xff\xff\xff\xff\xff"
5239                         "\xff\xff\xff\xff\xff\xff\xff\xff",
5240                         .vtc_flow = RTE_BE32(0xffffffff),
5241                         .proto = 0xff,
5242                         .hop_limits = 0xff,
5243                 },
5244                 .has_frag_ext = 1,
5245         };
5246         const struct rte_flow_item_ecpri nic_ecpri_mask = {
5247                 .hdr = {
5248                         .common = {
5249                                 .u32 =
5250                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
5251                                         .type = 0xFF,
5252                                         }).u32),
5253                         },
5254                         .dummy[0] = 0xffffffff,
5255                 },
5256         };
5257         struct mlx5_priv *priv = dev->data->dev_private;
5258         struct mlx5_dev_config *dev_conf = &priv->config;
5259         uint16_t queue_index = 0xFFFF;
5260         const struct rte_flow_item_vlan *vlan_m = NULL;
5261         int16_t rw_act_num = 0;
5262         uint64_t is_root;
5263         const struct mlx5_flow_tunnel *tunnel;
5264         struct flow_grp_info grp_info = {
5265                 .external = !!external,
5266                 .transfer = !!attr->transfer,
5267                 .fdb_def_rule = !!priv->fdb_def_rule,
5268         };
5269         const struct rte_eth_hairpin_conf *conf;
5270
5271         if (items == NULL)
5272                 return -1;
5273         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
5274                 tunnel = flow_items_to_tunnel(items);
5275                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_MATCH |
5276                                 MLX5_FLOW_ACTION_DECAP;
5277         } else if (is_flow_tunnel_steer_rule(dev, attr, items, actions)) {
5278                 tunnel = flow_actions_to_tunnel(actions);
5279                 action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
5280         } else {
5281                 tunnel = NULL;
5282         }
5283         if (tunnel && priv->representor)
5284                 return rte_flow_error_set(error, ENOTSUP,
5285                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5286                                           "decap not supported "
5287                                           "for VF representor");
5288         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
5289                                 (dev, tunnel, attr, items, actions);
5290         ret = flow_dv_validate_attributes(dev, tunnel, attr, &grp_info, error);
5291         if (ret < 0)
5292                 return ret;
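        /* Non-negative ret is 1 when the rule targets the root table, else 0. */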
5293         is_root = (uint64_t)ret;
5294         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5295                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5296                 int type = items->type;
5297
5298                 if (!mlx5_flow_os_item_supported(type))
5299                         return rte_flow_error_set(error, ENOTSUP,
5300                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5301                                                   NULL, "item not supported");
5302                 switch (type) {
5303                 case MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL:
5304                         if (items[0].type != (typeof(items[0].type))
5305                                                 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL)
5306                                 return rte_flow_error_set
5307                                                 (error, EINVAL,
5308                                                 RTE_FLOW_ERROR_TYPE_ITEM,
5309                                                 NULL, "MLX5 private items "
5310                                                 "must be the first");
5311                         break;
5312                 case RTE_FLOW_ITEM_TYPE_VOID:
5313                         break;
5314                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5315                         ret = flow_dv_validate_item_port_id
5316                                         (dev, items, attr, item_flags, error);
5317                         if (ret < 0)
5318                                 return ret;
5319                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5320                         break;
5321                 case RTE_FLOW_ITEM_TYPE_ETH:
5322                         ret = mlx5_flow_validate_item_eth(items, item_flags,
5323                                                           true, error);
5324                         if (ret < 0)
5325                                 return ret;
5326                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5327                                              MLX5_FLOW_LAYER_OUTER_L2;
5328                         if (items->mask != NULL && items->spec != NULL) {
5329                                 ether_type =
5330                                         ((const struct rte_flow_item_eth *)
5331                                          items->spec)->type;
5332                                 ether_type &=
5333                                         ((const struct rte_flow_item_eth *)
5334                                          items->mask)->type;
5335                                 ether_type = rte_be_to_cpu_16(ether_type);
5336                         } else {
5337                                 ether_type = 0;
5338                         }
5339                         break;
5340                 case RTE_FLOW_ITEM_TYPE_VLAN:
5341                         ret = flow_dv_validate_item_vlan(items, item_flags,
5342                                                          dev, error);
5343                         if (ret < 0)
5344                                 return ret;
5345                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
5346                                              MLX5_FLOW_LAYER_OUTER_VLAN;
5347                         if (items->mask != NULL && items->spec != NULL) {
5348                                 ether_type =
5349                                         ((const struct rte_flow_item_vlan *)
5350                                          items->spec)->inner_type;
5351                                 ether_type &=
5352                                         ((const struct rte_flow_item_vlan *)
5353                                          items->mask)->inner_type;
5354                                 ether_type = rte_be_to_cpu_16(ether_type);
5355                         } else {
5356                                 ether_type = 0;
5357                         }
5358                         /* Store outer VLAN mask for of_push_vlan action. */
5359                         if (!tunnel)
5360                                 vlan_m = items->mask;
5361                         break;
5362                 case RTE_FLOW_ITEM_TYPE_IPV4:
5363                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5364                                                   &item_flags, &tunnel);
5365                         ret = flow_dv_validate_item_ipv4(items, item_flags,
5366                                                          last_item, ether_type,
5367                                                          error);
5368                         if (ret < 0)
5369                                 return ret;
5370                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5371                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5372                         if (items->mask != NULL &&
5373                             ((const struct rte_flow_item_ipv4 *)
5374                              items->mask)->hdr.next_proto_id) {
5375                                 next_protocol =
5376                                         ((const struct rte_flow_item_ipv4 *)
5377                                          (items->spec))->hdr.next_proto_id;
5378                                 next_protocol &=
5379                                         ((const struct rte_flow_item_ipv4 *)
5380                                          (items->mask))->hdr.next_proto_id;
5381                         } else {
5382                                 /* Reset for inner layer. */
5383                                 next_protocol = 0xff;
5384                         }
5385                         break;
5386                 case RTE_FLOW_ITEM_TYPE_IPV6:
5387                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5388                                                   &item_flags, &tunnel);
5389                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5390                                                            last_item,
5391                                                            ether_type,
5392                                                            &nic_ipv6_mask,
5393                                                            error);
5394                         if (ret < 0)
5395                                 return ret;
5396                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5397                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5398                         if (items->mask != NULL &&
5399                             ((const struct rte_flow_item_ipv6 *)
5400                              items->mask)->hdr.proto) {
5401                                 item_ipv6_proto =
5402                                         ((const struct rte_flow_item_ipv6 *)
5403                                          items->spec)->hdr.proto;
5404                                 next_protocol =
5405                                         ((const struct rte_flow_item_ipv6 *)
5406                                          items->spec)->hdr.proto;
5407                                 next_protocol &=
5408                                         ((const struct rte_flow_item_ipv6 *)
5409                                          items->mask)->hdr.proto;
5410                         } else {
5411                                 /* Reset for inner layer. */
5412                                 next_protocol = 0xff;
5413                         }
5414                         break;
5415                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
5416                         ret = flow_dv_validate_item_ipv6_frag_ext(items,
5417                                                                   item_flags,
5418                                                                   error);
5419                         if (ret < 0)
5420                                 return ret;
5421                         last_item = tunnel ?
5422                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
5423                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
5424                         if (items->mask != NULL &&
5425                             ((const struct rte_flow_item_ipv6_frag_ext *)
5426                              items->mask)->hdr.next_header) {
5427                                 next_protocol =
5428                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5429                                  items->spec)->hdr.next_header;
5430                                 next_protocol &=
5431                                 ((const struct rte_flow_item_ipv6_frag_ext *)
5432                                  items->mask)->hdr.next_header;
5433                         } else {
5434                                 /* Reset for inner layer. */
5435                                 next_protocol = 0xff;
5436                         }
5437                         break;
5438                 case RTE_FLOW_ITEM_TYPE_TCP:
5439                         ret = mlx5_flow_validate_item_tcp
5440                                                 (items, item_flags,
5441                                                  next_protocol,
5442                                                  &nic_tcp_mask,
5443                                                  error);
5444                         if (ret < 0)
5445                                 return ret;
5446                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5447                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5448                         break;
5449                 case RTE_FLOW_ITEM_TYPE_UDP:
5450                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5451                                                           next_protocol,
5452                                                           error);
5453                         if (ret < 0)
5454                                 return ret;
5455                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5456                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5457                         break;
5458                 case RTE_FLOW_ITEM_TYPE_GRE:
5459                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5460                                                           next_protocol, error);
5461                         if (ret < 0)
5462                                 return ret;
5463                         gre_item = items;
5464                         last_item = MLX5_FLOW_LAYER_GRE;
5465                         break;
5466                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5467                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5468                                                             next_protocol,
5469                                                             error);
5470                         if (ret < 0)
5471                                 return ret;
5472                         last_item = MLX5_FLOW_LAYER_NVGRE;
5473                         break;
5474                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5475                         ret = mlx5_flow_validate_item_gre_key
5476                                 (items, item_flags, gre_item, error);
5477                         if (ret < 0)
5478                                 return ret;
5479                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5480                         break;
5481                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5482                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5483                                                             error);
5484                         if (ret < 0)
5485                                 return ret;
5486                         last_item = MLX5_FLOW_LAYER_VXLAN;
5487                         break;
5488                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5489                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5490                                                                 item_flags, dev,
5491                                                                 error);
5492                         if (ret < 0)
5493                                 return ret;
5494                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5495                         break;
5496                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5497                         ret = mlx5_flow_validate_item_geneve(items,
5498                                                              item_flags, dev,
5499                                                              error);
5500                         if (ret < 0)
5501                                 return ret;
5502                         last_item = MLX5_FLOW_LAYER_GENEVE;
5503                         break;
5504                 case RTE_FLOW_ITEM_TYPE_MPLS:
5505                         ret = mlx5_flow_validate_item_mpls(dev, items,
5506                                                            item_flags,
5507                                                            last_item, error);
5508                         if (ret < 0)
5509                                 return ret;
5510                         last_item = MLX5_FLOW_LAYER_MPLS;
5511                         break;
5512
5513                 case RTE_FLOW_ITEM_TYPE_MARK:
5514                         ret = flow_dv_validate_item_mark(dev, items, attr,
5515                                                          error);
5516                         if (ret < 0)
5517                                 return ret;
5518                         last_item = MLX5_FLOW_ITEM_MARK;
5519                         break;
5520                 case RTE_FLOW_ITEM_TYPE_META:
5521                         ret = flow_dv_validate_item_meta(dev, items, attr,
5522                                                          error);
5523                         if (ret < 0)
5524                                 return ret;
5525                         last_item = MLX5_FLOW_ITEM_METADATA;
5526                         break;
5527                 case RTE_FLOW_ITEM_TYPE_ICMP:
5528                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5529                                                            next_protocol,
5530                                                            error);
5531                         if (ret < 0)
5532                                 return ret;
5533                         last_item = MLX5_FLOW_LAYER_ICMP;
5534                         break;
5535                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5536                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5537                                                             next_protocol,
5538                                                             error);
5539                         if (ret < 0)
5540                                 return ret;
5541                         item_ipv6_proto = IPPROTO_ICMPV6;
5542                         last_item = MLX5_FLOW_LAYER_ICMP6;
5543                         break;
5544                 case RTE_FLOW_ITEM_TYPE_TAG:
5545                         ret = flow_dv_validate_item_tag(dev, items,
5546                                                         attr, error);
5547                         if (ret < 0)
5548                                 return ret;
5549                         last_item = MLX5_FLOW_ITEM_TAG;
5550                         break;
5551                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5552                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5553                         break;
5554                 case RTE_FLOW_ITEM_TYPE_GTP:
5555                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5556                                                         error);
5557                         if (ret < 0)
5558                                 return ret;
5559                         last_item = MLX5_FLOW_LAYER_GTP;
5560                         break;
5561                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5562                         /* Capacity will be checked in the translate stage. */
5563                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5564                                                             last_item,
5565                                                             ether_type,
5566                                                             &nic_ecpri_mask,
5567                                                             error);
5568                         if (ret < 0)
5569                                 return ret;
5570                         last_item = MLX5_FLOW_LAYER_ECPRI;
5571                         break;
5572                 default:
5573                         return rte_flow_error_set(error, ENOTSUP,
5574                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5575                                                   NULL, "item not supported");
5576                 }
5577                 item_flags |= last_item;
5578         }
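        /*
         * Items validated; now walk the action list, counting HW actions in
         * actions_n and header-rewrite sub-actions in rw_act_num.
         */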
5579         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5580                 int type = actions->type;
5581
5582                 if (!mlx5_flow_os_action_supported(type))
5583                         return rte_flow_error_set(error, ENOTSUP,
5584                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5585                                                   actions,
5586                                                   "action not supported");
5587                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5588                         return rte_flow_error_set(error, ENOTSUP,
5589                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5590                                                   actions, "too many actions");
5591                 switch (type) {
5592                 case RTE_FLOW_ACTION_TYPE_VOID:
5593                         break;
5594                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5595                         ret = flow_dv_validate_action_port_id(dev,
5596                                                               action_flags,
5597                                                               actions,
5598                                                               attr,
5599                                                               error);
5600                         if (ret)
5601                                 return ret;
5602                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5603                         ++actions_n;
5604                         break;
5605                 case RTE_FLOW_ACTION_TYPE_FLAG:
5606                         ret = flow_dv_validate_action_flag(dev, action_flags,
5607                                                            attr, error);
5608                         if (ret < 0)
5609                                 return ret;
5610                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5611                                 /* Count all modify-header actions as one. */
5612                                 if (!(action_flags &
5613                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5614                                         ++actions_n;
5615                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5616                                                 MLX5_FLOW_ACTION_MARK_EXT;
5617                         } else {
5618                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5619                                 ++actions_n;
5620                         }
5621                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5622                         break;
5623                 case RTE_FLOW_ACTION_TYPE_MARK:
5624                         ret = flow_dv_validate_action_mark(dev, actions,
5625                                                            action_flags,
5626                                                            attr, error);
5627                         if (ret < 0)
5628                                 return ret;
5629                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5630                                 /* Count all modify-header actions as one. */
5631                                 if (!(action_flags &
5632                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5633                                         ++actions_n;
5634                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5635                                                 MLX5_FLOW_ACTION_MARK_EXT;
5636                         } else {
5637                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5638                                 ++actions_n;
5639                         }
5640                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5641                         break;
5642                 case RTE_FLOW_ACTION_TYPE_SET_META:
5643                         ret = flow_dv_validate_action_set_meta(dev, actions,
5644                                                                action_flags,
5645                                                                attr, error);
5646                         if (ret < 0)
5647                                 return ret;
5648                         /* Count all modify-header actions as one action. */
5649                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5650                                 ++actions_n;
5651                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5652                         rw_act_num += MLX5_ACT_NUM_SET_META;
5653                         break;
5654                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5655                         ret = flow_dv_validate_action_set_tag(dev, actions,
5656                                                               action_flags,
5657                                                               attr, error);
5658                         if (ret < 0)
5659                                 return ret;
5660                         /* Count all modify-header actions as one action. */
5661                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5662                                 ++actions_n;
5663                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5664                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5665                         break;
5666                 case RTE_FLOW_ACTION_TYPE_DROP:
5667                         ret = mlx5_flow_validate_action_drop(action_flags,
5668                                                              attr, error);
5669                         if (ret < 0)
5670                                 return ret;
5671                         action_flags |= MLX5_FLOW_ACTION_DROP;
5672                         ++actions_n;
5673                         break;
5674                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5675                         ret = mlx5_flow_validate_action_queue(actions,
5676                                                               action_flags, dev,
5677                                                               attr, error);
5678                         if (ret < 0)
5679                                 return ret;
5680                         queue_index = ((const struct rte_flow_action_queue *)
5681                                                         (actions->conf))->index;
5682                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5683                         ++actions_n;
5684                         break;
5685                 case RTE_FLOW_ACTION_TYPE_RSS:
5686                         rss = actions->conf;
5687                         ret = mlx5_flow_validate_action_rss(actions,
5688                                                             action_flags, dev,
5689                                                             attr, item_flags,
5690                                                             error);
5691                         if (ret < 0)
5692                                 return ret;
5693                         if (rss != NULL && rss->queue_num)
5694                                 queue_index = rss->queue[0];
5695                         action_flags |= MLX5_FLOW_ACTION_RSS;
5696                         ++actions_n;
5697                         break;
5698                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5699                         ret =
5700                         mlx5_flow_validate_action_default_miss(action_flags,
5701                                         attr, error);
5702                         if (ret < 0)
5703                                 return ret;
5704                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5705                         ++actions_n;
5706                         break;
5707                 case RTE_FLOW_ACTION_TYPE_COUNT:
5708                         ret = flow_dv_validate_action_count(dev, error);
5709                         if (ret < 0)
5710                                 return ret;
5711                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5712                         ++actions_n;
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5715                         if (flow_dv_validate_action_pop_vlan(dev,
5716                                                              action_flags,
5717                                                              actions,
5718                                                              item_flags, attr,
5719                                                              error))
5720                                 return -rte_errno;
5721                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5722                         ++actions_n;
5723                         break;
5724                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5725                         ret = flow_dv_validate_action_push_vlan(dev,
5726                                                                 action_flags,
5727                                                                 vlan_m,
5728                                                                 actions, attr,
5729                                                                 error);
5730                         if (ret < 0)
5731                                 return ret;
5732                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5733                         ++actions_n;
5734                         break;
5735                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5736                         ret = flow_dv_validate_action_set_vlan_pcp
5737                                                 (action_flags, actions, error);
5738                         if (ret < 0)
5739                                 return ret;
5740                         /* Count PCP with push_vlan command. */
5741                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5742                         break;
5743                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5744                         ret = flow_dv_validate_action_set_vlan_vid
5745                                                 (item_flags, action_flags,
5746                                                  actions, error);
5747                         if (ret < 0)
5748                                 return ret;
5749                         /* Count VID with push_vlan command. */
5750                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5751                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5752                         break;
5753                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5754                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5755                         ret = flow_dv_validate_action_l2_encap(dev,
5756                                                                action_flags,
5757                                                                actions, attr,
5758                                                                error);
5759                         if (ret < 0)
5760                                 return ret;
5761                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5762                         ++actions_n;
5763                         break;
5764                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5765                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5766                         ret = flow_dv_validate_action_decap(dev, action_flags,
5767                                                             attr, error);
5768                         if (ret < 0)
5769                                 return ret;
5770                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5771                         ++actions_n;
5772                         break;
5773                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5774                         ret = flow_dv_validate_action_raw_encap_decap
5775                                 (dev, NULL, actions->conf, attr, &action_flags,
5776                                  &actions_n, error);
5777                         if (ret < 0)
5778                                 return ret;
5779                         break;
5780                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5781                         decap = actions->conf;
5782                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5783                                 ;
5784                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5785                                 encap = NULL;
5786                                 actions--;
5787                         } else {
5788                                 encap = actions->conf;
5789                         }
5790                         ret = flow_dv_validate_action_raw_encap_decap
5791                                            (dev,
5792                                             decap ? decap : &empty_decap, encap,
5793                                             attr, &action_flags, &actions_n,
5794                                             error);
5795                         if (ret < 0)
5796                                 return ret;
5797                         break;
5798                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5799                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5800                         ret = flow_dv_validate_action_modify_mac(action_flags,
5801                                                                  actions,
5802                                                                  item_flags,
5803                                                                  error);
5804                         if (ret < 0)
5805                                 return ret;
5806                         /* Count all modify-header actions as one action. */
5807                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5808                                 ++actions_n;
5809                         action_flags |= actions->type ==
5810                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5811                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5812                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5813                         /*
5814                          * Even though the source and destination MAC addresses
5815                          * overlap in the header due to the 4-byte alignment, the
5816                          * convert function handles them separately, creating 4 SW
5817                          * actions in total: 2 actions are added per address, no
5818                          * matter how many bytes of the address are actually set.
5819                          */
5820                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5821                         break;
5822                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5823                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5824                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5825                                                                   actions,
5826                                                                   item_flags,
5827                                                                   error);
5828                         if (ret < 0)
5829                                 return ret;
5830                         /* Count all modify-header actions as one action. */
5831                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5832                                 ++actions_n;
5833                         action_flags |= actions->type ==
5834                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5835                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5836                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5837                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5838                         break;
5839                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5840                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5841                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5842                                                                   actions,
5843                                                                   item_flags,
5844                                                                   error);
5845                         if (ret < 0)
5846                                 return ret;
5847                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5848                                 return rte_flow_error_set(error, ENOTSUP,
5849                                         RTE_FLOW_ERROR_TYPE_ACTION,
5850                                         actions,
5851                                         "Can't change header "
5852                                         "with ICMPv6 proto");
5853                         /* Count all modify-header actions as one action. */
5854                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5855                                 ++actions_n;
5856                         action_flags |= actions->type ==
5857                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5858                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5859                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5860                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5861                         break;
5862                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5863                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5864                         ret = flow_dv_validate_action_modify_tp(action_flags,
5865                                                                 actions,
5866                                                                 item_flags,
5867                                                                 error);
5868                         if (ret < 0)
5869                                 return ret;
5870                         /* Count all modify-header actions as one action. */
5871                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5872                                 ++actions_n;
5873                         action_flags |= actions->type ==
5874                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5875                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5876                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5877                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5878                         break;
5879                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5880                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5881                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5882                                                                  actions,
5883                                                                  item_flags,
5884                                                                  error);
5885                         if (ret < 0)
5886                                 return ret;
5887                         /* Count all modify-header actions as one action. */
5888                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5889                                 ++actions_n;
5890                         action_flags |= actions->type ==
5891                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5892                                                 MLX5_FLOW_ACTION_SET_TTL :
5893                                                 MLX5_FLOW_ACTION_DEC_TTL;
5894                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5895                         break;
5896                 case RTE_FLOW_ACTION_TYPE_JUMP:
5897                         ret = flow_dv_validate_action_jump(dev, tunnel, actions,
5898                                                            action_flags,
5899                                                            attr, external,
5900                                                            error);
5901                         if (ret)
5902                                 return ret;
5903                         ++actions_n;
5904                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5905                         break;
5906                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5907                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5908                         ret = flow_dv_validate_action_modify_tcp_seq
5909                                                                 (action_flags,
5910                                                                  actions,
5911                                                                  item_flags,
5912                                                                  error);
5913                         if (ret < 0)
5914                                 return ret;
5915                         /* Count all modify-header actions as one action. */
5916                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5917                                 ++actions_n;
5918                         action_flags |= actions->type ==
5919                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5920                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5921                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5922                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5923                         break;
5924                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5925                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5926                         ret = flow_dv_validate_action_modify_tcp_ack
5927                                                                 (action_flags,
5928                                                                  actions,
5929                                                                  item_flags,
5930                                                                  error);
5931                         if (ret < 0)
5932                                 return ret;
5933                         /* Count all modify-header actions as one action. */
5934                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5935                                 ++actions_n;
5936                         action_flags |= actions->type ==
5937                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5938                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5939                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5940                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5941                         break;
5942                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5943                         break;
5944                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5945                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5946                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5947                         break;
5948                 case RTE_FLOW_ACTION_TYPE_METER:
5949                         ret = mlx5_flow_validate_action_meter(dev,
5950                                                               action_flags,
5951                                                               actions, attr,
5952                                                               error);
5953                         if (ret < 0)
5954                                 return ret;
5955                         action_flags |= MLX5_FLOW_ACTION_METER;
5956                         ++actions_n;
5957                         /* Meter action will add one more TAG action. */
5958                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5959                         break;
5960                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
5961                         if (!attr->transfer && !attr->group)
5962                                 return rte_flow_error_set(error, ENOTSUP,
5963                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5964                                                                            NULL,
5965                           "Shared ASO age action is not supported for group 0");
5966                         action_flags |= MLX5_FLOW_ACTION_AGE;
5967                         ++actions_n;
5968                         break;
5969                 case RTE_FLOW_ACTION_TYPE_AGE:
5970                         ret = flow_dv_validate_action_age(action_flags,
5971                                                           actions, dev,
5972                                                           error);
5973                         if (ret < 0)
5974                                 return ret;
5975                         action_flags |= MLX5_FLOW_ACTION_AGE;
5976                         ++actions_n;
5977                         break;
5978                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5979                         ret = flow_dv_validate_action_modify_ipv4_dscp
5980                                                          (action_flags,
5981                                                           actions,
5982                                                           item_flags,
5983                                                           error);
5984                         if (ret < 0)
5985                                 return ret;
5986                         /* Count all modify-header actions as one action. */
5987                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5988                                 ++actions_n;
5989                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5990                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5991                         break;
5992                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5993                         ret = flow_dv_validate_action_modify_ipv6_dscp
5994                                                                 (action_flags,
5995                                                                  actions,
5996                                                                  item_flags,
5997                                                                  error);
5998                         if (ret < 0)
5999                                 return ret;
6000                         /* Count all modify-header actions as one action. */
6001                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
6002                                 ++actions_n;
6003                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
6004                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
6005                         break;
6006                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
6007                         ret = flow_dv_validate_action_sample(action_flags,
6008                                                              actions, dev,
6009                                                              attr, error);
6010                         if (ret < 0)
6011                                 return ret;
6012                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
6013                         ++actions_n;
6014                         break;
6015                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
6016                         if (actions[0].type != (typeof(actions[0].type))
6017                                 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET)
6018                                 return rte_flow_error_set
6019                                                 (error, EINVAL,
6020                                                 RTE_FLOW_ERROR_TYPE_ACTION,
6021                                                 NULL, "MLX5 private action "
6022                                                 "must be the first");
6023
6024                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
6025                         break;
6026                 default:
6027                         return rte_flow_error_set(error, ENOTSUP,
6028                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6029                                                   actions,
6030                                                   "action not supported");
6031                 }
6032         }
6033         /*
6034          * Validate actions in flow rules:
6035          * - Explicit decap action is prohibited by the tunnel offload API.
6036          * - Drop action in tunnel steer rule is prohibited by the API.
6037          * - Application cannot use MARK action because its value can mask
6038          *   tunnel default miss notification.
6039          * - JUMP in tunnel match rule has no support in current PMD
6040          *   implementation.
6041          * - TAG & META are reserved for future use.
6042          */
6043         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_SET) {
6044                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_DECAP    |
6045                                             MLX5_FLOW_ACTION_MARK     |
6046                                             MLX5_FLOW_ACTION_SET_TAG  |
6047                                             MLX5_FLOW_ACTION_SET_META |
6048                                             MLX5_FLOW_ACTION_DROP;
6049
6050                 if (action_flags & bad_actions_mask)
6051                         return rte_flow_error_set
6052                                         (error, EINVAL,
6053                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6054                                         "Invalid RTE action in tunnel "
6055                                         "set decap rule");
6056                 if (!(action_flags & MLX5_FLOW_ACTION_JUMP))
6057                         return rte_flow_error_set
6058                                         (error, EINVAL,
6059                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6060                                         "tunnel set decap rule must terminate "
6061                                         "with JUMP");
6062                 if (!attr->ingress)
6063                         return rte_flow_error_set
6064                                         (error, EINVAL,
6065                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6066                                         "tunnel flows for ingress traffic only");
6067         }
6068         if (action_flags & MLX5_FLOW_ACTION_TUNNEL_MATCH) {
6069                 uint64_t bad_actions_mask = MLX5_FLOW_ACTION_JUMP    |
6070                                             MLX5_FLOW_ACTION_MARK    |
6071                                             MLX5_FLOW_ACTION_SET_TAG |
6072                                             MLX5_FLOW_ACTION_SET_META;
6073
6074                 if (action_flags & bad_actions_mask)
6075                         return rte_flow_error_set
6076                                         (error, EINVAL,
6077                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6078                                         "Invalid RTE action in tunnel "
6079                                         "match rule");
6080         }
6081         /*
6082          * Validate the drop action mutual exclusion with other actions.
6083          * Drop action is mutually-exclusive with any other action, except for
6084          * Count action.
6085          */
6086         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
6087             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
6088                 return rte_flow_error_set(error, EINVAL,
6089                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
6090                                           "Drop action is mutually-exclusive "
6091                                           "with any other action, except for "
6092                                           "Count action");
6093         /* E-Switch has a few restrictions on using items and actions. */
6094         if (attr->transfer) {
6095                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6096                     action_flags & MLX5_FLOW_ACTION_FLAG)
6097                         return rte_flow_error_set(error, ENOTSUP,
6098                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6099                                                   NULL,
6100                                                   "unsupported action FLAG");
6101                 if (!mlx5_flow_ext_mreg_supported(dev) &&
6102                     action_flags & MLX5_FLOW_ACTION_MARK)
6103                         return rte_flow_error_set(error, ENOTSUP,
6104                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6105                                                   NULL,
6106                                                   "unsupported action MARK");
6107                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
6108                         return rte_flow_error_set(error, ENOTSUP,
6109                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6110                                                   NULL,
6111                                                   "unsupported action QUEUE");
6112                 if (action_flags & MLX5_FLOW_ACTION_RSS)
6113                         return rte_flow_error_set(error, ENOTSUP,
6114                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6115                                                   NULL,
6116                                                   "unsupported action RSS");
6117                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
6118                         return rte_flow_error_set(error, EINVAL,
6119                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6120                                                   actions,
6121                                                   "no fate action is found");
6122         } else {
6123                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
6124                         return rte_flow_error_set(error, EINVAL,
6125                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6126                                                   actions,
6127                                                   "no fate action is found");
6128         }
6129         /*
6130          * Continue validation for Xcap and VLAN actions.
6131          * If hairpin works in explicit TX rule mode, there is no action
6132          * splitting and the validation of a hairpin ingress flow should be
6133          * the same as for other standard flows.
6134          */
6135         if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
6136                              MLX5_FLOW_VLAN_ACTIONS)) &&
6137             (queue_index == 0xFFFF ||
6138              mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
6139              ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
6140              conf->tx_explicit != 0))) {
6141                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
6142                     MLX5_FLOW_XCAP_ACTIONS)
6143                         return rte_flow_error_set(error, ENOTSUP,
6144                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6145                                                   NULL, "encap and decap "
6146                                                   "combination is not supported");
6147                 if (!attr->transfer && attr->ingress) {
6148                         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
6149                                 return rte_flow_error_set
6150                                                 (error, ENOTSUP,
6151                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6152                                                  NULL, "encap is not supported"
6153                                                  " for ingress traffic");
6154                         else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
6155                                 return rte_flow_error_set
6156                                                 (error, ENOTSUP,
6157                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6158                                                  NULL, "push VLAN action not "
6159                                                  "supported for ingress");
6160                         else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
6161                                         MLX5_FLOW_VLAN_ACTIONS)
6162                                 return rte_flow_error_set
6163                                                 (error, ENOTSUP,
6164                                                  RTE_FLOW_ERROR_TYPE_ACTION,
6165                                                  NULL, "no support for "
6166                                                  "multiple VLAN actions");
6167                 }
6168         }
6169         /*
6170          * Hairpin flow will add one more TAG action in TX implicit mode.
6171          * In TX explicit mode, there will be no hairpin flow ID.
6172          */
6173         if (hairpin > 0)
6174                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6175         /* Extra metadata enabled: one more TAG action will be added. */
6176         if (dev_conf->dv_flow_en &&
6177             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
6178             mlx5_flow_ext_mreg_supported(dev))
6179                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
6180         if ((uint32_t)rw_act_num >
6181                         flow_dv_modify_hdr_action_max(dev, is_root)) {
6182                 return rte_flow_error_set(error, ENOTSUP,
6183                                           RTE_FLOW_ERROR_TYPE_ACTION,
6184                                           NULL, "too many header modify"
6185                                           " actions to support");
6186         }
6187         return 0;
6188 }
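
/*
 * Illustrative sketch (not part of the driver): the DROP exclusivity rule
 * validated above means an application action list combining DROP with
 * COUNT is expected to pass validation, while appending e.g. MARK to the
 * same list fails with EINVAL. Assuming a configured port_id, attr and
 * pattern:
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	ret = rte_flow_validate(port_id, &attr, pattern, actions, &error);
 */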
6189
6190 /**
6191  * Internal preparation function. Allocates the DV flow size;
6192  * this size is constant.
6193  *
6194  * @param[in] dev
6195  *   Pointer to the rte_eth_dev structure.
6196  * @param[in] attr
6197  *   Pointer to the flow attributes.
6198  * @param[in] items
6199  *   Pointer to the list of items.
6200  * @param[in] actions
6201  *   Pointer to the list of actions.
6202  * @param[out] error
6203  *   Pointer to the error structure.
6204  *
6205  * @return
6206  *   Pointer to mlx5_flow object on success,
6207  *   otherwise NULL and rte_errno is set.
6208  */
6209 static struct mlx5_flow *
6210 flow_dv_prepare(struct rte_eth_dev *dev,
6211                 const struct rte_flow_attr *attr __rte_unused,
6212                 const struct rte_flow_item items[] __rte_unused,
6213                 const struct rte_flow_action actions[] __rte_unused,
6214                 struct rte_flow_error *error)
6215 {
6216         uint32_t handle_idx = 0;
6217         struct mlx5_flow *dev_flow;
6218         struct mlx5_flow_handle *dev_handle;
6219         struct mlx5_priv *priv = dev->data->dev_private;
6220         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
6221
6222         MLX5_ASSERT(wks);
6223         /* Guard against overflowing the temporary device flow array. */
6224         if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
6225                 rte_flow_error_set(error, ENOSPC,
6226                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6227                                    "no free temporary device flow");
6228                 return NULL;
6229         }
6230         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
6231                                    &handle_idx);
6232         if (!dev_handle) {
6233                 rte_flow_error_set(error, ENOMEM,
6234                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6235                                    "not enough memory to create flow handle");
6236                 return NULL;
6237         }
6238         MLX5_ASSERT(wks->flow_idx < RTE_DIM(wks->flows));
6239         dev_flow = &wks->flows[wks->flow_idx++];
6240         memset(dev_flow, 0, sizeof(*dev_flow));
6241         dev_flow->handle = dev_handle;
6242         dev_flow->handle_idx = handle_idx;
6243         /*
6244          * Some old rdma-core releases check the length of the matching
6245          * parameter before continuing, so by default the length without the
6246          * misc4 param must be used. If the flow has misc4 support, the
6247          * length is adjusted accordingly. Each param member is naturally
6248          * aligned to a 64B boundary.
6249          */
6250         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
6251                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
6252         dev_flow->ingress = attr->ingress;
6253         dev_flow->dv.transfer = attr->transfer;
6254         return dev_flow;
6255 }
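
/*
 * Illustrative note (assuming the PRM layout where each fte_match_set_*
 * member of fte_match_param is a 64-byte block): the subtraction above
 * simply trims the trailing misc4 set, so old rdma-core length checks see
 * a matching parameter without misc4 unless the flow actually needs it.
 */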
6256
6257 #ifdef RTE_LIBRTE_MLX5_DEBUG
6258 /**
6259  * Sanity check for match mask and value. Similar to check_valid_spec() in
6260  * the kernel driver. If an unmasked bit is set in the value, it fails.
6261  *
6262  * @param match_mask
6263  *   pointer to match mask buffer.
6264  * @param match_value
6265  *   pointer to match value buffer.
6266  *
6267  * @return
6268  *   0 if valid, -EINVAL otherwise.
6269  */
6270 static int
6271 flow_dv_check_valid_spec(void *match_mask, void *match_value)
6272 {
6273         uint8_t *m = match_mask;
6274         uint8_t *v = match_value;
6275         unsigned int i;
6276
6277         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
6278                 if (v[i] & ~m[i]) {
6279                         DRV_LOG(ERR,
6280                                 "match_value differs from match_criteria"
6281                                 " %p[%u] != %p[%u]",
6282                                 match_value, i, match_mask, i);
6283                         return -EINVAL;
6284                 }
6285         }
6286         return 0;
6287 }
6288 #endif
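
/*
 * Illustrative worked example (hypothetical values): with a one-byte mask
 * m == 0xf0, a value v == 0x1f fails the check above because
 * v & ~m == 0x0f, i.e. value bits are set outside the mask, while
 * v == 0x10 passes since v & ~m == 0.
 */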
6289
6290 /**
6291  * Add match of ip_version.
6292  *
6293  * @param[in] group
6294  *   Flow group.
6295  * @param[in] headers_v
6296  *   Values header pointer.
6297  * @param[in] headers_m
6298  *   Masks header pointer.
6299  * @param[in] ip_version
6300  *   The IP version to set.
6301  */
6302 static inline void
6303 flow_dv_set_match_ip_version(uint32_t group,
6304                              void *headers_v,
6305                              void *headers_m,
6306                              uint8_t ip_version)
6307 {
6308         if (group == 0)
6309                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
6310         else
6311                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
6312                          ip_version);
6313         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
6314         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
6315         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
6316 }
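
/*
 * Illustrative worked example: for ip_version == 4, a rule on the root
 * table (group 0) is written with mask 0xf and value 4, i.e. an exact
 * match on the whole version field, while on non-root tables both mask
 * and value are set to 4.
 */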
6317
6318 /**
6319  * Add Ethernet item to matcher and to the value.
6320  *
6321  * @param[in, out] matcher
6322  *   Flow matcher.
6323  * @param[in, out] key
6324  *   Flow matcher value.
6325  * @param[in] item
6326  *   Flow pattern to translate.
6327  * @param[in] inner
6328  *   Item is inner pattern.
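 * @param[in] group
 *   The group to insert the rule.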
6329  */
6330 static void
6331 flow_dv_translate_item_eth(void *matcher, void *key,
6332                            const struct rte_flow_item *item, int inner,
6333                            uint32_t group)
6334 {
6335         const struct rte_flow_item_eth *eth_m = item->mask;
6336         const struct rte_flow_item_eth *eth_v = item->spec;
6337         const struct rte_flow_item_eth nic_mask = {
6338                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6339                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
6340                 .type = RTE_BE16(0xffff),
6341                 .has_vlan = 0,
6342         };
6343         void *hdrs_m;
6344         void *hdrs_v;
6345         char *l24_v;
6346         unsigned int i;
6347
6348         if (!eth_v)
6349                 return;
6350         if (!eth_m)
6351                 eth_m = &nic_mask;
6352         if (inner) {
6353                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6354                                          inner_headers);
6355                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6356         } else {
6357                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6358                                          outer_headers);
6359                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6360         }
6361         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
6362                &eth_m->dst, sizeof(eth_m->dst));
6363         /* The value must be in the range of the mask. */
6364         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
6365         for (i = 0; i < sizeof(eth_m->dst); ++i)
6366                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
6367         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
6368                &eth_m->src, sizeof(eth_m->src));
6369         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
6370         /* The value must be in the range of the mask. */
6371         for (i = 0; i < sizeof(eth_m->src); ++i)
6372                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
6373         /*
6374          * HW supports match on one Ethertype, the Ethertype following the last
6375          * VLAN tag of the packet (see PRM).
6376          * Set match on ethertype only if ETH header is not followed by VLAN.
6377          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6378          * ethertype, and use ip_version field instead.
6379          * eCPRI over Ether layer will use type value 0xAEFE.
6380          */
6381         if (eth_m->type == 0xFFFF) {
6382                 /* Set cvlan_tag mask for any single/multi/un-tagged case. */
6383                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6384                 switch (eth_v->type) {
6385                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6386                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6387                         return;
6388                 case RTE_BE16(RTE_ETHER_TYPE_QINQ):
6389                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6390                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6391                         return;
6392                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6393                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6394                         return;
6395                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6396                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6397                         return;
6398                 default:
6399                         break;
6400                 }
6401         }
6402         if (eth_m->has_vlan) {
6403                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6404                 if (eth_v->has_vlan) {
6405                         /*
6406                          * Here, if the has_more_vlan field in the VLAN
6407                          * item is also not set, only single-tagged
6408                          * packets are matched.
6408                          */
6409                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6410                         return;
6411                 }
6412         }
6413         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6414                  rte_be_to_cpu_16(eth_m->type));
6415         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
6416         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
6417 }
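
/*
 * Illustrative sketch (assumed application-side values): an item such as
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = RTE_BE16(0xffff),
 *	};
 *
 * takes the fully-masked Ethertype path above and is translated into an
 * ip_version == 4 match instead of an ethertype match.
 */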
6418
6419 /**
6420  * Add VLAN item to matcher and to the value.
6421  *
6422  * @param[in, out] dev_flow
6423  *   Flow descriptor.
6424  * @param[in, out] matcher
6425  *   Flow matcher.
6426  * @param[in, out] key
6427  *   Flow matcher value.
6428  * @param[in] item
6429  *   Flow pattern to translate.
6430  * @param[in] inner
6431  *   Item is inner pattern.
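 * @param[in] group
 *   The group to insert the rule.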
6432  */
6433 static void
6434 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
6435                             void *matcher, void *key,
6436                             const struct rte_flow_item *item,
6437                             int inner, uint32_t group)
6438 {
6439         const struct rte_flow_item_vlan *vlan_m = item->mask;
6440         const struct rte_flow_item_vlan *vlan_v = item->spec;
6441         void *hdrs_m;
6442         void *hdrs_v;
6443         uint16_t tci_m;
6444         uint16_t tci_v;
6445
6446         if (inner) {
6447                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6448                                          inner_headers);
6449                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6450         } else {
6451                 hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
6452                                          outer_headers);
6453                 hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6454                 /*
6455                  * This is a workaround: masks are not supported
6456                  * and have been pre-validated.
6457                  */
6458                 if (vlan_v)
6459                         dev_flow->handle->vf_vlan.tag =
6460                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
6461         }
6462         /*
6463          * When a VLAN item exists in the flow, mark the packet as tagged,
6464          * even if TCI is not specified.
6465          */
6466         if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
6467                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
6468                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
6469         }
6470         if (!vlan_v)
6471                 return;
6472         if (!vlan_m)
6473                 vlan_m = &rte_flow_item_vlan_mask;
6474         tci_m = rte_be_to_cpu_16(vlan_m->tci);
6475         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
6476         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
6477         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
6478         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
6479         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
6480         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
6481         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
6482         /*
6483          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
6484          * ethertype, and use ip_version field instead.
6485          */
6486         if (vlan_m->inner_type == 0xFFFF) {
6487                 switch (vlan_v->inner_type) {
6488                 case RTE_BE16(RTE_ETHER_TYPE_VLAN):
6489                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6490                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6491                         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6492                         return;
6493                 case RTE_BE16(RTE_ETHER_TYPE_IPV4):
6494                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
6495                         return;
6496                 case RTE_BE16(RTE_ETHER_TYPE_IPV6):
6497                         flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
6498                         return;
6499                 default:
6500                         break;
6501                 }
6502         }
6503         if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
6504                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
6505                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
6506                 /* Only one vlan_tag bit can be set. */
6507                 MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
6508                 return;
6509         }
6510         MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
6511                  rte_be_to_cpu_16(vlan_m->inner_type));
6512         MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
6513                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
6514 }
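
/*
 * Illustrative worked example (hypothetical value): a host-order TCI of
 * 0xa005 (PCP 5, DEI 0, VID 5) under a full mask yields
 * first_prio == 0xa005 >> 13 == 5, first_cfi == (0xa005 >> 12) & 1 == 0
 * and first_vid == 0xa005 & 0xfff == 5, MLX5_SET() truncating each
 * argument to its field width.
 */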
6515
6516 /**
6517  * Add IPV4 item to matcher and to the value.
6518  *
6519  * @param[in, out] matcher
6520  *   Flow matcher.
6521  * @param[in, out] key
6522  *   Flow matcher value.
6523  * @param[in] item
6524  *   Flow pattern to translate.
6525  * @param[in] inner
6526  *   Item is inner pattern.
6527  * @param[in] group
6528  *   The group to insert the rule.
6529  */
6530 static void
6531 flow_dv_translate_item_ipv4(void *matcher, void *key,
6532                             const struct rte_flow_item *item,
6533                             int inner, uint32_t group)
6534 {
6535         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6536         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6537         const struct rte_flow_item_ipv4 nic_mask = {
6538                 .hdr = {
6539                         .src_addr = RTE_BE32(0xffffffff),
6540                         .dst_addr = RTE_BE32(0xffffffff),
6541                         .type_of_service = 0xff,
6542                         .next_proto_id = 0xff,
6543                         .time_to_live = 0xff,
6544                 },
6545         };
6546         void *headers_m;
6547         void *headers_v;
6548         char *l24_m;
6549         char *l24_v;
6550         uint8_t tos;
6551
6552         if (inner) {
6553                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6554                                          inner_headers);
6555                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6556         } else {
6557                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6558                                          outer_headers);
6559                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6560         }
6561         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6562         if (!ipv4_v)
6563                 return;
6564         if (!ipv4_m)
6565                 ipv4_m = &nic_mask;
6566         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6567                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6568         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6569                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6570         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6571         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6572         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6573                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6574         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6575                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6576         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6577         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6578         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6579         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6580                  ipv4_m->hdr.type_of_service);
6581         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6582         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6583                  ipv4_m->hdr.type_of_service >> 2);
6584         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6585         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6586                  ipv4_m->hdr.next_proto_id);
6587         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6588                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6589         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6590                  ipv4_m->hdr.time_to_live);
6591         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6592                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6593         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6594                  !!(ipv4_m->hdr.fragment_offset));
6595         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6596                  !!(ipv4_v->hdr.fragment_offset & ipv4_m->hdr.fragment_offset));
6597 }
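
/*
 * Illustrative worked example (hypothetical value): type_of_service ==
 * 0xb8 under a full 0xff mask splits above into ip_dscp == 0xb8 >> 2 == 46
 * (DSCP EF) and ip_ecn == 0xb8 & 0x3 == 0, relying on MLX5_SET()
 * truncation to the 2-bit ECN field.
 */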
6598
6599 /**
6600  * Add IPV6 item to matcher and to the value.
6601  *
6602  * @param[in, out] matcher
6603  *   Flow matcher.
6604  * @param[in, out] key
6605  *   Flow matcher value.
6606  * @param[in] item
6607  *   Flow pattern to translate.
6608  * @param[in] inner
6609  *   Item is inner pattern.
6610  * @param[in] group
6611  *   The group to insert the rule.
6612  */
6613 static void
6614 flow_dv_translate_item_ipv6(void *matcher, void *key,
6615                             const struct rte_flow_item *item,
6616                             int inner, uint32_t group)
6617 {
6618         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6619         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6620         const struct rte_flow_item_ipv6 nic_mask = {
6621                 .hdr = {
6622                         .src_addr =
6623                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6624                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6625                         .dst_addr =
6626                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6627                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6628                         .vtc_flow = RTE_BE32(0xffffffff),
6629                         .proto = 0xff,
6630                         .hop_limits = 0xff,
6631                 },
6632         };
6633         void *headers_m;
6634         void *headers_v;
6635         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6636         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6637         char *l24_m;
6638         char *l24_v;
6639         uint32_t vtc_m;
6640         uint32_t vtc_v;
6641         int i;
6642         int size;
6643
6644         if (inner) {
6645                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6646                                          inner_headers);
6647                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6648         } else {
6649                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6650                                          outer_headers);
6651                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6652         }
6653         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6654         if (!ipv6_v)
6655                 return;
6656         if (!ipv6_m)
6657                 ipv6_m = &nic_mask;
6658         size = sizeof(ipv6_m->hdr.dst_addr);
6659         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6660                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6661         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6662                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6663         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6664         for (i = 0; i < size; ++i)
6665                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6666         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6667                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6668         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6669                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6670         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6671         for (i = 0; i < size; ++i)
6672                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6673         /* TOS. */
6674         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6675         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6676         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6677         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6678         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6679         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6680         /* Label. */
6681         if (inner) {
6682                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6683                          vtc_m);
6684                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6685                          vtc_v);
6686         } else {
6687                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6688                          vtc_m);
6689                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6690                          vtc_v);
6691         }
6692         /* Protocol. */
6693         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6694                  ipv6_m->hdr.proto);
6695         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6696                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6697         /* Hop limit. */
6698         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6699                  ipv6_m->hdr.hop_limits);
6700         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6701                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6702         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag,
6703                  !!(ipv6_m->has_frag_ext));
6704         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
6705                  !!(ipv6_v->has_frag_ext & ipv6_m->has_frag_ext));
6706 }
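
/*
 * Illustrative worked example (hypothetical value): vtc_flow == 0x6b812345
 * under a full mask decomposes above into
 * ip_dscp == (0x6b812345 >> 22) & 0x3f == 46,
 * ip_ecn == (0x6b812345 >> 20) & 0x3 == 0 and a 20-bit flow label of
 * 0x12345, MLX5_SET() truncating each argument to its field width.
 */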
6707
6708 /**
6709  * Add IPV6 fragment extension item to matcher and to the value.
6710  *
6711  * @param[in, out] matcher
6712  *   Flow matcher.
6713  * @param[in, out] key
6714  *   Flow matcher value.
6715  * @param[in] item
6716  *   Flow pattern to translate.
6717  * @param[in] inner
6718  *   Item is inner pattern.
6719  */
6720 static void
6721 flow_dv_translate_item_ipv6_frag_ext(void *matcher, void *key,
6722                                      const struct rte_flow_item *item,
6723                                      int inner)
6724 {
6725         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_m = item->mask;
6726         const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_ext_v = item->spec;
6727         const struct rte_flow_item_ipv6_frag_ext nic_mask = {
6728                 .hdr = {
6729                         .next_header = 0xff,
6730                         .frag_data = RTE_BE16(0xffff),
6731                 },
6732         };
6733         void *headers_m;
6734         void *headers_v;
6735
6736         if (inner) {
6737                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6738                                          inner_headers);
6739                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6740         } else {
6741                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6742                                          outer_headers);
6743                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6744         }
6745         /* IPv6 fragment extension item exists, so packet is IP fragment. */
6746         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6747         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 1);
6748         if (!ipv6_frag_ext_v)
6749                 return;
6750         if (!ipv6_frag_ext_m)
6751                 ipv6_frag_ext_m = &nic_mask;
6752         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6753                  ipv6_frag_ext_m->hdr.next_header);
6754         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6755                  ipv6_frag_ext_v->hdr.next_header &
6756                  ipv6_frag_ext_m->hdr.next_header);
6757 }
6758
6759 /**
6760  * Add TCP item to matcher and to the value.
6761  *
6762  * @param[in, out] matcher
6763  *   Flow matcher.
6764  * @param[in, out] key
6765  *   Flow matcher value.
6766  * @param[in] item
6767  *   Flow pattern to translate.
6768  * @param[in] inner
6769  *   Item is inner pattern.
6770  */
6771 static void
6772 flow_dv_translate_item_tcp(void *matcher, void *key,
6773                            const struct rte_flow_item *item,
6774                            int inner)
6775 {
6776         const struct rte_flow_item_tcp *tcp_m = item->mask;
6777         const struct rte_flow_item_tcp *tcp_v = item->spec;
6778         void *headers_m;
6779         void *headers_v;
6780
6781         if (inner) {
6782                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6783                                          inner_headers);
6784                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6785         } else {
6786                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6787                                          outer_headers);
6788                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6789         }
6790         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6791         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6792         if (!tcp_v)
6793                 return;
6794         if (!tcp_m)
6795                 tcp_m = &rte_flow_item_tcp_mask;
6796         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6797                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6798         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6799                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6800         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6801                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6802         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6803                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6804         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6805                  tcp_m->hdr.tcp_flags);
6806         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6807                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6808 }
6809
6810 /**
6811  * Add UDP item to matcher and to the value.
6812  *
6813  * @param[in, out] matcher
6814  *   Flow matcher.
6815  * @param[in, out] key
6816  *   Flow matcher value.
6817  * @param[in] item
6818  *   Flow pattern to translate.
6819  * @param[in] inner
6820  *   Item is inner pattern.
6821  */
6822 static void
6823 flow_dv_translate_item_udp(void *matcher, void *key,
6824                            const struct rte_flow_item *item,
6825                            int inner)
6826 {
6827         const struct rte_flow_item_udp *udp_m = item->mask;
6828         const struct rte_flow_item_udp *udp_v = item->spec;
6829         void *headers_m;
6830         void *headers_v;
6831
6832         if (inner) {
6833                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6834                                          inner_headers);
6835                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6836         } else {
6837                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6838                                          outer_headers);
6839                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6840         }
6841         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6842         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6843         if (!udp_v)
6844                 return;
6845         if (!udp_m)
6846                 udp_m = &rte_flow_item_udp_mask;
6847         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6848                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6849         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6850                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6851         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6852                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6853         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6854                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6855 }
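
/*
 * Illustrative sketch: a UDP item matching only destination port 4789
 * (VXLAN) sets the udp_dport mask to 0xffff and the value to 4789 above,
 * after byte-swapping the big-endian header fields with
 * rte_be_to_cpu_16().
 */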
6856
6857 /**
6858  * Add GRE optional Key item to matcher and to the value.
6859  *
6860  * @param[in, out] matcher
6861  *   Flow matcher.
6862  * @param[in, out] key
6863  *   Flow matcher value.
6864  * @param[in] item
6865  *   Flow pattern to translate.
6868  */
6869 static void
6870 flow_dv_translate_item_gre_key(void *matcher, void *key,
6871                                    const struct rte_flow_item *item)
6872 {
6873         const rte_be32_t *key_m = item->mask;
6874         const rte_be32_t *key_v = item->spec;
6875         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6876         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6877         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6878
6879         /* GRE K bit must be on and should already be validated. */
6880         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6881         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6882         if (!key_v)
6883                 return;
6884         if (!key_m)
6885                 key_m = &gre_key_default_mask;
6886         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6887                  rte_be_to_cpu_32(*key_m) >> 8);
6888         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6889                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6890         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6891                  rte_be_to_cpu_32(*key_m) & 0xFF);
6892         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6893                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6894 }
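
/*
 * Illustrative worked example (hypothetical value): a fully-masked GRE key
 * of 0x11223344 is split above into gre_key_h == 0x112233 (upper 24 bits)
 * and gre_key_l == 0x44 (lower 8 bits).
 */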
6895
6896 /**
6897  * Add GRE item to matcher and to the value.
6898  *
6899  * @param[in, out] matcher
6900  *   Flow matcher.
6901  * @param[in, out] key
6902  *   Flow matcher value.
6903  * @param[in] item
6904  *   Flow pattern to translate.
6905  * @param[in] inner
6906  *   Item is inner pattern.
6907  */
6908 static void
6909 flow_dv_translate_item_gre(void *matcher, void *key,
6910                            const struct rte_flow_item *item,
6911                            int inner)
6912 {
6913         const struct rte_flow_item_gre *gre_m = item->mask;
6914         const struct rte_flow_item_gre *gre_v = item->spec;
6915         void *headers_m;
6916         void *headers_v;
6917         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6918         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6919         struct {
6920                 union {
6921                         __extension__
6922                         struct {
6923                                 uint16_t version:3;
6924                                 uint16_t rsvd0:9;
6925                                 uint16_t s_present:1;
6926                                 uint16_t k_present:1;
6927                                 uint16_t rsvd_bit1:1;
6928                                 uint16_t c_present:1;
6929                         };
6930                         uint16_t value;
6931                 };
6932         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6933
6934         if (inner) {
6935                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6936                                          inner_headers);
6937                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6938         } else {
6939                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6940                                          outer_headers);
6941                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6942         }
6943         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6944         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6945         if (!gre_v)
6946                 return;
6947         if (!gre_m)
6948                 gre_m = &rte_flow_item_gre_mask;
6949         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6950                  rte_be_to_cpu_16(gre_m->protocol));
6951         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6952                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6953         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6954         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6955         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6956                  gre_crks_rsvd0_ver_m.c_present);
6957         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6958                  gre_crks_rsvd0_ver_v.c_present &
6959                  gre_crks_rsvd0_ver_m.c_present);
6960         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6961                  gre_crks_rsvd0_ver_m.k_present);
6962         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6963                  gre_crks_rsvd0_ver_v.k_present &
6964                  gre_crks_rsvd0_ver_m.k_present);
6965         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6966                  gre_crks_rsvd0_ver_m.s_present);
6967         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6968                  gre_crks_rsvd0_ver_v.s_present &
6969                  gre_crks_rsvd0_ver_m.s_present);
6970 }
6971
6972 /**
6973  * Add NVGRE item to matcher and to the value.
6974  *
6975  * @param[in, out] matcher
6976  *   Flow matcher.
6977  * @param[in, out] key
6978  *   Flow matcher value.
6979  * @param[in] item
6980  *   Flow pattern to translate.
6981  * @param[in] inner
6982  *   Item is inner pattern.
6983  */
6984 static void
6985 flow_dv_translate_item_nvgre(void *matcher, void *key,
6986                              const struct rte_flow_item *item,
6987                              int inner)
6988 {
6989         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6990         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6991         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6992         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6993         const char *tni_flow_id_m;
6994         const char *tni_flow_id_v;
6995         char *gre_key_m;
6996         char *gre_key_v;
6997         int size;
6998         int i;
6999
7000         /* For NVGRE, GRE header fields must be set with defined values. */
7001         const struct rte_flow_item_gre gre_spec = {
7002                 .c_rsvd0_ver = RTE_BE16(0x2000),
7003                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
7004         };
7005         const struct rte_flow_item_gre gre_mask = {
7006                 .c_rsvd0_ver = RTE_BE16(0xB000),
7007                 .protocol = RTE_BE16(UINT16_MAX),
7008         };
7009         const struct rte_flow_item gre_item = {
7010                 .spec = &gre_spec,
7011                 .mask = &gre_mask,
7012                 .last = NULL,
7013         };
7014         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
7015         if (!nvgre_v)
7016                 return;
7017         if (!nvgre_m)
7018                 nvgre_m = &rte_flow_item_nvgre_mask;
7019         tni_flow_id_m = (const char *)nvgre_m->tni;
7020         tni_flow_id_v = (const char *)nvgre_v->tni;
7021         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
7022         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
7023         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
7024         memcpy(gre_key_m, tni_flow_id_m, size);
7025         for (i = 0; i < size; ++i)
7026                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
7027 }
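
/*
 * Illustrative note: spec 0x2000 under mask 0xB000 in the fixed GRE header
 * above pins K == 1 with C == 0 and S == 0, and the TEB Ethertype selects
 * transparent Ethernet bridging, which together describe the NVGRE header.
 * The 24-bit TNI plus the 8-bit flow_id then overlay the 32-bit GRE key,
 * hence the byte-wise copy over gre_key_h.
 */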
7028
7029 /**
7030  * Add VXLAN item to matcher and to the value.
7031  *
7032  * @param[in, out] matcher
7033  *   Flow matcher.
7034  * @param[in, out] key
7035  *   Flow matcher value.
7036  * @param[in] item
7037  *   Flow pattern to translate.
7038  * @param[in] inner
7039  *   Item is inner pattern.
7040  */
7041 static void
7042 flow_dv_translate_item_vxlan(void *matcher, void *key,
7043                              const struct rte_flow_item *item,
7044                              int inner)
7045 {
7046         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
7047         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
7048         void *headers_m;
7049         void *headers_v;
7050         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7051         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7052         char *vni_m;
7053         char *vni_v;
7054         uint16_t dport;
7055         int size;
7056         int i;
7057
7058         if (inner) {
7059                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7060                                          inner_headers);
7061                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7062         } else {
7063                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7064                                          outer_headers);
7065                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7066         }
7067         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
7068                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
7069         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7070                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7071                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7072         }
7073         if (!vxlan_v)
7074                 return;
7075         if (!vxlan_m)
7076                 vxlan_m = &rte_flow_item_vxlan_mask;
7077         size = sizeof(vxlan_m->vni);
7078         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
7079         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
7080         memcpy(vni_m, vxlan_m->vni, size);
7081         for (i = 0; i < size; ++i)
7082                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7083 }
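
/*
 * Illustrative sketch (assumed application-side values): to match VNI
 * 0x123456 with a full mask,
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x12, 0x34, 0x56 },
 *	};
 *
 * the three VNI bytes are AND-ed with the mask byte by byte above.
 */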
7084
7085 /**
7086  * Add VXLAN-GPE item to matcher and to the value.
7087  *
7088  * @param[in, out] matcher
7089  *   Flow matcher.
7090  * @param[in, out] key
7091  *   Flow matcher value.
7092  * @param[in] item
7093  *   Flow pattern to translate.
7094  * @param[in] inner
7095  *   Item is inner pattern.
7096  */
7098 static void
7099 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
7100                                  const struct rte_flow_item *item, int inner)
7101 {
7102         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
7103         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
7104         void *headers_m;
7105         void *headers_v;
7106         void *misc_m =
7107                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
7108         void *misc_v =
7109                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7110         char *vni_m;
7111         char *vni_v;
7112         uint16_t dport;
7113         int size;
7114         int i;
7115         uint8_t flags_m = 0xff;
7116         uint8_t flags_v = 0xc;
7117
7118         if (inner) {
7119                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7120                                          inner_headers);
7121                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7122         } else {
7123                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7124                                          outer_headers);
7125                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7126         }
7127         dport = MLX5_UDP_PORT_VXLAN_GPE;
7129         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7130                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7131                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7132         }
7133         if (!vxlan_v)
7134                 return;
7135         if (!vxlan_m)
7136                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
7137         size = sizeof(vxlan_m->vni);
7138         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
7139         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
7140         memcpy(vni_m, vxlan_m->vni, size);
7141         for (i = 0; i < size; ++i)
7142                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
7143         if (vxlan_m->flags) {
7144                 flags_m = vxlan_m->flags;
7145                 flags_v = vxlan_v->flags;
7146         }
7147         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
7148         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
7149         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
7150                  vxlan_m->protocol);
7151         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
7152                  vxlan_v->protocol);
7153 }
7154
7155 /**
7156  * Add Geneve item to matcher and to the value.
7157  *
7158  * @param[in, out] matcher
7159  *   Flow matcher.
7160  * @param[in, out] key
7161  *   Flow matcher value.
7162  * @param[in] item
7163  *   Flow pattern to translate.
7164  * @param[in] inner
7165  *   Item is inner pattern.
7166  */
7168 static void
7169 flow_dv_translate_item_geneve(void *matcher, void *key,
7170                               const struct rte_flow_item *item, int inner)
7171 {
7172         const struct rte_flow_item_geneve *geneve_m = item->mask;
7173         const struct rte_flow_item_geneve *geneve_v = item->spec;
7174         void *headers_m;
7175         void *headers_v;
7176         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7177         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7178         uint16_t dport;
7179         uint16_t gbhdr_m;
7180         uint16_t gbhdr_v;
7181         char *vni_m;
7182         char *vni_v;
7183         size_t size, i;
7184
7185         if (inner) {
7186                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7187                                          inner_headers);
7188                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7189         } else {
7190                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7191                                          outer_headers);
7192                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7193         }
7194         dport = MLX5_UDP_PORT_GENEVE;
7195         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7196                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7197                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7198         }
7199         if (!geneve_v)
7200                 return;
7201         if (!geneve_m)
7202                 geneve_m = &rte_flow_item_geneve_mask;
7203         size = sizeof(geneve_m->vni);
7204         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
7205         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
7206         memcpy(vni_m, geneve_m->vni, size);
7207         for (i = 0; i < size; ++i)
7208                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
7209         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
7210                  rte_be_to_cpu_16(geneve_m->protocol));
7211         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
7212                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
7213         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
7214         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
7215         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
7216                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7217         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
7218                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
7219         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
7220                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7221         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
7222                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
7223                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
7224 }
7225
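/*
 * Editor's note: a hedged sketch of the application-side GENEVE item this
 * translator consumes. Field values are examples only; the layout is the
 * public rte_flow one (VNI, protocol, ver_opt_len_o_c_rsvd0).
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_item_geneve geneve_spec = {
        .vni = { 0x00, 0x12, 0x34 },            /* 24-bit VNI 0x001234 */
        .protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const struct rte_flow_item geneve_item = {
        .type = RTE_FLOW_ITEM_TYPE_GENEVE,
        .spec = &geneve_spec,
        .mask = &rte_flow_item_geneve_mask,     /* default mask */
};
#endif
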
7226 /**
7227  * Add MPLS item to matcher and to the value.
7228  *
7229  * @param[in, out] matcher
7230  *   Flow matcher.
7231  * @param[in, out] key
7232  *   Flow matcher value.
7233  * @param[in] item
7234  *   Flow pattern to translate.
7235  * @param[in] prev_layer
7236  *   The protocol layer indicated in previous item.
7237  * @param[in] inner
7238  *   Item is inner pattern.
7239  */
7240 static void
7241 flow_dv_translate_item_mpls(void *matcher, void *key,
7242                             const struct rte_flow_item *item,
7243                             uint64_t prev_layer,
7244                             int inner)
7245 {
7246         const uint32_t *in_mpls_m = item->mask;
7247         const uint32_t *in_mpls_v = item->spec;
7248         uint32_t *out_mpls_m = 0;
7249         uint32_t *out_mpls_v = 0;
7250         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7251         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7252         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
7253                                      misc_parameters_2);
7254         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7255         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
7256         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7257
7258         switch (prev_layer) {
7259         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7260                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
7261                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
7262                          MLX5_UDP_PORT_MPLS);
7263                 break;
7264         case MLX5_FLOW_LAYER_GRE:
7265                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
7266                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
7267                          RTE_ETHER_TYPE_MPLS);
7268                 break;
7269         default:
7270                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
7271                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
7272                          IPPROTO_MPLS);
7273                 break;
7274         }
7275         if (!in_mpls_v)
7276                 return;
7277         if (!in_mpls_m)
7278                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
7279         switch (prev_layer) {
7280         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
7281                 out_mpls_m =
7282                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7283                                                  outer_first_mpls_over_udp);
7284                 out_mpls_v =
7285                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7286                                                  outer_first_mpls_over_udp);
7287                 break;
7288         case MLX5_FLOW_LAYER_GRE:
7289                 out_mpls_m =
7290                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
7291                                                  outer_first_mpls_over_gre);
7292                 out_mpls_v =
7293                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
7294                                                  outer_first_mpls_over_gre);
7295                 break;
7296         default:
7297                 /* Inner MPLS not over GRE is not supported. */
7298                 if (!inner) {
7299                         out_mpls_m =
7300                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7301                                                          misc2_m,
7302                                                          outer_first_mpls);
7303                         out_mpls_v =
7304                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
7305                                                          misc2_v,
7306                                                          outer_first_mpls);
7307                 }
7308                 break;
7309         }
7310         if (out_mpls_m && out_mpls_v) {
7311                 *out_mpls_m = *in_mpls_m;
7312                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
7313         }
7314 }
7315
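/*
 * Editor's note: the prev_layer switch above routes the same MPLS item to
 * different misc2 fields depending on its encapsulation. A hedged sketch of
 * a pattern whose previous layer is outer UDP (MPLS-in-UDP, port 6635):
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_item mpls_over_udp_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },  /* prev_layer = OUTER_L4_UDP */
        { .type = RTE_FLOW_ITEM_TYPE_MPLS }, /* -> outer_first_mpls_over_udp */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif
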
7316 /**
7317  * Add metadata register item to matcher
7318  *
7319  * @param[in, out] matcher
7320  *   Flow matcher.
7321  * @param[in, out] key
7322  *   Flow matcher value.
7323  * @param[in] reg_type
7324  *   Type of device metadata register.
7325  * @param[in] data
7326  *   Register value.
7327  * @param[in] mask
7328  *   Register mask.
7329  */
7330 static void
7331 flow_dv_match_meta_reg(void *matcher, void *key,
7332                        enum modify_reg reg_type,
7333                        uint32_t data, uint32_t mask)
7334 {
7335         void *misc2_m =
7336                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
7337         void *misc2_v =
7338                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
7339         uint32_t temp;
7340
7341         data &= mask;
7342         switch (reg_type) {
7343         case REG_A:
7344                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
7345                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
7346                 break;
7347         case REG_B:
7348                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
7349                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
7350                 break;
7351         case REG_C_0:
7352                 /*
7353                  * The metadata register C0 field might be divided into
7354                  * source vport index and META item value, we should set
7355                  * this field according to specified mask, not as whole one.
7356                  */
7357                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
7358                 temp |= mask;
7359                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
7360                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
7361                 temp &= ~mask;
7362                 temp |= data;
7363                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
7364                 break;
7365         case REG_C_1:
7366                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
7367                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
7368                 break;
7369         case REG_C_2:
7370                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
7371                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
7372                 break;
7373         case REG_C_3:
7374                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
7375                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
7376                 break;
7377         case REG_C_4:
7378                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
7379                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
7380                 break;
7381         case REG_C_5:
7382                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
7383                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
7384                 break;
7385         case REG_C_6:
7386                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
7387                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
7388                 break;
7389         case REG_C_7:
7390                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
7391                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
7392                 break;
7393         default:
7394                 MLX5_ASSERT(false);
7395                 break;
7396         }
7397 }
7398
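/*
 * Editor's note: a hedged restatement of the REG_C_0 read-modify-write
 * above, with plain integers. Only the bits covered by the user mask are
 * rewritten, so a vport tag already stored in the register survives.
 */
#if 0 /* illustrative example, not compiled */
static inline uint32_t
example_merge_reg_c0(uint32_t reg_val, uint32_t user_data, uint32_t user_mask)
{
        /* Clear the requested window, then write the new data into it. */
        return (reg_val & ~user_mask) | (user_data & user_mask);
}
/* example_merge_reg_c0(0x00001234, 0xabcd0000, 0xffff0000) == 0xabcd1234 */
#endif
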
7399 /**
7400  * Add MARK item to matcher
7401  *
7402  * @param[in] dev
7403  *   The device to configure through.
7404  * @param[in, out] matcher
7405  *   Flow matcher.
7406  * @param[in, out] key
7407  *   Flow matcher value.
7408  * @param[in] item
7409  *   Flow pattern to translate.
7410  */
7411 static void
7412 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
7413                             void *matcher, void *key,
7414                             const struct rte_flow_item *item)
7415 {
7416         struct mlx5_priv *priv = dev->data->dev_private;
7417         const struct rte_flow_item_mark *mark;
7418         uint32_t value;
7419         uint32_t mask;
7420
7421         mark = item->mask ? (const void *)item->mask :
7422                             &rte_flow_item_mark_mask;
7423         mask = mark->id & priv->sh->dv_mark_mask;
7424         mark = (const void *)item->spec;
7425         MLX5_ASSERT(mark);
7426         value = mark->id & priv->sh->dv_mark_mask & mask;
7427         if (mask) {
7428                 enum modify_reg reg;
7429
7430                 /* Get the metadata register index for the mark. */
7431                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
7432                 MLX5_ASSERT(reg > 0);
7433                 if (reg == REG_C_0) {
7434                         struct mlx5_priv *priv = dev->data->dev_private;
7435                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7436                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7437
7438                         mask &= msk_c0;
7439                         mask <<= shl_c0;
7440                         value <<= shl_c0;
7441                 }
7442                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7443         }
7444 }
7445
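/*
 * Editor's note: a hedged worked example of the REG_C_0 alignment above,
 * assuming a hypothetical dv_regc0_mask of 0xffff0000 (upper half reserved
 * for the MARK value).
 */
#if 0 /* illustrative example, not compiled */
static inline uint32_t
example_mark_to_c0(uint32_t mark_id)
{
        uint32_t msk_c0 = 0xffff0000;        /* assumed register window */
        uint32_t shl_c0 = rte_bsf32(msk_c0); /* lowest set bit -> 16    */

        return (mark_id << shl_c0) & msk_c0; /* 0x1234 -> 0x12340000    */
}
#endif
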
7446 /**
7447  * Add META item to matcher
7448  *
7449  * @param[in] dev
7450  *   The device to configure through.
7451  * @param[in, out] matcher
7452  *   Flow matcher.
7453  * @param[in, out] key
7454  *   Flow matcher value.
7455  * @param[in] attr
7456  *   Attributes of flow that includes this item.
7457  * @param[in] item
7458  *   Flow pattern to translate.
7459  */
7460 static void
7461 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
7462                             void *matcher, void *key,
7463                             const struct rte_flow_attr *attr,
7464                             const struct rte_flow_item *item)
7465 {
7466         const struct rte_flow_item_meta *meta_m;
7467         const struct rte_flow_item_meta *meta_v;
7468
7469         meta_m = (const void *)item->mask;
7470         if (!meta_m)
7471                 meta_m = &rte_flow_item_meta_mask;
7472         meta_v = (const void *)item->spec;
7473         if (meta_v) {
7474                 int reg;
7475                 uint32_t value = meta_v->data;
7476                 uint32_t mask = meta_m->data;
7477
7478                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
7479                 if (reg < 0)
7480                         return;
7481                 MLX5_ASSERT(reg != REG_NON);
7482                 /*
7483                  * In datapath code there are no endianness
7484                  * conversions for performance reasons; all
7485                  * pattern conversions are done in rte_flow.
7486                  */
7487                 value = rte_cpu_to_be_32(value);
7488                 mask = rte_cpu_to_be_32(mask);
7489                 if (reg == REG_C_0) {
7490                         struct mlx5_priv *priv = dev->data->dev_private;
7491                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7492                         uint32_t shl_c0 = rte_bsf32(msk_c0);
7493 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
7494                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
7495
7496                         value >>= shr_c0;
7497                         mask >>= shr_c0;
7498 #endif
7499                         value <<= shl_c0;
7500                         mask <<= shl_c0;
7501                         MLX5_ASSERT(msk_c0);
7502                         MLX5_ASSERT(!(~msk_c0 & mask));
7503                 }
7504                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
7505         }
7506 }
7507
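/*
 * Editor's note: a hedged little-endian walk-through of the META shifts
 * above, assuming a 16-bit META mask (dv_meta_mask == 0xffff) and a C0
 * window of 0xffff0000:
 *   value = rte_cpu_to_be_32(0x1234)       -> 0x34120000
 *   shr_c0 = __builtin_clz(0xffff) == 16   -> value >>= 16 -> 0x3412
 *   shl_c0 = rte_bsf32(0xffff0000) == 16   -> value <<= 16 -> 0x34120000
 */
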
7508 /**
7509  * Add vport metadata Reg C0 item to matcher
7510  *
7511  * @param[in, out] matcher
7512  *   Flow matcher.
7513  * @param[in, out] key
7514  *   Flow matcher value.
7515  * @param[in] value
7516  *   Register value to match; @p mask gives the register mask.
7517  */
7518 static void
7519 flow_dv_translate_item_meta_vport(void *matcher, void *key,
7520                                   uint32_t value, uint32_t mask)
7521 {
7522         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
7523 }
7524
7525 /**
7526  * Add tag item to matcher
7527  *
7528  * @param[in] dev
7529  *   The device to configure through.
7530  * @param[in, out] matcher
7531  *   Flow matcher.
7532  * @param[in, out] key
7533  *   Flow matcher value.
7534  * @param[in] item
7535  *   Flow pattern to translate.
7536  */
7537 static void
7538 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
7539                                 void *matcher, void *key,
7540                                 const struct rte_flow_item *item)
7541 {
7542         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7543         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7544         uint32_t mask, value;
7545
7546         MLX5_ASSERT(tag_v);
7547         value = tag_v->data;
7548         mask = tag_m ? tag_m->data : UINT32_MAX;
7549         if (tag_v->id == REG_C_0) {
7550                 struct mlx5_priv *priv = dev->data->dev_private;
7551                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7552                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7553
7554                 mask &= msk_c0;
7555                 mask <<= shl_c0;
7556                 value <<= shl_c0;
7557         }
7558         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7559 }
7560
7561 /**
7562  * Add TAG item to matcher
7563  *
7564  * @param[in] dev
7565  *   The device to configure through.
7566  * @param[in, out] matcher
7567  *   Flow matcher.
7568  * @param[in, out] key
7569  *   Flow matcher value.
7570  * @param[in] item
7571  *   Flow pattern to translate.
7572  */
7573 static void
7574 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7575                            void *matcher, void *key,
7576                            const struct rte_flow_item *item)
7577 {
7578         const struct rte_flow_item_tag *tag_v = item->spec;
7579         const struct rte_flow_item_tag *tag_m = item->mask;
7580         enum modify_reg reg;
7581
7582         MLX5_ASSERT(tag_v);
7583         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7584         /* Get the metadata register index for the tag. */
7585         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7586         MLX5_ASSERT(reg > 0);
7587         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7588 }
7589
7590 /**
7591  * Add source vport match to the specified matcher.
7592  *
7593  * @param[in, out] matcher
7594  *   Flow matcher.
7595  * @param[in, out] key
7596  *   Flow matcher value.
7597  * @param[in] port
7598  *   Source vport value to match.
7599  * @param[in] mask
7600  *   Mask of the value.
7601  */
7602 static void
7603 flow_dv_translate_item_source_vport(void *matcher, void *key,
7604                                     int16_t port, uint16_t mask)
7605 {
7606         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7607         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7608
7609         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7610         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7611 }
7612
7613 /**
7614  * Translate port-id item to eswitch match on port-id.
7615  *
7616  * @param[in] dev
7617  *   The device to configure through.
7618  * @param[in, out] matcher
7619  *   Flow matcher.
7620  * @param[in, out] key
7621  *   Flow matcher value.
7622  * @param[in] item
7623  *   Flow pattern to translate.
7624  * @param[in] attr
7625  *   Flow attributes.
7626  *
7627  * @return
7628  *   0 on success, a negative errno value otherwise.
7629  */
7630 static int
7631 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7632                                void *key, const struct rte_flow_item *item,
7633                                const struct rte_flow_attr *attr)
7634 {
7635         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7636         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7637         struct mlx5_priv *priv;
7638         uint16_t mask, id;
7639
7640         mask = pid_m ? pid_m->id : 0xffff;
7641         id = pid_v ? pid_v->id : dev->data->port_id;
7642         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7643         if (!priv)
7644                 return -rte_errno;
7645         /*
7646          * Translate to vport field or to metadata, depending on mode.
7647          * Kernel can use either misc.source_port or half of C0 metadata
7648          * register.
7649          */
7650         if (priv->vport_meta_mask) {
7651                 /*
7652                  * Provide the hint for SW steering library
7653                  * to insert the flow into ingress domain and
7654                  * save the extra vport match.
7655                  */
7656                 if (mask == 0xffff && priv->vport_id == 0xffff &&
7657                     priv->pf_bond < 0 && attr->transfer)
7658                         flow_dv_translate_item_source_vport
7659                                 (matcher, key, priv->vport_id, mask);
7660                 else
7661                         flow_dv_translate_item_meta_vport
7662                                 (matcher, key,
7663                                  priv->vport_meta_tag,
7664                                  priv->vport_meta_mask);
7665         } else {
7666                 flow_dv_translate_item_source_vport(matcher, key,
7667                                                     priv->vport_id, mask);
7668         }
7669         return 0;
7670 }
7671
7672 /**
7673  * Add ICMP6 item to matcher and to the value.
7674  *
7675  * @param[in, out] matcher
7676  *   Flow matcher.
7677  * @param[in, out] key
7678  *   Flow matcher value.
7679  * @param[in] item
7680  *   Flow pattern to translate.
7681  * @param[in] inner
7682  *   Item is inner pattern.
7683  */
7684 static void
7685 flow_dv_translate_item_icmp6(void *matcher, void *key,
7686                               const struct rte_flow_item *item,
7687                               int inner)
7688 {
7689         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7690         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7691         void *headers_m;
7692         void *headers_v;
7693         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7694                                      misc_parameters_3);
7695         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7696         if (inner) {
7697                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7698                                          inner_headers);
7699                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7700         } else {
7701                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7702                                          outer_headers);
7703                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7704         }
7705         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7706         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7707         if (!icmp6_v)
7708                 return;
7709         if (!icmp6_m)
7710                 icmp6_m = &rte_flow_item_icmp6_mask;
7711         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7712         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7713                  icmp6_v->type & icmp6_m->type);
7714         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7715         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7716                  icmp6_v->code & icmp6_m->code);
7717 }
7718
7719 /**
7720  * Add ICMP item to matcher and to the value.
7721  *
7722  * @param[in, out] matcher
7723  *   Flow matcher.
7724  * @param[in, out] key
7725  *   Flow matcher value.
7726  * @param[in] item
7727  *   Flow pattern to translate.
7728  * @param[in] inner
7729  *   Item is inner pattern.
7730  */
7731 static void
7732 flow_dv_translate_item_icmp(void *matcher, void *key,
7733                             const struct rte_flow_item *item,
7734                             int inner)
7735 {
7736         const struct rte_flow_item_icmp *icmp_m = item->mask;
7737         const struct rte_flow_item_icmp *icmp_v = item->spec;
7738         uint32_t icmp_header_data_m = 0;
7739         uint32_t icmp_header_data_v = 0;
7740         void *headers_m;
7741         void *headers_v;
7742         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7743                                      misc_parameters_3);
7744         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7745         if (inner) {
7746                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7747                                          inner_headers);
7748                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7749         } else {
7750                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7751                                          outer_headers);
7752                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7753         }
7754         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7755         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7756         if (!icmp_v)
7757                 return;
7758         if (!icmp_m)
7759                 icmp_m = &rte_flow_item_icmp_mask;
7760         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7761                  icmp_m->hdr.icmp_type);
7762         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7763                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7764         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7765                  icmp_m->hdr.icmp_code);
7766         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7767                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7768         icmp_header_data_m = rte_be_to_cpu_16(icmp_m->hdr.icmp_seq_nb);
7769         icmp_header_data_m |= rte_be_to_cpu_16(icmp_m->hdr.icmp_ident) << 16;
7770         if (icmp_header_data_m) {
7771                 icmp_header_data_v = rte_be_to_cpu_16(icmp_v->hdr.icmp_seq_nb);
7772                 icmp_header_data_v |=
7773                          rte_be_to_cpu_16(icmp_v->hdr.icmp_ident) << 16;
7774                 MLX5_SET(fte_match_set_misc3, misc3_m, icmp_header_data,
7775                          icmp_header_data_m);
7776                 MLX5_SET(fte_match_set_misc3, misc3_v, icmp_header_data,
7777                          icmp_header_data_v & icmp_header_data_m);
7778         }
7779 }
7780
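/*
 * Editor's note: a hedged restatement of the icmp_header_data packing
 * above: identifier in the upper and sequence number in the lower 16 bits.
 */
#if 0 /* illustrative example, not compiled */
static inline uint32_t
example_icmp_header_data(uint16_t ident, uint16_t seq)
{
        /* e.g. ident 0xbeef, seq 0x0001 -> 0xbeef0001 */
        return ((uint32_t)ident << 16) | seq;
}
#endif
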
7781 /**
7782  * Add GTP item to matcher and to the value.
7783  *
7784  * @param[in, out] matcher
7785  *   Flow matcher.
7786  * @param[in, out] key
7787  *   Flow matcher value.
7788  * @param[in] item
7789  *   Flow pattern to translate.
7790  * @param[in] inner
7791  *   Item is inner pattern.
7792  */
7793 static void
7794 flow_dv_translate_item_gtp(void *matcher, void *key,
7795                            const struct rte_flow_item *item, int inner)
7796 {
7797         const struct rte_flow_item_gtp *gtp_m = item->mask;
7798         const struct rte_flow_item_gtp *gtp_v = item->spec;
7799         void *headers_m;
7800         void *headers_v;
7801         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7802                                      misc_parameters_3);
7803         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7804         uint16_t dport = RTE_GTPU_UDP_PORT;
7805
7806         if (inner) {
7807                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7808                                          inner_headers);
7809                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7810         } else {
7811                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7812                                          outer_headers);
7813                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7814         }
7815         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7816                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7817                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7818         }
7819         if (!gtp_v)
7820                 return;
7821         if (!gtp_m)
7822                 gtp_m = &rte_flow_item_gtp_mask;
7823         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7824                  gtp_m->v_pt_rsv_flags);
7825         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7826                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7827         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7828         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7829                  gtp_v->msg_type & gtp_m->msg_type);
7830         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7831                  rte_be_to_cpu_32(gtp_m->teid));
7832         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7833                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7834 }
7835
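/*
 * Editor's note: a hedged sketch of the application-side GTP item this
 * translator consumes; the TEID value is an example only.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_item_gtp gtp_spec = {
        .teid = RTE_BE32(0x1234),
};
static const struct rte_flow_item gtp_item = {
        .type = RTE_FLOW_ITEM_TYPE_GTP,
        .spec = &gtp_spec,
        .mask = &rte_flow_item_gtp_mask, /* default mask: TEID only */
};
#endif
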
7836 /**
7837  * Add eCPRI item to matcher and to the value.
7838  *
7839  * @param[in] dev
7840  *   The device to configure through.
7841  * @param[in, out] matcher
7842  *   Flow matcher.
7843  * @param[in, out] key
7844  *   Flow matcher value.
7845  * @param[in] item
7846  *   Flow pattern to translate.
7847  *
7848  * Sample IDs are taken internally from the eCPRI flex parser profile.
7849  */
7850 static void
7851 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7852                              void *key, const struct rte_flow_item *item)
7853 {
7854         struct mlx5_priv *priv = dev->data->dev_private;
7855         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7856         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7857         struct rte_ecpri_common_hdr common;
7858         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7859                                      misc_parameters_4);
7860         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7861         uint32_t *samples;
7862         void *dw_m;
7863         void *dw_v;
7864
7865         if (!ecpri_v)
7866                 return;
7867         if (!ecpri_m)
7868                 ecpri_m = &rte_flow_item_ecpri_mask;
7869         /*
7870          * At most four DW samples are supported in a single matching now.
7871          * Two are used for eCPRI matching:
7872          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
7873          * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000,
7874          *    if present.
7875          */
7876         if (!ecpri_m->hdr.common.u32)
7877                 return;
7878         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7879         /* Need to take the whole DW as the mask to fill the entry. */
7880         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7881                             prog_sample_field_value_0);
7882         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7883                             prog_sample_field_value_0);
7884         /* Already big endian (network order) in the header. */
7885         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7886         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32 & ecpri_m->hdr.common.u32;
7887         /* Sample#0, used for matching type, offset 0. */
7888         MLX5_SET(fte_match_set_misc4, misc4_m,
7889                  prog_sample_field_id_0, samples[0]);
7890         /* It makes no sense to set the sample ID in the mask field. */
7891         MLX5_SET(fte_match_set_misc4, misc4_v,
7892                  prog_sample_field_id_0, samples[0]);
7893         /*
7894          * Check whether the message body part needs to be matched.
7895          * Wildcard rules that match only the type field must be supported.
7896          */
7897         if (ecpri_m->hdr.dummy[0]) {
7898                 common.u32 = rte_be_to_cpu_32(ecpri_v->hdr.common.u32);
7899                 switch (common.type) {
7900                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7901                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7902                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7903                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7904                                             prog_sample_field_value_1);
7905                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7906                                             prog_sample_field_value_1);
7907                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7908                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
7909                                             ecpri_m->hdr.dummy[0];
7910                         /* Sample#1, to match message body, offset 4. */
7911                         MLX5_SET(fte_match_set_misc4, misc4_m,
7912                                  prog_sample_field_id_1, samples[1]);
7913                         MLX5_SET(fte_match_set_misc4, misc4_v,
7914                                  prog_sample_field_id_1, samples[1]);
7915                         break;
7916                 default:
7917                         /* Others, do not match any sample ID. */
7918                         break;
7919                 }
7920         }
7921 }
7922
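/*
 * Editor's note: a hedged sketch of the type-only eCPRI mask described
 * above; 0x00ff0000 in network order selects the one-byte message type
 * in the common header.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_item_ecpri ecpri_type_only_mask = {
        .hdr.common.u32 = RTE_BE32(0x00ff0000),
};
#endif
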
7923 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7924
7925 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7926         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7927                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7928
7929 /**
7930  * Calculate flow matcher enable bitmap.
7931  *
7932  * @param match_criteria
7933  *   Pointer to flow matcher criteria.
7934  *
7935  * @return
7936  *   Bitmap of enabled fields.
7937  */
7938 static uint8_t
7939 flow_dv_matcher_enable(uint32_t *match_criteria)
7940 {
7941         uint8_t match_criteria_enable;
7942
7943         match_criteria_enable =
7944                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7945                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7946         match_criteria_enable |=
7947                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7948                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7949         match_criteria_enable |=
7950                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7951                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7952         match_criteria_enable |=
7953                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7954                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7955         match_criteria_enable |=
7956                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7957                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7958         match_criteria_enable |=
7959                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7960                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7961         return match_criteria_enable;
7962 }
7963
7964 struct mlx5_hlist_entry *
7965 flow_dv_tbl_create_cb(struct mlx5_hlist *list, uint64_t key64, void *cb_ctx)
7966 {
7967         struct mlx5_dev_ctx_shared *sh = list->ctx;
7968         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
7969         struct rte_eth_dev *dev = ctx->dev;
7970         struct mlx5_flow_tbl_data_entry *tbl_data;
7971         struct mlx5_flow_tbl_tunnel_prm *tt_prm = ctx->data;
7972         struct rte_flow_error *error = ctx->error;
7973         union mlx5_flow_tbl_key key = { .v64 = key64 };
7974         struct mlx5_flow_tbl_resource *tbl;
7975         void *domain;
7976         uint32_t idx = 0;
7977         int ret;
7978
7979         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7980         if (!tbl_data) {
7981                 rte_flow_error_set(error, ENOMEM,
7982                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7983                                    NULL,
7984                                    "cannot allocate flow table data entry");
7985                 return NULL;
7986         }
7987         tbl_data->idx = idx;
7988         tbl_data->tunnel = tt_prm->tunnel;
7989         tbl_data->group_id = tt_prm->group_id;
7990         tbl_data->external = !!tt_prm->external;
7991         tbl_data->tunnel_offload = is_tunnel_offload_active(dev);
7992         tbl_data->is_egress = !!key.direction;
7993         tbl_data->is_transfer = !!key.domain;
7994         tbl_data->dummy = !!key.dummy;
7995         tbl_data->table_id = key.table_id;
7996         tbl = &tbl_data->tbl;
7997         if (key.dummy)
7998                 return &tbl_data->entry;
7999         if (key.domain)
8000                 domain = sh->fdb_domain;
8001         else if (key.direction)
8002                 domain = sh->tx_domain;
8003         else
8004                 domain = sh->rx_domain;
8005         ret = mlx5_flow_os_create_flow_tbl(domain, key.table_id, &tbl->obj);
8006         if (ret) {
8007                 rte_flow_error_set(error, ENOMEM,
8008                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8009                                    NULL, "cannot create flow table object");
8010                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8011                 return NULL;
8012         }
8013         if (key.table_id) {
8014                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
8015                                         (tbl->obj, &tbl_data->jump.action);
8016                 if (ret) {
8017                         rte_flow_error_set(error, ENOMEM,
8018                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8019                                            NULL,
8020                                            "cannot create flow jump action");
8021                         mlx5_flow_os_destroy_flow_tbl(tbl->obj);
8022                         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
8023                         return NULL;
8024                 }
8025         }
8026         MKSTR(matcher_name, "%s_%s_%u_matcher_cache",
8027               key.domain ? "FDB" : "NIC", key.direction ? "egress" : "ingress",
8028               key.table_id);
8029         mlx5_cache_list_init(&tbl_data->matchers, matcher_name, 0, sh,
8030                              flow_dv_matcher_create_cb,
8031                              flow_dv_matcher_match_cb,
8032                              flow_dv_matcher_remove_cb);
8033         return &tbl_data->entry;
8034 }
8035
8036 int
8037 flow_dv_tbl_match_cb(struct mlx5_hlist *list __rte_unused,
8038                      struct mlx5_hlist_entry *entry, uint64_t key64,
8039                      void *cb_ctx __rte_unused)
8040 {
8041         struct mlx5_flow_tbl_data_entry *tbl_data =
8042                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8043         union mlx5_flow_tbl_key key = { .v64 = key64 };
8044
8045         return tbl_data->table_id != key.table_id ||
8046                tbl_data->dummy != key.dummy ||
8047                tbl_data->is_transfer != key.domain ||
8048                tbl_data->is_egress != key.direction;
8049 }
8050
8051 /**
8052  * Get a flow table.
8053  *
8054  * @param[in, out] dev
8055  *   Pointer to rte_eth_dev structure.
8056  * @param[in] table_id
8057  *   Table id to use.
8058  * @param[in] egress
8059  *   Direction of the table.
8060  * @param[in] transfer
8061  *   E-Switch or NIC flow.
8062  * @param[in] dummy
8063  *   Dummy entry for dv API.
8064  * @param[out] error
8065  *   pointer to error structure.
8066  *
8067  * @return
8068  *   Returns the table resource based on the index, NULL in case of failure.
8069  */
8070 struct mlx5_flow_tbl_resource *
8071 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
8072                          uint32_t table_id, uint8_t egress,
8073                          uint8_t transfer,
8074                          bool external,
8075                          const struct mlx5_flow_tunnel *tunnel,
8076                          uint32_t group_id, uint8_t dummy,
8077                          struct rte_flow_error *error)
8078 {
8079         struct mlx5_priv *priv = dev->data->dev_private;
8080         union mlx5_flow_tbl_key table_key = {
8081                 {
8082                         .table_id = table_id,
8083                         .dummy = dummy,
8084                         .domain = !!transfer,
8085                         .direction = !!egress,
8086                 }
8087         };
8088         struct mlx5_flow_tbl_tunnel_prm tt_prm = {
8089                 .tunnel = tunnel,
8090                 .group_id = group_id,
8091                 .external = external,
8092         };
8093         struct mlx5_flow_cb_ctx ctx = {
8094                 .dev = dev,
8095                 .error = error,
8096                 .data = &tt_prm,
8097         };
8098         struct mlx5_hlist_entry *entry;
8099         struct mlx5_flow_tbl_data_entry *tbl_data;
8100
8101         entry = mlx5_hlist_register(priv->sh->flow_tbls, table_key.v64, &ctx);
8102         if (!entry) {
8103                 rte_flow_error_set(error, ENOMEM,
8104                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8105                                    "cannot get table");
8106                 return NULL;
8107         }
8108         DRV_LOG(DEBUG, "Table_id %u tunnel %u group %u registered.",
8109                 table_id, tunnel ? tunnel->tunnel_id : 0, group_id);
8110         tbl_data = container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8111         return &tbl_data->tbl;
8112 }
8113
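/*
 * Editor's note: a hedged usage sketch of the getter above, registering
 * (or looking up) NIC Rx table 1 with no tunnel context.
 */
#if 0 /* illustrative example, not compiled */
static struct mlx5_flow_tbl_resource *
example_get_nic_rx_tbl(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        /* table_id = 1, ingress (egress = 0), NIC domain (transfer = 0). */
        return flow_dv_tbl_resource_get(dev, 1, 0, 0, false, NULL, 0, 0,
                                        error);
}
#endif
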
8114 void
8115 flow_dv_tbl_remove_cb(struct mlx5_hlist *list,
8116                       struct mlx5_hlist_entry *entry)
8117 {
8118         struct mlx5_dev_ctx_shared *sh = list->ctx;
8119         struct mlx5_flow_tbl_data_entry *tbl_data =
8120                 container_of(entry, struct mlx5_flow_tbl_data_entry, entry);
8121
8122         MLX5_ASSERT(entry && sh);
8123         if (tbl_data->jump.action)
8124                 mlx5_flow_os_destroy_flow_action(tbl_data->jump.action);
8125         if (tbl_data->tbl.obj)
8126                 mlx5_flow_os_destroy_flow_tbl(tbl_data->tbl.obj);
8127         if (tbl_data->tunnel_offload && tbl_data->external) {
8128                 struct mlx5_hlist_entry *he;
8129                 struct mlx5_hlist *tunnel_grp_hash;
8130                 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8131                 union tunnel_tbl_key tunnel_key = {
8132                         .tunnel_id = tbl_data->tunnel ?
8133                                         tbl_data->tunnel->tunnel_id : 0,
8134                         .group = tbl_data->group_id
8135                 };
8136                 uint32_t table_id = tbl_data->table_id;
8137
8138                 tunnel_grp_hash = tbl_data->tunnel ?
8139                                         tbl_data->tunnel->groups :
8140                                         thub->groups;
8141                 he = mlx5_hlist_lookup(tunnel_grp_hash, tunnel_key.val, NULL);
8142                 if (he)
8143                         mlx5_hlist_unregister(tunnel_grp_hash, he);
8144                 DRV_LOG(DEBUG,
8145                         "Table_id %u tunnel %u group %u released.",
8146                         table_id,
8147                         tbl_data->tunnel ?
8148                         tbl_data->tunnel->tunnel_id : 0,
8149                         tbl_data->group_id);
8150         }
8151         mlx5_cache_list_destroy(&tbl_data->matchers);
8152         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
8153 }
8154
8155 /**
8156  * Release a flow table.
8157  *
8158  * @param[in] sh
8159  *   Pointer to device shared structure.
8160  * @param[in] tbl
8161  *   Table resource to be released.
8162  *
8163  * @return
8164  *   Returns 0 if the table was released, 1 otherwise.
8165  */
8166 static int
8167 flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
8168                              struct mlx5_flow_tbl_resource *tbl)
8169 {
8170         struct mlx5_flow_tbl_data_entry *tbl_data =
8171                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8172
8173         if (!tbl)
8174                 return 0;
8175         return mlx5_hlist_unregister(sh->flow_tbls, &tbl_data->entry);
8176 }
8177
8178 int
8179 flow_dv_matcher_match_cb(struct mlx5_cache_list *list __rte_unused,
8180                          struct mlx5_cache_entry *entry, void *cb_ctx)
8181 {
8182         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8183         struct mlx5_flow_dv_matcher *ref = ctx->data;
8184         struct mlx5_flow_dv_matcher *cur = container_of(entry, typeof(*cur),
8185                                                         entry);
8186
8187         return cur->crc != ref->crc ||
8188                cur->priority != ref->priority ||
8189                memcmp((const void *)cur->mask.buf,
8190                       (const void *)ref->mask.buf, ref->mask.size);
8191 }
8192
8193 struct mlx5_cache_entry *
8194 flow_dv_matcher_create_cb(struct mlx5_cache_list *list,
8195                           struct mlx5_cache_entry *entry __rte_unused,
8196                           void *cb_ctx)
8197 {
8198         struct mlx5_dev_ctx_shared *sh = list->ctx;
8199         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8200         struct mlx5_flow_dv_matcher *ref = ctx->data;
8201         struct mlx5_flow_dv_matcher *cache;
8202         struct mlx5dv_flow_matcher_attr dv_attr = {
8203                 .type = IBV_FLOW_ATTR_NORMAL,
8204                 .match_mask = (void *)&ref->mask,
8205         };
8206         struct mlx5_flow_tbl_data_entry *tbl = container_of(ref->tbl,
8207                                                             typeof(*tbl), tbl);
8208         int ret;
8209
8210         cache = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache), 0, SOCKET_ID_ANY);
8211         if (!cache) {
8212                 rte_flow_error_set(ctx->error, ENOMEM,
8213                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8214                                    "cannot create matcher");
8215                 return NULL;
8216         }
8217         *cache = *ref;
8218         dv_attr.match_criteria_enable =
8219                 flow_dv_matcher_enable(cache->mask.buf);
8220         dv_attr.priority = ref->priority;
8221         if (tbl->is_egress)
8222                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
8223         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->tbl.obj,
8224                                                &cache->matcher_object);
8225         if (ret) {
8226                 mlx5_free(cache);
8227                 rte_flow_error_set(ctx->error, ENOMEM,
8228                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8229                                    "cannot create matcher");
8230                 return NULL;
8231         }
8232         return &cache->entry;
8233 }
8234
8235 /**
8236  * Register the flow matcher.
8237  *
8238  * @param[in, out] dev
8239  *   Pointer to rte_eth_dev structure.
8240  * @param[in, out] matcher
8241  *   Pointer to flow matcher.
8242  * @param[in, out] key
8243  *   Pointer to flow table key.
8244  * @param[in, out] dev_flow
8245  *   Pointer to the dev_flow.
8246  * @param[out] error
8247  *   pointer to error structure.
8248  *
8249  * @return
8250  *   0 on success, otherwise -errno and errno is set.
8251  */
8252 static int
8253 flow_dv_matcher_register(struct rte_eth_dev *dev,
8254                          struct mlx5_flow_dv_matcher *ref,
8255                          union mlx5_flow_tbl_key *key,
8256                          struct mlx5_flow *dev_flow,
8257                          const struct mlx5_flow_tunnel *tunnel,
8258                          uint32_t group_id,
8259                          struct rte_flow_error *error)
8260 {
8261         struct mlx5_cache_entry *entry;
8262         struct mlx5_flow_dv_matcher *cache;
8263         struct mlx5_flow_tbl_resource *tbl;
8264         struct mlx5_flow_tbl_data_entry *tbl_data;
8265         struct mlx5_flow_cb_ctx ctx = {
8266                 .error = error,
8267                 .data = ref,
8268         };
8269
8270         /*
8271          * tunnel offload API requires this registration for cases when
8272          * tunnel match rule was inserted before tunnel set rule.
8273          */
8274         tbl = flow_dv_tbl_resource_get(dev, key->table_id,
8275                                        key->direction, key->domain,
8276                                        dev_flow->external, tunnel,
8277                                        group_id, 0, error);
8278         if (!tbl)
8279                 return -rte_errno;      /* No need to refill the error info */
8280         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
8281         ref->tbl = tbl;
8282         entry = mlx5_cache_register(&tbl_data->matchers, &ctx);
8283         if (!entry) {
8284                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
8285                 return rte_flow_error_set(error, ENOMEM,
8286                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8287                                           "cannot allocate ref memory");
8288         }
8289         cache = container_of(entry, typeof(*cache), entry);
8290         dev_flow->handle->dvh.matcher = cache;
8291         return 0;
8292 }
8293
8294 struct mlx5_hlist_entry *
8295 flow_dv_tag_create_cb(struct mlx5_hlist *list, uint64_t key, void *ctx)
8296 {
8297         struct mlx5_dev_ctx_shared *sh = list->ctx;
8298         struct rte_flow_error *error = ctx;
8299         struct mlx5_flow_dv_tag_resource *entry;
8300         uint32_t idx = 0;
8301         int ret;
8302
8303         entry = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG], &idx);
8304         if (!entry) {
8305                 rte_flow_error_set(error, ENOMEM,
8306                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8307                                    "cannot allocate resource memory");
8308                 return NULL;
8309         }
8310         entry->idx = idx;
8311         entry->tag_id = key;
8312         ret = mlx5_flow_os_create_flow_action_tag(key,
8313                                                   &entry->action);
8314         if (ret) {
8315                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], idx);
8316                 rte_flow_error_set(error, ENOMEM,
8317                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8318                                    NULL, "cannot create action");
8319                 return NULL;
8320         }
8321         return &entry->entry;
8322 }
8323
8324 int
8325 flow_dv_tag_match_cb(struct mlx5_hlist *list __rte_unused,
8326                      struct mlx5_hlist_entry *entry, uint64_t key,
8327                      void *cb_ctx __rte_unused)
8328 {
8329         struct mlx5_flow_dv_tag_resource *tag =
8330                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8331
8332         return key != tag->tag_id;
8333 }
8334
8335 /**
8336  * Find existing tag resource or create and register a new one.
8337  *
8338  * @param dev[in, out]
8339  *   Pointer to rte_eth_dev structure.
8340  * @param[in, out] tag_be24
8341  *   Tag value in big endian, right-shifted by 8 bits.
8342  * @param[in, out] dev_flow
8343  *   Pointer to the dev_flow.
8344  * @param[out] error
8345  *   pointer to error structure.
8346  *
8347  * @return
8348  *   0 on success, otherwise -errno and errno is set.
8349  */
8350 static int
8351 flow_dv_tag_resource_register
8352                         (struct rte_eth_dev *dev,
8353                          uint32_t tag_be24,
8354                          struct mlx5_flow *dev_flow,
8355                          struct rte_flow_error *error)
8356 {
8357         struct mlx5_priv *priv = dev->data->dev_private;
8358         struct mlx5_flow_dv_tag_resource *cache_resource;
8359         struct mlx5_hlist_entry *entry;
8360
8361         entry = mlx5_hlist_register(priv->sh->tag_table, tag_be24, error);
8362         if (entry) {
8363                 cache_resource = container_of
8364                         (entry, struct mlx5_flow_dv_tag_resource, entry);
8365                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
8366                 dev_flow->dv.tag_resource = cache_resource;
8367                 return 0;
8368         }
8369         return -rte_errno;
8370 }
8371
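/*
 * Editor's note: a hedged sketch of the tag_be24 encoding named in the
 * comment above ("big endian, right-shifted by 8"), shown for a
 * little-endian host: 0x00abcdef -> rte_cpu_to_be_32() -> 0xefcdab00
 * -> >> 8 -> 0x00efcdab.
 */
#if 0 /* illustrative example, not compiled */
static inline uint32_t
example_tag_be24(uint32_t tag)
{
        return rte_cpu_to_be_32(tag) >> 8;
}
#endif
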
8372 void
8373 flow_dv_tag_remove_cb(struct mlx5_hlist *list,
8374                       struct mlx5_hlist_entry *entry)
8375 {
8376         struct mlx5_dev_ctx_shared *sh = list->ctx;
8377         struct mlx5_flow_dv_tag_resource *tag =
8378                 container_of(entry, struct mlx5_flow_dv_tag_resource, entry);
8379
8380         MLX5_ASSERT(tag && sh && tag->action);
8381         claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
8382         DRV_LOG(DEBUG, "Tag %p: removed.", (void *)tag);
8383         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG], tag->idx);
8384 }
8385
8386 /**
8387  * Release the tag.
8388  *
8389  * @param dev
8390  *   Pointer to Ethernet device.
8391  * @param tag_idx
8392  *   Tag index.
8393  *
8394  * @return
8395  *   1 while a reference on it exists, 0 when freed.
8396  */
8397 static int
8398 flow_dv_tag_release(struct rte_eth_dev *dev,
8399                     uint32_t tag_idx)
8400 {
8401         struct mlx5_priv *priv = dev->data->dev_private;
8402         struct mlx5_flow_dv_tag_resource *tag;
8403
8404         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
8405         if (!tag)
8406                 return 0;
8407         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
8408                 dev->data->port_id, (void *)tag, tag->entry.ref_cnt);
8409         return mlx5_hlist_unregister(priv->sh->tag_table, &tag->entry);
8410 }
8411
8412 /**
8413  * Translate port ID action to vport.
8414  *
8415  * @param[in] dev
8416  *   Pointer to rte_eth_dev structure.
8417  * @param[in] action
8418  *   Pointer to the port ID action.
8419  * @param[out] dst_port_id
8420  *   The target port ID.
8421  * @param[out] error
8422  *   Pointer to the error structure.
8423  *
8424  * @return
8425  *   0 on success, a negative errno value otherwise and rte_errno is set.
8426  */
8427 static int
8428 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
8429                                  const struct rte_flow_action *action,
8430                                  uint32_t *dst_port_id,
8431                                  struct rte_flow_error *error)
8432 {
8433         uint32_t port;
8434         struct mlx5_priv *priv;
8435         const struct rte_flow_action_port_id *conf =
8436                         (const struct rte_flow_action_port_id *)action->conf;
8437
8438         port = conf->original ? dev->data->port_id : conf->id;
8439         priv = mlx5_port_to_eswitch_info(port, false);
8440         if (!priv)
8441                 return rte_flow_error_set(error, -rte_errno,
8442                                           RTE_FLOW_ERROR_TYPE_ACTION,
8443                                           NULL,
8444                                           "No eswitch info was found for port");
8445 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
8446         /*
8447          * This parameter is transferred to
8448          * mlx5dv_dr_action_create_dest_ib_port().
8449          */
8450         *dst_port_id = priv->dev_port;
8451 #else
8452         /*
8453          * Legacy mode, no LAG configuration is supported.
8454          * This parameter is transferred to
8455          * mlx5dv_dr_action_create_dest_vport().
8456          */
8457         *dst_port_id = priv->vport_id;
8458 #endif
8459         return 0;
8460 }
8461
8462 /**
8463  * Create a counter with aging configuration.
8464  *
8465  * @param[in] dev
8466  *   Pointer to rte_eth_dev structure.
8467  * @param[out] count
8468  *   Pointer to the counter action configuration.
8469  * @param[in] age
8470  *   Pointer to the aging action configuration.
8471  *
8472  * @return
8473  *   Index to flow counter on success, 0 otherwise.
8474  */
8475 static uint32_t
8476 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
8477                                 struct mlx5_flow *dev_flow,
8478                                 const struct rte_flow_action_count *count,
8479                                 const struct rte_flow_action_age *age)
8480 {
8481         uint32_t counter;
8482         struct mlx5_age_param *age_param;
8483
8484         if (count && count->shared)
8485                 counter = flow_dv_counter_get_shared(dev, count->id);
8486         else
8487                 counter = flow_dv_counter_alloc(dev, !!age);
8488         if (!counter || age == NULL)
8489                 return counter;
8490         age_param = flow_dv_counter_idx_get_age(dev, counter);
8491         age_param->context = age->context ? age->context :
8492                 (void *)(uintptr_t)(dev_flow->flow_idx);
8493         age_param->timeout = age->timeout;
8494         age_param->port_id = dev->data->port_id;
8495         __atomic_store_n(&age_param->sec_since_last_hit, 0, __ATOMIC_RELAXED);
8496         __atomic_store_n(&age_param->state, AGE_CANDIDATE, __ATOMIC_RELAXED);
8497         return counter;
8498 }
8499
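/*
 * Editor's note: a hedged sketch of an AGE action configuration consumed
 * above; with a NULL context the flow index is reported instead.
 */
#if 0 /* illustrative example, not compiled */
static const struct rte_flow_action_age age_conf = {
        .timeout = 10,   /* seconds of inactivity before aging out */
        .context = NULL, /* fall back to the flow index            */
};
#endif
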
8500 /**
8501  * Add Tx queue matcher
8502  *
8503  * @param[in] dev
8504  *   Pointer to the dev struct.
8505  * @param[in, out] matcher
8506  *   Flow matcher.
8507  * @param[in, out] key
8508  *   Flow matcher value.
8509  * @param[in] item
8510  *   Flow pattern to translate.
8511  * @param[in] inner
8512  *   Item is inner pattern.
8513  */
8514 static void
8515 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
8516                                 void *matcher, void *key,
8517                                 const struct rte_flow_item *item)
8518 {
8519         const struct mlx5_rte_flow_item_tx_queue *queue_m;
8520         const struct mlx5_rte_flow_item_tx_queue *queue_v;
8521         void *misc_m =
8522                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
8523         void *misc_v =
8524                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
8525         struct mlx5_txq_ctrl *txq;
8526         uint32_t queue;
8527
8529         queue_m = (const void *)item->mask;
8530         if (!queue_m)
8531                 return;
8532         queue_v = (const void *)item->spec;
8533         if (!queue_v)
8534                 return;
8535         txq = mlx5_txq_get(dev, queue_v->queue);
8536         if (!txq)
8537                 return;
8538         queue = txq->obj->sq->id;
8539         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
8540         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
8541                  queue & queue_m->queue);
8542         mlx5_txq_release(dev, queue_v->queue);
8543 }
8544
8545 /**
8546  * Set the hash fields according to the @p flow information.
8547  *
8548  * @param[in] dev_flow
8549  *   Pointer to the mlx5_flow.
8550  * @param[in] rss_desc
8551  *   Pointer to the mlx5_flow_rss_desc.
8552  */
8553 static void
8554 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
8555                        struct mlx5_flow_rss_desc *rss_desc)
8556 {
8557         uint64_t items = dev_flow->handle->layers;
8558         int rss_inner = 0;
8559         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
8560
8561         dev_flow->hash_fields = 0;
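             /*
              * RSS level 2 and above requests hashing on the inner
              * (encapsulated) packet headers.
              */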
8562 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
8563         if (rss_desc->level >= 2) {
8564                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
8565                 rss_inner = 1;
8566         }
8567 #endif
8568         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
8569             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
8570                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
8571                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8572                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
8573                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8574                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
8575                         else
8576                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
8577                 }
8578         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
8579                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
8580                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
8581                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
8582                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
8583                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
8584                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
8585                         else
8586                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
8587                 }
8588         }
8589         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
8590             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
8591                 if (rss_types & ETH_RSS_UDP) {
8592                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8593                                 dev_flow->hash_fields |=
8594                                                 IBV_RX_HASH_SRC_PORT_UDP;
8595                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8596                                 dev_flow->hash_fields |=
8597                                                 IBV_RX_HASH_DST_PORT_UDP;
8598                         else
8599                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
8600                 }
8601         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
8602                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
8603                 if (rss_types & ETH_RSS_TCP) {
8604                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
8605                                 dev_flow->hash_fields |=
8606                                                 IBV_RX_HASH_SRC_PORT_TCP;
8607                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
8608                                 dev_flow->hash_fields |=
8609                                                 IBV_RX_HASH_DST_PORT_TCP;
8610                         else
8611                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
8612                 }
8613         }
8614 }
8615
8616 /**
8617  * Prepare an Rx Hash queue.
8618  *
8619  * @param[in] dev
8620  *   Pointer to Ethernet device.
8621  * @param[in] dev_flow
8622  *   Pointer to the mlx5_flow.
8623  * @param[in] rss_desc
8624  *   Pointer to the mlx5_flow_rss_desc.
8625  * @param[out] hrxq_idx
8626  *   Hash Rx queue index.
8627  *
8628  * @return
8629  *   Pointer to the hash Rx queue on success, NULL otherwise and rte_errno is set.
8630  */
8631 static struct mlx5_hrxq *
8632 flow_dv_hrxq_prepare(struct rte_eth_dev *dev,
8633                      struct mlx5_flow *dev_flow,
8634                      struct mlx5_flow_rss_desc *rss_desc,
8635                      uint32_t *hrxq_idx)
8636 {
8637         struct mlx5_priv *priv = dev->data->dev_private;
8638         struct mlx5_flow_handle *dh = dev_flow->handle;
8639         struct mlx5_hrxq *hrxq;
8640
8641         MLX5_ASSERT(rss_desc->queue_num);
8642         rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN;
8643         rss_desc->hash_fields = dev_flow->hash_fields;
8644         rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
8645         rss_desc->shared_rss = 0;
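             /* Get a hash Rx queue matching the descriptor, creating one on demand. */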
8646         *hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
8647         if (!*hrxq_idx)
8648                 return NULL;
8649         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8650                               *hrxq_idx);
8651         return hrxq;
8652 }
8653
8654 /**
8655  * Release sample sub action resource.
8656  *
8657  * @param[in, out] dev
8658  *   Pointer to rte_eth_dev structure.
8659  * @param[in] act_res
8660  *   Pointer to sample sub action resource.
8661  */
8662 static void
8663 flow_dv_sample_sub_actions_release(struct rte_eth_dev *dev,
8664                                    struct mlx5_flow_sub_actions_idx *act_res)
8665 {
8666         if (act_res->rix_hrxq) {
8667                 mlx5_hrxq_release(dev, act_res->rix_hrxq);
8668                 act_res->rix_hrxq = 0;
8669         }
8670         if (act_res->rix_encap_decap) {
8671                 flow_dv_encap_decap_resource_release(dev,
8672                                                      act_res->rix_encap_decap);
8673                 act_res->rix_encap_decap = 0;
8674         }
8675         if (act_res->rix_port_id_action) {
8676                 flow_dv_port_id_action_resource_release(dev,
8677                                                 act_res->rix_port_id_action);
8678                 act_res->rix_port_id_action = 0;
8679         }
8680         if (act_res->rix_tag) {
8681                 flow_dv_tag_release(dev, act_res->rix_tag);
8682                 act_res->rix_tag = 0;
8683         }
8684         if (act_res->cnt) {
8685                 flow_dv_counter_free(dev, act_res->cnt);
8686                 act_res->cnt = 0;
8687         }
8688 }
8689
8690 int
8691 flow_dv_sample_match_cb(struct mlx5_cache_list *list __rte_unused,
8692                         struct mlx5_cache_entry *entry, void *cb_ctx)
8693 {
8694         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8695         struct rte_eth_dev *dev = ctx->dev;
8696         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8697         struct mlx5_flow_dv_sample_resource *cache_resource =
8698                         container_of(entry, typeof(*cache_resource), entry);
8699
8700         if (resource->ratio == cache_resource->ratio &&
8701             resource->ft_type == cache_resource->ft_type &&
8702             resource->ft_id == cache_resource->ft_id &&
8703             resource->set_action == cache_resource->set_action &&
8704             !memcmp((void *)&resource->sample_act,
8705                     (void *)&cache_resource->sample_act,
8706                     sizeof(struct mlx5_flow_sub_actions_list))) {
8707                 /*
8708                  * Existing sample action should release the prepared
8709                  * sub-actions reference counter.
8710                  */
8711                 flow_dv_sample_sub_actions_release(dev,
8712                                                 &resource->sample_idx);
8713                 return 0;
8714         }
8715         return 1;
8716 }
8717
8718 struct mlx5_cache_entry *
8719 flow_dv_sample_create_cb(struct mlx5_cache_list *list __rte_unused,
8720                          struct mlx5_cache_entry *entry __rte_unused,
8721                          void *cb_ctx)
8722 {
8723         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8724         struct rte_eth_dev *dev = ctx->dev;
8725         struct mlx5_flow_dv_sample_resource *resource = ctx->data;
8726         void **sample_dv_actions = resource->sub_actions;
8727         struct mlx5_flow_dv_sample_resource *cache_resource;
8728         struct mlx5dv_dr_flow_sampler_attr sampler_attr;
8729         struct mlx5_priv *priv = dev->data->dev_private;
8730         struct mlx5_dev_ctx_shared *sh = priv->sh;
8731         struct mlx5_flow_tbl_resource *tbl;
8732         uint32_t idx = 0;
8733         const uint32_t next_ft_step = 1;
8734         uint32_t next_ft_id = resource->ft_id + next_ft_step;
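             /*
              * The normal (non-sampled) path continues one table level
              * below the sampler table, hence next_ft_step = 1 above.
              */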
8735         uint8_t is_egress = 0;
8736         uint8_t is_transfer = 0;
8737         struct rte_flow_error *error = ctx->error;
8738
8739         /* Register new sample resource. */
8740         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_SAMPLE], &idx);
8741         if (!cache_resource) {
8742                 rte_flow_error_set(error, ENOMEM,
8743                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8744                                           NULL,
8745                                           "cannot allocate resource memory");
8746                 return NULL;
8747         }
8748         *cache_resource = *resource;
8749         /* Create normal path table level */
8750         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8751                 is_transfer = 1;
8752         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
8753                 is_egress = 1;
8754         tbl = flow_dv_tbl_resource_get(dev, next_ft_id,
8755                                         is_egress, is_transfer,
8756                                         true, NULL, 0, 0, error);
8757         if (!tbl) {
8758                 rte_flow_error_set(error, ENOMEM,
8759                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8760                                           NULL,
8761                                           "failed to create normal path table "
8762                                           "for sample");
8763                 goto error;
8764         }
8765         cache_resource->normal_path_tbl = tbl;
8766         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
8767                 cache_resource->default_miss =
8768                                 mlx5_glue->dr_create_flow_action_default_miss();
8769                 if (!cache_resource->default_miss) {
8770                         rte_flow_error_set(error, ENOMEM,
8771                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8772                                                 NULL,
8773                                                 "cannot create default miss "
8774                                                 "action");
8775                         goto error;
8776                 }
8777                 sample_dv_actions[resource->sample_act.actions_num++] =
8778                                                 cache_resource->default_miss;
8779         }
8780         /* Create a DR sample action */
8781         sampler_attr.sample_ratio = cache_resource->ratio;
8782         sampler_attr.default_next_table = tbl->obj;
8783         sampler_attr.num_sample_actions = resource->sample_act.actions_num;
8784         sampler_attr.sample_actions = (struct mlx5dv_dr_action **)
8785                                                         &sample_dv_actions[0];
8786         sampler_attr.action = cache_resource->set_action;
8787         cache_resource->verbs_action =
8788                 mlx5_glue->dr_create_flow_action_sampler(&sampler_attr);
8789         if (!cache_resource->verbs_action) {
8790                 rte_flow_error_set(error, ENOMEM,
8791                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8792                                         NULL, "cannot create sample action");
8793                 goto error;
8794         }
8795         cache_resource->idx = idx;
8796         cache_resource->dev = dev;
8797         return &cache_resource->entry;
8798 error:
8799         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB &&
8800             cache_resource->default_miss)
8801                 claim_zero(mlx5_glue->destroy_flow_action
8802                                 (cache_resource->default_miss));
8803         else
8804                 flow_dv_sample_sub_actions_release(dev,
8805                                                    &cache_resource->sample_idx);
8806         if (cache_resource->normal_path_tbl)
8807                 flow_dv_tbl_resource_release(MLX5_SH(dev),
8808                                 cache_resource->normal_path_tbl);
8809         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_SAMPLE], idx);
8810         return NULL;
8811
8812 }
8813
8814 /**
8815  * Find existing sample resource or create and register a new one.
8816  *
8817  * @param[in, out] dev
8818  *   Pointer to rte_eth_dev structure.
8819  * @param[in] resource
8820  *   Pointer to sample resource.
8821  * @param[in, out] dev_flow
8822  *   Pointer to the dev_flow.
8823  * @param[out] error
8824  *   Pointer to error structure.
8825  *
8826  * @return
8827  *   0 on success, otherwise -errno and rte_errno is set.
8828  */
8829 static int
8830 flow_dv_sample_resource_register(struct rte_eth_dev *dev,
8831                          struct mlx5_flow_dv_sample_resource *resource,
8832                          struct mlx5_flow *dev_flow,
8833                          struct rte_flow_error *error)
8834 {
8835         struct mlx5_flow_dv_sample_resource *cache_resource;
8836         struct mlx5_cache_entry *entry;
8837         struct mlx5_priv *priv = dev->data->dev_private;
8838         struct mlx5_flow_cb_ctx ctx = {
8839                 .dev = dev,
8840                 .error = error,
8841                 .data = resource,
8842         };
8843
8844         entry = mlx5_cache_register(&priv->sh->sample_action_list, &ctx);
8845         if (!entry)
8846                 return -rte_errno;
8847         cache_resource = container_of(entry, typeof(*cache_resource), entry);
8848         dev_flow->handle->dvh.rix_sample = cache_resource->idx;
8849         dev_flow->dv.sample_res = cache_resource;
8850         return 0;
8851 }
8852
8853 int
8854 flow_dv_dest_array_match_cb(struct mlx5_cache_list *list __rte_unused,
8855                             struct mlx5_cache_entry *entry, void *cb_ctx)
8856 {
8857         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8858         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8859         struct rte_eth_dev *dev = ctx->dev;
8860         struct mlx5_flow_dv_dest_array_resource *cache_resource =
8861                         container_of(entry, typeof(*cache_resource), entry);
8862         uint32_t idx = 0;
8863
8864         if (resource->num_of_dest == cache_resource->num_of_dest &&
8865             resource->ft_type == cache_resource->ft_type &&
8866             !memcmp((void *)cache_resource->sample_act,
8867                     (void *)resource->sample_act,
8868                    (resource->num_of_dest *
8869                    sizeof(struct mlx5_flow_sub_actions_list)))) {
8870                 /*
8871                  * Existing sample action should release the prepared
8872                  * sub-actions reference counter.
8873                  */
8874                 for (idx = 0; idx < resource->num_of_dest; idx++)
8875                         flow_dv_sample_sub_actions_release(dev,
8876                                         &resource->sample_idx[idx]);
8877                 return 0;
8878         }
8879         return 1;
8880 }
8881
8882 struct mlx5_cache_entry *
8883 flow_dv_dest_array_create_cb(struct mlx5_cache_list *list __rte_unused,
8884                          struct mlx5_cache_entry *entry __rte_unused,
8885                          void *cb_ctx)
8886 {
8887         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
8888         struct rte_eth_dev *dev = ctx->dev;
8889         struct mlx5_flow_dv_dest_array_resource *cache_resource;
8890         struct mlx5_flow_dv_dest_array_resource *resource = ctx->data;
8891         struct mlx5dv_dr_action_dest_attr *dest_attr[MLX5_MAX_DEST_NUM] = { 0 };
8892         struct mlx5dv_dr_action_dest_reformat dest_reformat[MLX5_MAX_DEST_NUM];
8893         struct mlx5_priv *priv = dev->data->dev_private;
8894         struct mlx5_dev_ctx_shared *sh = priv->sh;
8895         struct mlx5_flow_sub_actions_list *sample_act;
8896         struct mlx5dv_dr_domain *domain;
8897         uint32_t idx = 0, res_idx = 0;
8898         struct rte_flow_error *error = ctx->error;
8899
8900         /* Register new destination array resource. */
8901         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DEST_ARRAY],
8902                                             &res_idx);
8903         if (!cache_resource) {
8904                 rte_flow_error_set(error, ENOMEM,
8905                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8906                                           NULL,
8907                                           "cannot allocate resource memory");
8908                 return NULL;
8909         }
8910         *cache_resource = *resource;
8911         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
8912                 domain = sh->fdb_domain;
8913         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
8914                 domain = sh->rx_domain;
8915         else
8916                 domain = sh->tx_domain;
8917         for (idx = 0; idx < resource->num_of_dest; idx++) {
8918                 dest_attr[idx] = (struct mlx5dv_dr_action_dest_attr *)
8919                                  mlx5_malloc(MLX5_MEM_ZERO,
8920                                  sizeof(struct mlx5dv_dr_action_dest_attr),
8921                                  0, SOCKET_ID_ANY);
8922                 if (!dest_attr[idx]) {
8923                         rte_flow_error_set(error, ENOMEM,
8924                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8925                                            NULL,
8926                                            "cannot allocate resource memory");
8927                         goto error;
8928                 }
8929                 dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST;
8930                 sample_act = &resource->sample_act[idx];
8931                 if (sample_act->action_flags == MLX5_FLOW_ACTION_QUEUE) {
8932                         dest_attr[idx]->dest = sample_act->dr_queue_action;
8933                 } else if (sample_act->action_flags ==
8934                           (MLX5_FLOW_ACTION_PORT_ID | MLX5_FLOW_ACTION_ENCAP)) {
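                             /*
                              * The mirrored copy is reformatted (encap) and
                              * then forwarded to the destination port.
                              */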
8935                         dest_attr[idx]->type = MLX5DV_DR_ACTION_DEST_REFORMAT;
8936                         dest_attr[idx]->dest_reformat = &dest_reformat[idx];
8937                         dest_attr[idx]->dest_reformat->reformat =
8938                                         sample_act->dr_encap_action;
8939                         dest_attr[idx]->dest_reformat->dest =
8940                                         sample_act->dr_port_id_action;
8941                 } else if (sample_act->action_flags ==
8942                            MLX5_FLOW_ACTION_PORT_ID) {
8943                         dest_attr[idx]->dest = sample_act->dr_port_id_action;
8944                 }
8945         }
8946         /* Create a dest array action. */
8947         cache_resource->action = mlx5_glue->dr_create_flow_action_dest_array
8948                                                 (domain,
8949                                                  cache_resource->num_of_dest,
8950                                                  dest_attr);
8951         if (!cache_resource->action) {
8952                 rte_flow_error_set(error, ENOMEM,
8953                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8954                                    NULL,
8955                                    "cannot create destination array action");
8956                 goto error;
8957         }
8958         cache_resource->idx = res_idx;
8959         cache_resource->dev = dev;
8960         for (idx = 0; idx < resource->num_of_dest; idx++)
8961                 mlx5_free(dest_attr[idx]);
8962         return &cache_resource->entry;
8963 error:
8964         for (idx = 0; idx < resource->num_of_dest; idx++) {
8965                 struct mlx5_flow_sub_actions_idx *act_res =
8966                                         &cache_resource->sample_idx[idx];
8967                 if (act_res->rix_hrxq &&
8968                     !mlx5_hrxq_release(dev,
8969                                 act_res->rix_hrxq))
8970                         act_res->rix_hrxq = 0;
8971                 if (act_res->rix_encap_decap &&
8972                         !flow_dv_encap_decap_resource_release(dev,
8973                                 act_res->rix_encap_decap))
8974                         act_res->rix_encap_decap = 0;
8975                 if (act_res->rix_port_id_action &&
8976                         !flow_dv_port_id_action_resource_release(dev,
8977                                 act_res->rix_port_id_action))
8978                         act_res->rix_port_id_action = 0;
8979                 if (dest_attr[idx])
8980                         mlx5_free(dest_attr[idx]);
8981         }
8982
8983         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DEST_ARRAY], res_idx);
8984         return NULL;
8985 }
8986
8987 /**
8988  * Find existing destination array resource or create and register a new one.
8989  *
8990  * @param[in, out] dev
8991  *   Pointer to rte_eth_dev structure.
8992  * @param[in] resource
8993  *   Pointer to destination array resource.
8994  * @param[in, out] dev_flow
8995  *   Pointer to the dev_flow.
8996  * @param[out] error
8997  *   Pointer to error structure.
8998  *
8999  * @return
9000  *   0 on success, otherwise -errno and rte_errno is set.
9001  */
9002 static int
9003 flow_dv_dest_array_resource_register(struct rte_eth_dev *dev,
9004                          struct mlx5_flow_dv_dest_array_resource *resource,
9005                          struct mlx5_flow *dev_flow,
9006                          struct rte_flow_error *error)
9007 {
9008         struct mlx5_flow_dv_dest_array_resource *cache_resource;
9009         struct mlx5_priv *priv = dev->data->dev_private;
9010         struct mlx5_cache_entry *entry;
9011         struct mlx5_flow_cb_ctx ctx = {
9012                 .dev = dev,
9013                 .error = error,
9014                 .data = resource,
9015         };
9016
9017         entry = mlx5_cache_register(&priv->sh->dest_array_list, &ctx);
9018         if (!entry)
9019                 return -rte_errno;
9020         cache_resource = container_of(entry, typeof(*cache_resource), entry);
9021         dev_flow->handle->dvh.rix_dest_array = cache_resource->idx;
9022         dev_flow->dv.dest_array_res = cache_resource;
9023         return 0;
9024 }
9025
9026 /**
9027  * Convert Sample action to DV specification.
9028  *
9029  * @param[in] dev
9030  *   Pointer to rte_eth_dev structure.
9031  * @param[in] action
9032  *   Pointer to action structure.
9033  * @param[in, out] dev_flow
9034  *   Pointer to the mlx5_flow.
9035  * @param[in] attr
9036  *   Pointer to the flow attributes.
9037  * @param[in, out] num_of_dest
9038  *   Pointer to the num of destination.
9039  * @param[in, out] sample_actions
9040  *   Pointer to sample actions list.
9041  * @param[in, out] res
9042  *   Pointer to sample resource.
9043  * @param[out] error
9044  *   Pointer to the error structure.
9045  *
9046  * @return
9047  *   0 on success, a negative errno value otherwise and rte_errno is set.
9048  */
9049 static int
9050 flow_dv_translate_action_sample(struct rte_eth_dev *dev,
9051                                 const struct rte_flow_action *action,
9052                                 struct mlx5_flow *dev_flow,
9053                                 const struct rte_flow_attr *attr,
9054                                 uint32_t *num_of_dest,
9055                                 void **sample_actions,
9056                                 struct mlx5_flow_dv_sample_resource *res,
9057                                 struct rte_flow_error *error)
9058 {
9059         struct mlx5_priv *priv = dev->data->dev_private;
9060         const struct rte_flow_action_sample *sample_action;
9061         const struct rte_flow_action *sub_actions;
9062         const struct rte_flow_action_queue *queue;
9063         struct mlx5_flow_sub_actions_list *sample_act;
9064         struct mlx5_flow_sub_actions_idx *sample_idx;
9065         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9066         struct mlx5_flow_rss_desc *rss_desc;
9067         uint64_t action_flags = 0;
9068
9069         MLX5_ASSERT(wks);
9070         rss_desc = &wks->rss_desc;
9071         sample_act = &res->sample_act;
9072         sample_idx = &res->sample_idx;
9073         sample_action = (const struct rte_flow_action_sample *)action->conf;
9074         res->ratio = sample_action->ratio;
9075         sub_actions = sample_action->actions;
9076         for (; sub_actions->type != RTE_FLOW_ACTION_TYPE_END; sub_actions++) {
9077                 int type = sub_actions->type;
9078                 uint32_t pre_rix = 0;
9079                 void *pre_r;
9080                 switch (type) {
9081                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9082                 {
9083                         struct mlx5_hrxq *hrxq;
9084                         uint32_t hrxq_idx;
9085
9086                         queue = sub_actions->conf;
9087                         rss_desc->queue_num = 1;
9088                         rss_desc->queue[0] = queue->index;
9089                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9090                                                     rss_desc, &hrxq_idx);
9091                         if (!hrxq)
9092                                 return rte_flow_error_set
9093                                         (error, rte_errno,
9094                                          RTE_FLOW_ERROR_TYPE_ACTION,
9095                                          NULL,
9096                                          "cannot create fate queue");
9097                         sample_act->dr_queue_action = hrxq->action;
9098                         sample_idx->rix_hrxq = hrxq_idx;
9099                         sample_actions[sample_act->actions_num++] =
9100                                                 hrxq->action;
9101                         (*num_of_dest)++;
9102                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9103                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9104                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9105                         dev_flow->handle->fate_action =
9106                                         MLX5_FLOW_FATE_QUEUE;
9107                         break;
9108                 }
9109                 case RTE_FLOW_ACTION_TYPE_MARK:
9110                 {
9111                         uint32_t tag_be = mlx5_flow_mark_set
9112                                 (((const struct rte_flow_action_mark *)
9113                                 (sub_actions->conf))->id);
9114
9115                         dev_flow->handle->mark = 1;
9116                         pre_rix = dev_flow->handle->dvh.rix_tag;
9117                         /* Save the mark resource before sample */
9118                         pre_r = dev_flow->dv.tag_resource;
9119                         if (flow_dv_tag_resource_register(dev, tag_be,
9120                                                   dev_flow, error))
9121                                 return -rte_errno;
9122                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9123                         sample_act->dr_tag_action =
9124                                 dev_flow->dv.tag_resource->action;
9125                         sample_idx->rix_tag =
9126                                 dev_flow->handle->dvh.rix_tag;
9127                         sample_actions[sample_act->actions_num++] =
9128                                                 sample_act->dr_tag_action;
9129                         /* Recover the mark resource after sample */
9130                         dev_flow->dv.tag_resource = pre_r;
9131                         dev_flow->handle->dvh.rix_tag = pre_rix;
9132                         action_flags |= MLX5_FLOW_ACTION_MARK;
9133                         break;
9134                 }
9135                 case RTE_FLOW_ACTION_TYPE_COUNT:
9136                 {
9137                         uint32_t counter;
9138
9139                         counter = flow_dv_translate_create_counter(dev,
9140                                         dev_flow, sub_actions->conf, 0);
9141                         if (!counter)
9142                                 return rte_flow_error_set
9143                                                 (error, rte_errno,
9144                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9145                                                  NULL,
9146                                                  "cannot create counter"
9147                                                  " object.");
9148                         sample_idx->cnt = counter;
9149                         sample_act->dr_cnt_action =
9150                                   (flow_dv_counter_get_by_idx(dev,
9151                                   counter, NULL))->action;
9152                         sample_actions[sample_act->actions_num++] =
9153                                                 sample_act->dr_cnt_action;
9154                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9155                         break;
9156                 }
9157                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9158                 {
9159                         struct mlx5_flow_dv_port_id_action_resource
9160                                         port_id_resource;
9161                         uint32_t port_id = 0;
9162
9163                         memset(&port_id_resource, 0, sizeof(port_id_resource));
9164                         /* Save the port id resource before sample */
9165                         pre_rix = dev_flow->handle->rix_port_id_action;
9166                         pre_r = dev_flow->dv.port_id_action;
9167                         if (flow_dv_translate_action_port_id(dev, sub_actions,
9168                                                              &port_id, error))
9169                                 return -rte_errno;
9170                         port_id_resource.port_id = port_id;
9171                         if (flow_dv_port_id_action_resource_register
9172                             (dev, &port_id_resource, dev_flow, error))
9173                                 return -rte_errno;
9174                         sample_act->dr_port_id_action =
9175                                 dev_flow->dv.port_id_action->action;
9176                         sample_idx->rix_port_id_action =
9177                                 dev_flow->handle->rix_port_id_action;
9178                         sample_actions[sample_act->actions_num++] =
9179                                                 sample_act->dr_port_id_action;
9180                         /* Recover the port id resource after sample */
9181                         dev_flow->dv.port_id_action = pre_r;
9182                         dev_flow->handle->rix_port_id_action = pre_rix;
9183                         (*num_of_dest)++;
9184                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9185                         break;
9186                 }
9187                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
9188                         /* Save the encap resource before sample */
9189                         pre_rix = dev_flow->handle->dvh.rix_encap_decap;
9190                         pre_r = dev_flow->dv.encap_decap;
9191                         if (flow_dv_create_action_l2_encap(dev, sub_actions,
9192                                                            dev_flow,
9193                                                            attr->transfer,
9194                                                            error))
9195                                 return -rte_errno;
9196                         sample_act->dr_encap_action =
9197                                 dev_flow->dv.encap_decap->action;
9198                         sample_idx->rix_encap_decap =
9199                                 dev_flow->handle->dvh.rix_encap_decap;
9200                         sample_actions[sample_act->actions_num++] =
9201                                                 sample_act->dr_encap_action;
9202                         /* Recover the encap resource after sample */
9203                         dev_flow->dv.encap_decap = pre_r;
9204                         dev_flow->handle->dvh.rix_encap_decap = pre_rix;
9205                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9206                         break;
9207                 default:
9208                         return rte_flow_error_set(error, EINVAL,
9209                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9210                                 NULL,
9211                                 "action not supported for sampler");
9212                 }
9213         }
9214         sample_act->action_flags = action_flags;
9215         res->ft_id = dev_flow->dv.group;
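             /*
              * Transfer (E-Switch) flows add a SET action that writes the
              * vport metadata tag into REG_C_0, so sampled packets keep
              * their source port identity.
              */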
9216         if (attr->transfer) {
9217                 union {
9218                         uint32_t action_in[MLX5_ST_SZ_DW(set_action_in)];
9219                         uint64_t set_action;
9220                 } action_ctx = { .set_action = 0 };
9221
9222                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
9223                 MLX5_SET(set_action_in, action_ctx.action_in, action_type,
9224                          MLX5_MODIFICATION_TYPE_SET);
9225                 MLX5_SET(set_action_in, action_ctx.action_in, field,
9226                          MLX5_MODI_META_REG_C_0);
9227                 MLX5_SET(set_action_in, action_ctx.action_in, data,
9228                          priv->vport_meta_tag);
9229                 res->set_action = action_ctx.set_action;
9230         } else if (attr->ingress) {
9231                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9232         } else {
9233                 res->ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX;
9234         }
9235         return 0;
9236 }
9237
9238 /**
9239  * Create the sample action and register its resources.
9240  *
9241  * @param[in] dev
9242  *   Pointer to rte_eth_dev structure.
9243  * @param[in, out] dev_flow
9244  *   Pointer to the mlx5_flow.
9245  * @param[in] num_of_dest
9246  *   The num of destination.
9247  * @param[in, out] res
9248  *   Pointer to sample resource.
9249  * @param[in, out] mdest_res
9250  *   Pointer to destination array resource.
9251  * @param[in] sample_actions
9252  *   Pointer to sample path actions list.
9253  * @param[in] action_flags
9254  *   Holds the actions detected until now.
9255  * @param[out] error
9256  *   Pointer to the error structure.
9257  *
9258  * @return
9259  *   0 on success, a negative errno value otherwise and rte_errno is set.
9260  */
9261 static int
9262 flow_dv_create_action_sample(struct rte_eth_dev *dev,
9263                              struct mlx5_flow *dev_flow,
9264                              uint32_t num_of_dest,
9265                              struct mlx5_flow_dv_sample_resource *res,
9266                              struct mlx5_flow_dv_dest_array_resource *mdest_res,
9267                              void **sample_actions,
9268                              uint64_t action_flags,
9269                              struct rte_flow_error *error)
9270 {
9271         /* Update the normal path action resource at the last index of the array. */
9272         uint32_t dest_index = MLX5_MAX_DEST_NUM - 1;
9273         struct mlx5_flow_sub_actions_list *sample_act =
9274                                         &mdest_res->sample_act[dest_index];
9275         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9276         struct mlx5_flow_rss_desc *rss_desc;
9277         uint32_t normal_idx = 0;
9278         struct mlx5_hrxq *hrxq;
9279         uint32_t hrxq_idx;
9280
9281         MLX5_ASSERT(wks);
9282         rss_desc = &wks->rss_desc;
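             /*
              * More than one destination means mirroring: build a
              * destination array. A single destination uses the plain
              * sampler resource.
              */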
9283         if (num_of_dest > 1) {
9284                 if (sample_act->action_flags & MLX5_FLOW_ACTION_QUEUE) {
9285                         /* Handle QP action for mirroring */
9286                         hrxq = flow_dv_hrxq_prepare(dev, dev_flow,
9287                                                     rss_desc, &hrxq_idx);
9288                         if (!hrxq)
9289                                 return rte_flow_error_set
9290                                      (error, rte_errno,
9291                                       RTE_FLOW_ERROR_TYPE_ACTION,
9292                                       NULL,
9293                                       "cannot create rx queue");
9294                         normal_idx++;
9295                         mdest_res->sample_idx[dest_index].rix_hrxq = hrxq_idx;
9296                         sample_act->dr_queue_action = hrxq->action;
9297                         if (action_flags & MLX5_FLOW_ACTION_MARK)
9298                                 dev_flow->handle->rix_hrxq = hrxq_idx;
9299                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9300                 }
9301                 if (sample_act->action_flags & MLX5_FLOW_ACTION_ENCAP) {
9302                         normal_idx++;
9303                         mdest_res->sample_idx[dest_index].rix_encap_decap =
9304                                 dev_flow->handle->dvh.rix_encap_decap;
9305                         sample_act->dr_encap_action =
9306                                 dev_flow->dv.encap_decap->action;
9307                 }
9308                 if (sample_act->action_flags & MLX5_FLOW_ACTION_PORT_ID) {
9309                         normal_idx++;
9310                         mdest_res->sample_idx[dest_index].rix_port_id_action =
9311                                 dev_flow->handle->rix_port_id_action;
9312                         sample_act->dr_port_id_action =
9313                                 dev_flow->dv.port_id_action->action;
9314                 }
9315                 sample_act->actions_num = normal_idx;
9316                 /* Update the sample action resource at the first index of the array. */
9317                 mdest_res->ft_type = res->ft_type;
9318                 memcpy(&mdest_res->sample_idx[0], &res->sample_idx,
9319                                 sizeof(struct mlx5_flow_sub_actions_idx));
9320                 memcpy(&mdest_res->sample_act[0], &res->sample_act,
9321                                 sizeof(struct mlx5_flow_sub_actions_list));
9322                 mdest_res->num_of_dest = num_of_dest;
9323                 if (flow_dv_dest_array_resource_register(dev, mdest_res,
9324                                                          dev_flow, error))
9325                         return rte_flow_error_set(error, EINVAL,
9326                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9327                                                   NULL, "can't create sample "
9328                                                   "action");
9329         } else {
9330                 res->sub_actions = sample_actions;
9331                 if (flow_dv_sample_resource_register(dev, res, dev_flow, error))
9332                         return rte_flow_error_set(error, EINVAL,
9333                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9334                                                   NULL,
9335                                                   "can't create sample action");
9336         }
9337         return 0;
9338 }
9339
9340 /**
9341  * Remove an ASO age action from age actions list.
9342  *
9343  * @param[in] dev
9344  *   Pointer to the Ethernet device structure.
9345  * @param[in] age
9346  *   Pointer to the aso age action handler.
9347  */
9348 static void
9349 flow_dv_aso_age_remove_from_age(struct rte_eth_dev *dev,
9350                                 struct mlx5_aso_age_action *age)
9351 {
9352         struct mlx5_age_info *age_info;
9353         struct mlx5_age_param *age_param = &age->age_params;
9354         struct mlx5_priv *priv = dev->data->dev_private;
9355         uint16_t expected = AGE_CANDIDATE;
9356
9357         age_info = GET_PORT_AGE_INFO(priv);
9358         if (!__atomic_compare_exchange_n(&age_param->state, &expected,
9359                                          AGE_FREE, false, __ATOMIC_RELAXED,
9360                                          __ATOMIC_RELAXED)) {
9361                 /*
9362                  * We need the lock even on age timeout, since the
9363                  * age action may still be in process.
9364                  */
9365                 rte_spinlock_lock(&age_info->aged_sl);
9366                 LIST_REMOVE(age, next);
9367                 rte_spinlock_unlock(&age_info->aged_sl);
9368                 __atomic_store_n(&age_param->state, AGE_FREE, __ATOMIC_RELAXED);
9369         }
9370 }
9371
9372 /**
9373  * Release an ASO age action.
9374  *
9375  * @param[in] dev
9376  *   Pointer to the Ethernet device structure.
9377  * @param[in] age_idx
9378  *   Index of ASO age action to release.
9382  *
9383  * @return
9384  *   0 when age action was removed, otherwise the number of references.
9385  */
9386 static int
9387 flow_dv_aso_age_release(struct rte_eth_dev *dev, uint32_t age_idx)
9388 {
9389         struct mlx5_priv *priv = dev->data->dev_private;
9390         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9391         struct mlx5_aso_age_action *age = flow_aso_age_get_by_idx(dev, age_idx);
9392         uint32_t ret = __atomic_sub_fetch(&age->refcnt, 1, __ATOMIC_RELAXED);
9393
9394         if (!ret) {
9395                 flow_dv_aso_age_remove_from_age(dev, age);
9396                 rte_spinlock_lock(&mng->free_sl);
9397                 LIST_INSERT_HEAD(&mng->free, age, next);
9398                 rte_spinlock_unlock(&mng->free_sl);
9399         }
9400         return ret;
9401 }
9402
9403 /**
9404  * Resize the ASO age pools array by MLX5_CNT_CONTAINER_RESIZE pools.
9405  *
9406  * @param[in] dev
9407  *   Pointer to the Ethernet device structure.
9408  *
9409  * @return
9410  *   0 on success, otherwise negative errno value and rte_errno is set.
9411  */
9412 static int
9413 flow_dv_aso_age_pools_resize(struct rte_eth_dev *dev)
9414 {
9415         struct mlx5_priv *priv = dev->data->dev_private;
9416         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9417         void *old_pools = mng->pools;
9418         uint32_t resize = mng->n + MLX5_CNT_CONTAINER_RESIZE;
9419         uint32_t mem_size = sizeof(struct mlx5_aso_age_pool *) * resize;
9420         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
9421
9422         if (!pools) {
9423                 rte_errno = ENOMEM;
9424                 return -ENOMEM;
9425         }
9426         if (old_pools) {
9427                 memcpy(pools, old_pools,
9428                        mng->n * sizeof(struct mlx5_aso_age_pool *));
9429                 mlx5_free(old_pools);
9430         } else {
9431                 /* First ASO flow hit allocation - starting ASO data-path. */
9432                 int ret = mlx5_aso_queue_start(priv->sh);
9433
9434                 if (ret) {
9435                         mlx5_free(pools);
9436                         return ret;
9437                 }
9438         }
9439         mng->n = resize;
9440         mng->pools = pools;
9441         return 0;
9442 }
9443
9444 /**
9445  * Create and initialize a new ASO aging pool.
9446  *
9447  * @param[in] dev
9448  *   Pointer to the Ethernet device structure.
9449  * @param[out] age_free
9450  *   Where to put the pointer of a new age action.
9451  *
9452  * @return
9453  *   The age actions pool pointer and @p age_free is set on success,
9454  *   NULL otherwise and rte_errno is set.
9455  */
9456 static struct mlx5_aso_age_pool *
9457 flow_dv_age_pool_create(struct rte_eth_dev *dev,
9458                         struct mlx5_aso_age_action **age_free)
9459 {
9460         struct mlx5_priv *priv = dev->data->dev_private;
9461         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9462         struct mlx5_aso_age_pool *pool = NULL;
9463         struct mlx5_devx_obj *obj = NULL;
9464         uint32_t i;
9465
9466         obj = mlx5_devx_cmd_create_flow_hit_aso_obj(priv->sh->ctx,
9467                                                     priv->sh->pdn);
9468         if (!obj) {
9469                 rte_errno = ENODATA;
9470                 DRV_LOG(ERR, "Failed to create flow_hit_aso_obj using DevX.");
9471                 return NULL;
9472         }
9473         pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
9474         if (!pool) {
9475                 claim_zero(mlx5_devx_cmd_destroy(obj));
9476                 rte_errno = ENOMEM;
9477                 return NULL;
9478         }
9479         pool->flow_hit_aso_obj = obj;
9480         pool->time_of_last_age_check = MLX5_CURR_TIME_SEC;
9481         rte_spinlock_lock(&mng->resize_sl);
9482         pool->index = mng->next;
9483         /* Resize pools array if there is no room for the new pool in it. */
9484         if (pool->index == mng->n && flow_dv_aso_age_pools_resize(dev)) {
9485                 claim_zero(mlx5_devx_cmd_destroy(obj));
9486                 mlx5_free(pool);
9487                 rte_spinlock_unlock(&mng->resize_sl);
9488                 return NULL;
9489         }
9490         mng->pools[pool->index] = pool;
9491         mng->next++;
9492         rte_spinlock_unlock(&mng->resize_sl);
9493         /* Assign the first action in the new pool, the rest go to the free list. */
9494         *age_free = &pool->actions[0];
9495         for (i = 1; i < MLX5_ASO_AGE_ACTIONS_PER_POOL; i++) {
9496                 pool->actions[i].offset = i;
9497                 LIST_INSERT_HEAD(&mng->free, &pool->actions[i], next);
9498         }
9499         return pool;
9500 }
9501
9502 /**
9503  * Allocate an ASO age action.
9504  *
9505  * @param[in] dev
9506  *   Pointer to the Ethernet device structure.
9507  * @param[out] error
9508  *   Pointer to the error structure.
9509  *
9510  * @return
9511  *   Index to ASO age action on success, 0 otherwise and rte_errno is set.
9512  */
9513 static uint32_t
9514 flow_dv_aso_age_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
9515 {
9516         struct mlx5_priv *priv = dev->data->dev_private;
9517         const struct mlx5_aso_age_pool *pool;
9518         struct mlx5_aso_age_action *age_free = NULL;
9519         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
9520
9521         MLX5_ASSERT(mng);
9522         /* Try to get the next free age action bit. */
9523         rte_spinlock_lock(&mng->free_sl);
9524         age_free = LIST_FIRST(&mng->free);
9525         if (age_free) {
9526                 LIST_REMOVE(age_free, next);
9527         } else if (!flow_dv_age_pool_create(dev, &age_free)) {
9528                 rte_spinlock_unlock(&mng->free_sl);
9529                 rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
9530                                    NULL, "failed to create ASO age pool");
9531                 return 0; /* 0 is an error. */
9532         }
9533         rte_spinlock_unlock(&mng->free_sl);
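             /*
              * age_free - offset points back at actions[0]; container_of()
              * then recovers the owning pool from its actions array.
              */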
9534         pool = container_of
9535           ((const struct mlx5_aso_age_action (*)[MLX5_ASO_AGE_ACTIONS_PER_POOL])
9536                   (age_free - age_free->offset), const struct mlx5_aso_age_pool,
9537                                                                        actions);
9538         if (!age_free->dr_action) {
9539                 int reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_FLOW_HIT, 0,
9540                                                  error);
9541
9542                 if (reg_c < 0) {
9543                         rte_flow_error_set(error, rte_errno,
9544                                            RTE_FLOW_ERROR_TYPE_ACTION,
9545                                            NULL, "failed to get reg_c "
9546                                            "for ASO flow hit");
9547                         return 0; /* 0 is an error. */
9548                 }
9549 #ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO
9550                 age_free->dr_action = mlx5_glue->dv_create_flow_action_aso
9551                                 (priv->sh->rx_domain,
9552                                  pool->flow_hit_aso_obj->obj, age_free->offset,
9553                                  MLX5DV_DR_ACTION_FLAGS_ASO_FIRST_HIT_SET,
9554                                  (reg_c - REG_C_0));
9555 #endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */
9556                 if (!age_free->dr_action) {
9557                         rte_errno = errno;
9558                         rte_spinlock_lock(&mng->free_sl);
9559                         LIST_INSERT_HEAD(&mng->free, age_free, next);
9560                         rte_spinlock_unlock(&mng->free_sl);
9561                         rte_flow_error_set(error, rte_errno,
9562                                            RTE_FLOW_ERROR_TYPE_ACTION,
9563                                            NULL, "failed to create ASO "
9564                                            "flow hit action");
9565                         return 0; /* 0 is an error. */
9566                 }
9567         }
9568         __atomic_store_n(&age_free->refcnt, 1, __ATOMIC_RELAXED);
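             /*
              * Encode the pool index in the low 16 bits and (offset + 1) in
              * the high 16 bits; the "+ 1" keeps any valid index from being
              * 0, which is reserved for failure.
              */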
9569         return pool->index | ((age_free->offset + 1) << 16);
9570 }
9571
9572 /**
9573  * Create an age action using the ASO mechanism.
9574  *
9575  * @param[in] dev
9576  *   Pointer to rte_eth_dev structure.
9577  * @param[in] age
9578  *   Pointer to the aging action configuration.
9579  * @param[out] error
9580  *   Pointer to the error structure.
9581  *
9582  * @return
9583  *   Index to ASO age action on success, 0 otherwise.
9584  */
9585 static uint32_t
9586 flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
9587                                  const struct rte_flow_action_age *age,
9588                                  struct rte_flow_error *error)
9589 {
9590         uint32_t age_idx = 0;
9591         struct mlx5_aso_age_action *aso_age;
9592
9593         age_idx = flow_dv_aso_age_alloc(dev, error);
9594         if (!age_idx)
9595                 return 0;
9596         aso_age = flow_aso_age_get_by_idx(dev, age_idx);
9597         aso_age->age_params.context = age->context;
9598         aso_age->age_params.timeout = age->timeout;
9599         aso_age->age_params.port_id = dev->data->port_id;
9600         __atomic_store_n(&aso_age->age_params.sec_since_last_hit, 0,
9601                          __ATOMIC_RELAXED);
9602         __atomic_store_n(&aso_age->age_params.state, AGE_CANDIDATE,
9603                          __ATOMIC_RELAXED);
9604         return age_idx;
9605 }
9606
9607 /**
9608  * Fill the flow with DV spec, lock free
9609  * (the required mutex must be acquired by the caller).
9610  *
9611  * @param[in] dev
9612  *   Pointer to rte_eth_dev structure.
9613  * @param[in, out] dev_flow
9614  *   Pointer to the sub flow.
9615  * @param[in] attr
9616  *   Pointer to the flow attributes.
9617  * @param[in] items
9618  *   Pointer to the list of items.
9619  * @param[in] actions
9620  *   Pointer to the list of actions.
9621  * @param[out] error
9622  *   Pointer to the error structure.
9623  *
9624  * @return
9625  *   0 on success, a negative errno value otherwise and rte_errno is set.
9626  */
9627 static int
9628 flow_dv_translate(struct rte_eth_dev *dev,
9629                   struct mlx5_flow *dev_flow,
9630                   const struct rte_flow_attr *attr,
9631                   const struct rte_flow_item items[],
9632                   const struct rte_flow_action actions[],
9633                   struct rte_flow_error *error)
9634 {
9635         struct mlx5_priv *priv = dev->data->dev_private;
9636         struct mlx5_dev_config *dev_conf = &priv->config;
9637         struct rte_flow *flow = dev_flow->flow;
9638         struct mlx5_flow_handle *handle = dev_flow->handle;
9639         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
9640         struct mlx5_flow_rss_desc *rss_desc;
9641         uint64_t item_flags = 0;
9642         uint64_t last_item = 0;
9643         uint64_t action_flags = 0;
9644         uint64_t priority = attr->priority;
9645         struct mlx5_flow_dv_matcher matcher = {
9646                 .mask = {
9647                         .size = sizeof(matcher.mask.buf) -
9648                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
9649                 },
9650         };
9651         int actions_n = 0;
9652         bool actions_end = false;
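             /*
              * Scratch buffer sized for the maximal number of modification
              * commands following the modify header resource.
              */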
9653         union {
9654                 struct mlx5_flow_dv_modify_hdr_resource res;
9655                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
9656                             sizeof(struct mlx5_modification_cmd) *
9657                             (MLX5_MAX_MODIFY_NUM + 1)];
9658         } mhdr_dummy;
9659         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
9660         const struct rte_flow_action_count *count = NULL;
9661         const struct rte_flow_action_age *age = NULL;
9662         union flow_dv_attr flow_attr = { .attr = 0 };
9663         uint32_t tag_be;
9664         union mlx5_flow_tbl_key tbl_key;
9665         uint32_t modify_action_position = UINT32_MAX;
9666         void *match_mask = matcher.mask.buf;
9667         void *match_value = dev_flow->dv.value.buf;
9668         uint8_t next_protocol = 0xff;
9669         struct rte_vlan_hdr vlan = { 0 };
9670         struct mlx5_flow_dv_dest_array_resource mdest_res;
9671         struct mlx5_flow_dv_sample_resource sample_res;
9672         void *sample_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
9673         struct mlx5_flow_sub_actions_list *sample_act;
9674         uint32_t sample_act_pos = UINT32_MAX;
9675         uint32_t num_of_dest = 0;
9676         int tmp_actions_n = 0;
9677         uint32_t table;
9678         int ret = 0;
9679         const struct mlx5_flow_tunnel *tunnel;
9680         struct flow_grp_info grp_info = {
9681                 .external = !!dev_flow->external,
9682                 .transfer = !!attr->transfer,
9683                 .fdb_def_rule = !!priv->fdb_def_rule,
9684                 .skip_scale = !!dev_flow->skip_scale,
9685         };
9686
9687         if (!wks)
9688                 return rte_flow_error_set(error, ENOMEM,
9689                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9690                                           NULL,
9691                                           "failed to push flow workspace");
9692         rss_desc = &wks->rss_desc;
9693         memset(&mdest_res, 0, sizeof(struct mlx5_flow_dv_dest_array_resource));
9694         memset(&sample_res, 0, sizeof(struct mlx5_flow_dv_sample_resource));
9695         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
9696                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
9697         /* Update the normal path action resource at the last index of the array. */
9698         sample_act = &mdest_res.sample_act[MLX5_MAX_DEST_NUM - 1];
9699         tunnel = is_flow_tunnel_match_rule(dev, attr, items, actions) ?
9700                  flow_items_to_tunnel(items) :
9701                  is_flow_tunnel_steer_rule(dev, attr, items, actions) ?
9702                  flow_actions_to_tunnel(actions) :
9703                  dev_flow->tunnel;
9706         grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
9707                                 (dev, tunnel, attr, items, actions);
9708         ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
9709                                        &grp_info, error);
9710         if (ret)
9711                 return ret;
9712         dev_flow->dv.group = table;
9713         if (attr->transfer)
9714                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
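             /* The reserved priority value maps to the last supported flow priority. */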
9715         if (priority == MLX5_FLOW_PRIO_RSVD)
9716                 priority = dev_conf->flow_prio - 1;
9717         /* The number of actions must be reset to 0 in case of a dirty stack. */
9718         mhdr_res->actions_num = 0;
9719         if (is_flow_tunnel_match_rule(dev, attr, items, actions)) {
9720                 /*
9721                  * Do not add a decap action if the match rule drops the
9722                  * packet: HW rejects rules combining decap and drop.
9723                  *
9724                  * If a tunnel match rule was inserted before the matching
9725                  * tunnel-set rule, the flow table used in the match rule
9726                  * must be registered. The current implementation handles
9727                  * that in flow_dv_matcher_register() at the function end.
9728                  */
9729                 bool add_decap = true;
9730                 const struct rte_flow_action *ptr = actions;
9731
9732                 for (; ptr->type != RTE_FLOW_ACTION_TYPE_END; ptr++) {
9733                         if (ptr->type == RTE_FLOW_ACTION_TYPE_DROP) {
9734                                 add_decap = false;
9735                                 break;
9736                         }
9737                 }
9738                 if (add_decap) {
9739                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9740                                                            attr->transfer,
9741                                                            error))
9742                                 return -rte_errno;
9743                         dev_flow->dv.actions[actions_n++] =
9744                                         dev_flow->dv.encap_decap->action;
9745                         action_flags |= MLX5_FLOW_ACTION_DECAP;
9746                 }
9747         }
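        /*
         * First pass: translate each rte_flow action into a DV action and
         * accumulate the action flags. Modify-header actions are merged
         * into mhdr_res and emitted as a single DV action at the END action.
         */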
9748         for (; !actions_end ; actions++) {
9749                 const struct rte_flow_action_queue *queue;
9750                 const struct rte_flow_action_rss *rss;
9751                 const struct rte_flow_action *action = actions;
9752                 const uint8_t *rss_key;
9753                 const struct rte_flow_action_meter *mtr;
9754                 struct mlx5_flow_tbl_resource *tbl;
9755                 struct mlx5_aso_age_action *age_act;
9756                 uint32_t port_id = 0;
9757                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
9758                 int action_type = actions->type;
9759                 const struct rte_flow_action *found_action = NULL;
9760                 struct mlx5_flow_meter *fm = NULL;
9761                 uint32_t jump_group = 0;
9762
9763                 if (!mlx5_flow_os_action_supported(action_type))
9764                         return rte_flow_error_set(error, ENOTSUP,
9765                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9766                                                   actions,
9767                                                   "action not supported");
9768                 switch (action_type) {
9769                 case MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET:
9770                         action_flags |= MLX5_FLOW_ACTION_TUNNEL_SET;
9771                         break;
9772                 case RTE_FLOW_ACTION_TYPE_VOID:
9773                         break;
9774                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
9775                         if (flow_dv_translate_action_port_id(dev, action,
9776                                                              &port_id, error))
9777                                 return -rte_errno;
9778                         port_id_resource.port_id = port_id;
9779                         MLX5_ASSERT(!handle->rix_port_id_action);
9780                         if (flow_dv_port_id_action_resource_register
9781                             (dev, &port_id_resource, dev_flow, error))
9782                                 return -rte_errno;
9783                         dev_flow->dv.actions[actions_n++] =
9784                                         dev_flow->dv.port_id_action->action;
9785                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9786                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
9787                         sample_act->action_flags |= MLX5_FLOW_ACTION_PORT_ID;
9788                         num_of_dest++;
9789                         break;
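                /*
                 * FLAG is translated as a MARK with the default ID. In
                 * extensive metadata mode MARK becomes a modify-header
                 * action; in legacy mode a dedicated tag resource action
                 * is registered instead.
                 */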
9790                 case RTE_FLOW_ACTION_TYPE_FLAG:
9791                         action_flags |= MLX5_FLOW_ACTION_FLAG;
9792                         dev_flow->handle->mark = 1;
9793                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9794                                 struct rte_flow_action_mark mark = {
9795                                         .id = MLX5_FLOW_MARK_DEFAULT,
9796                                 };
9797
9798                                 if (flow_dv_convert_action_mark(dev, &mark,
9799                                                                 mhdr_res,
9800                                                                 error))
9801                                         return -rte_errno;
9802                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9803                                 break;
9804                         }
9805                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
9806                         /*
9807                          * Only one FLAG or MARK is supported per device flow
9808                          * right now. So the pointer to the tag resource must be
9809                          * zero before the register process.
9810                          */
9811                         MLX5_ASSERT(!handle->dvh.rix_tag);
9812                         if (flow_dv_tag_resource_register(dev, tag_be,
9813                                                           dev_flow, error))
9814                                 return -rte_errno;
9815                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9816                         dev_flow->dv.actions[actions_n++] =
9817                                         dev_flow->dv.tag_resource->action;
9818                         break;
9819                 case RTE_FLOW_ACTION_TYPE_MARK:
9820                         action_flags |= MLX5_FLOW_ACTION_MARK;
9821                         dev_flow->handle->mark = 1;
9822                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
9823                                 const struct rte_flow_action_mark *mark =
9824                                         (const struct rte_flow_action_mark *)
9825                                                 actions->conf;
9826
9827                                 if (flow_dv_convert_action_mark(dev, mark,
9828                                                                 mhdr_res,
9829                                                                 error))
9830                                         return -rte_errno;
9831                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
9832                                 break;
9833                         }
9834                         /* Fall-through */
9835                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
9836                         /* Legacy (non-extensive) MARK action. */
9837                         tag_be = mlx5_flow_mark_set
9838                               (((const struct rte_flow_action_mark *)
9839                                (actions->conf))->id);
9840                         MLX5_ASSERT(!handle->dvh.rix_tag);
9841                         if (flow_dv_tag_resource_register(dev, tag_be,
9842                                                           dev_flow, error))
9843                                 return -rte_errno;
9844                         MLX5_ASSERT(dev_flow->dv.tag_resource);
9845                         dev_flow->dv.actions[actions_n++] =
9846                                         dev_flow->dv.tag_resource->action;
9847                         break;
9848                 case RTE_FLOW_ACTION_TYPE_SET_META:
9849                         if (flow_dv_convert_action_set_meta
9850                                 (dev, mhdr_res, attr,
9851                                  (const struct rte_flow_action_set_meta *)
9852                                   actions->conf, error))
9853                                 return -rte_errno;
9854                         action_flags |= MLX5_FLOW_ACTION_SET_META;
9855                         break;
9856                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
9857                         if (flow_dv_convert_action_set_tag
9858                                 (dev, mhdr_res,
9859                                  (const struct rte_flow_action_set_tag *)
9860                                   actions->conf, error))
9861                                 return -rte_errno;
9862                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
9863                         break;
9864                 case RTE_FLOW_ACTION_TYPE_DROP:
9865                         action_flags |= MLX5_FLOW_ACTION_DROP;
9866                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
9867                         break;
9868                 case RTE_FLOW_ACTION_TYPE_QUEUE:
9869                         queue = actions->conf;
9870                         rss_desc->queue_num = 1;
9871                         rss_desc->queue[0] = queue->index;
9872                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
9873                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
9874                         sample_act->action_flags |= MLX5_FLOW_ACTION_QUEUE;
9875                         num_of_dest++;
9876                         break;
9877                 case RTE_FLOW_ACTION_TYPE_RSS:
9878                         rss = actions->conf;
9879                         memcpy(rss_desc->queue, rss->queue,
9880                                rss->queue_num * sizeof(uint16_t));
9881                         rss_desc->queue_num = rss->queue_num;
9882                         /* NULL RSS key indicates default RSS key. */
9883                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
9884                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
9885                         /*
9886                          * rss->level and rss->types should be set in advance
9887                          * when expanding items for RSS.
9888                          */
9889                         action_flags |= MLX5_FLOW_ACTION_RSS;
9890                         dev_flow->handle->fate_action = rss_desc->shared_rss ?
9891                                 MLX5_FLOW_FATE_SHARED_RSS :
9892                                 MLX5_FLOW_FATE_QUEUE;
9893                         break;
9894                 case MLX5_RTE_FLOW_ACTION_TYPE_AGE:
9895                         flow->age = (uint32_t)(uintptr_t)(action->conf);
9896                         age_act = flow_aso_age_get_by_idx(dev, flow->age);
9897                         __atomic_fetch_add(&age_act->refcnt, 1,
9898                                            __ATOMIC_RELAXED);
9899                         dev_flow->dv.actions[actions_n++] = age_act->dr_action;
9900                         action_flags |= MLX5_FLOW_ACTION_AGE;
9901                         break;
9902                 case RTE_FLOW_ACTION_TYPE_AGE:
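                        /*
                         * With ASO flow-hit support on a non-root group,
                         * aging uses a dedicated ASO action; otherwise fall
                         * through and age through a flow counter.
                         */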
9903                         if (priv->sh->flow_hit_aso_en && attr->group) {
9904                                 flow->age = flow_dv_translate_create_aso_age
9905                                                 (dev, action->conf, error);
9906                                 if (!flow->age)
9907                                         return rte_flow_error_set
9908                                                 (error, rte_errno,
9909                                                  RTE_FLOW_ERROR_TYPE_ACTION,
9910                                                  NULL,
9911                                                  "can't create ASO age action");
9912                                 dev_flow->dv.actions[actions_n++] =
9913                                           (flow_aso_age_get_by_idx
9914                                                 (dev, flow->age))->dr_action;
9915                                 action_flags |= MLX5_FLOW_ACTION_AGE;
9916                                 break;
9917                         }
9918                         /* Fall-through */
9919                 case RTE_FLOW_ACTION_TYPE_COUNT:
9920                         if (!dev_conf->devx) {
9921                                 return rte_flow_error_set
9922                                               (error, ENOTSUP,
9923                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9924                                                NULL,
9925                                                "count action not supported");
9926                         }
9927                         /* Save the information first; it is applied later at the END action. */
9928                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
9929                                 count = action->conf;
9930                         else
9931                                 age = action->conf;
9932                         action_flags |= MLX5_FLOW_ACTION_COUNT;
9933                         break;
9934                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
9935                         dev_flow->dv.actions[actions_n++] =
9936                                                 priv->sh->pop_vlan_action;
9937                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
9938                         break;
9939                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
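                        /*
                         * Build the VLAN header to push from the pattern
                         * items (unless a preceding SET_VLAN_VID provided
                         * the VID) and from any following SET_VLAN_VID or
                         * SET_VLAN_PCP actions, which are consumed here.
                         */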
9940                         if (!(action_flags &
9941                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
9942                                 flow_dev_get_vlan_info_from_items(items, &vlan);
9943                         vlan.eth_proto = rte_be_to_cpu_16
9944                              ((((const struct rte_flow_action_of_push_vlan *)
9945                                                    actions->conf)->ethertype));
9946                         found_action = mlx5_flow_find_action
9947                                         (actions + 1,
9948                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
9949                         if (found_action)
9950                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9951                         found_action = mlx5_flow_find_action
9952                                         (actions + 1,
9953                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
9954                         if (found_action)
9955                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
9956                         if (flow_dv_create_action_push_vlan
9957                                             (dev, attr, &vlan, dev_flow, error))
9958                                 return -rte_errno;
9959                         dev_flow->dv.actions[actions_n++] =
9960                                         dev_flow->dv.push_vlan_res->action;
9961                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
9962                         break;
9963                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
9964                         /* The OF_PUSH_VLAN action already handled this action. */
9965                         MLX5_ASSERT(action_flags &
9966                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
9967                         break;
9968                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
9969                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
9970                                 break;
9971                         flow_dev_get_vlan_info_from_items(items, &vlan);
9972                         mlx5_update_vlan_vid_pcp(actions, &vlan);
9973                         /* If there is no VLAN push, this is a modify header action. */
9974                         if (flow_dv_convert_action_modify_vlan_vid
9975                                                 (mhdr_res, actions, error))
9976                                 return -rte_errno;
9977                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
9978                         break;
9979                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
9980                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
9981                         if (flow_dv_create_action_l2_encap(dev, actions,
9982                                                            dev_flow,
9983                                                            attr->transfer,
9984                                                            error))
9985                                 return -rte_errno;
9986                         dev_flow->dv.actions[actions_n++] =
9987                                         dev_flow->dv.encap_decap->action;
9988                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
9989                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
9990                                 sample_act->action_flags |=
9991                                                         MLX5_FLOW_ACTION_ENCAP;
9992                         break;
9993                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
9994                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
9995                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
9996                                                            attr->transfer,
9997                                                            error))
9998                                 return -rte_errno;
9999                         dev_flow->dv.actions[actions_n++] =
10000                                         dev_flow->dv.encap_decap->action;
10001                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10002                         break;
10003                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
10004                         /* Handle encap with preceding decap. */
10005                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
10006                                 if (flow_dv_create_action_raw_encap
10007                                         (dev, actions, dev_flow, attr, error))
10008                                         return -rte_errno;
10009                                 dev_flow->dv.actions[actions_n++] =
10010                                         dev_flow->dv.encap_decap->action;
10011                         } else {
10012                                 /* Handle encap without preceding decap. */
10013                                 if (flow_dv_create_action_l2_encap
10014                                     (dev, actions, dev_flow, attr->transfer,
10015                                      error))
10016                                         return -rte_errno;
10017                                 dev_flow->dv.actions[actions_n++] =
10018                                         dev_flow->dv.encap_decap->action;
10019                         }
10020                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
10021                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
10022                                 sample_act->action_flags |=
10023                                                         MLX5_FLOW_ACTION_ENCAP;
10024                         break;
10025                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
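                        /*
                         * Look ahead over VOID actions: RAW_DECAP directly
                         * followed by RAW_ENCAP is a header rewrite handled
                         * by the RAW_ENCAP case; a standalone RAW_DECAP
                         * becomes an L2 decap action.
                         */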
10026                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
10027                                 ;
10028                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
10029                                 if (flow_dv_create_action_l2_decap
10030                                     (dev, dev_flow, attr->transfer, error))
10031                                         return -rte_errno;
10032                                 dev_flow->dv.actions[actions_n++] =
10033                                         dev_flow->dv.encap_decap->action;
10034                         }
10035                         /* If decap is followed by encap, handle it at encap. */
10036                         action_flags |= MLX5_FLOW_ACTION_DECAP;
10037                         break;
10038                 case RTE_FLOW_ACTION_TYPE_JUMP:
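                        /*
                         * Translate the jump target group to a flow table id
                         * with the same helper used for the flow's own group;
                         * std_tbl_fix and skip_scale are cleared for the
                         * target group.
                         */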
10039                         jump_group = ((const struct rte_flow_action_jump *)
10040                                                         action->conf)->group;
10041                         grp_info.std_tbl_fix = 0;
10042                         grp_info.skip_scale = 0;
10043                         ret = mlx5_flow_group_to_table(dev, tunnel,
10044                                                        jump_group,
10045                                                        &table,
10046                                                        &grp_info, error);
10047                         if (ret)
10048                                 return ret;
10049                         tbl = flow_dv_tbl_resource_get(dev, table, attr->egress,
10050                                                        attr->transfer,
10051                                                        !!dev_flow->external,
10052                                                        tunnel, jump_group, 0,
10053                                                        error);
10054                         if (!tbl)
10055                                 return rte_flow_error_set
10056                                                 (error, errno,
10057                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10058                                                  NULL,
10059                                                  "cannot create jump action.");
10060                         if (flow_dv_jump_tbl_resource_register
10061                             (dev, tbl, dev_flow, error)) {
10062                                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
10063                                 return rte_flow_error_set
10064                                                 (error, errno,
10065                                                  RTE_FLOW_ERROR_TYPE_ACTION,
10066                                                  NULL,
10067                                                  "cannot create jump action.");
10068                         }
10069                         dev_flow->dv.actions[actions_n++] =
10070                                         dev_flow->dv.jump->action;
10071                         action_flags |= MLX5_FLOW_ACTION_JUMP;
10072                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
10073                         break;
10074                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
10075                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
10076                         if (flow_dv_convert_action_modify_mac
10077                                         (mhdr_res, actions, error))
10078                                 return -rte_errno;
10079                         action_flags |= actions->type ==
10080                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
10081                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
10082                                         MLX5_FLOW_ACTION_SET_MAC_DST;
10083                         break;
10084                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
10085                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
10086                         if (flow_dv_convert_action_modify_ipv4
10087                                         (mhdr_res, actions, error))
10088                                 return -rte_errno;
10089                         action_flags |= actions->type ==
10090                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
10091                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
10092                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
10093                         break;
10094                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
10095                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
10096                         if (flow_dv_convert_action_modify_ipv6
10097                                         (mhdr_res, actions, error))
10098                                 return -rte_errno;
10099                         action_flags |= actions->type ==
10100                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
10101                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
10102                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
10103                         break;
10104                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
10105                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
10106                         if (flow_dv_convert_action_modify_tp
10107                                         (mhdr_res, actions, items,
10108                                          &flow_attr, dev_flow, !!(action_flags &
10109                                          MLX5_FLOW_ACTION_DECAP), error))
10110                                 return -rte_errno;
10111                         action_flags |= actions->type ==
10112                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
10113                                         MLX5_FLOW_ACTION_SET_TP_SRC :
10114                                         MLX5_FLOW_ACTION_SET_TP_DST;
10115                         break;
10116                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
10117                         if (flow_dv_convert_action_modify_dec_ttl
10118                                         (mhdr_res, items, &flow_attr, dev_flow,
10119                                          !!(action_flags &
10120                                          MLX5_FLOW_ACTION_DECAP), error))
10121                                 return -rte_errno;
10122                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
10123                         break;
10124                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
10125                         if (flow_dv_convert_action_modify_ttl
10126                                         (mhdr_res, actions, items, &flow_attr,
10127                                          dev_flow, !!(action_flags &
10128                                          MLX5_FLOW_ACTION_DECAP), error))
10129                                 return -rte_errno;
10130                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
10131                         break;
10132                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
10133                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
10134                         if (flow_dv_convert_action_modify_tcp_seq
10135                                         (mhdr_res, actions, error))
10136                                 return -rte_errno;
10137                         action_flags |= actions->type ==
10138                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
10139                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
10140                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
10141                         break;
10142
10143                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
10144                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
10145                         if (flow_dv_convert_action_modify_tcp_ack
10146                                         (mhdr_res, actions, error))
10147                                 return -rte_errno;
10148                         action_flags |= actions->type ==
10149                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
10150                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
10151                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
10152                         break;
10153                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
10154                         if (flow_dv_convert_action_set_reg
10155                                         (mhdr_res, actions, error))
10156                                 return -rte_errno;
10157                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10158                         break;
10159                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
10160                         if (flow_dv_convert_action_copy_mreg
10161                                         (dev, mhdr_res, actions, error))
10162                                 return -rte_errno;
10163                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
10164                         break;
10165                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
10166                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
10167                         dev_flow->handle->fate_action =
10168                                         MLX5_FLOW_FATE_DEFAULT_MISS;
10169                         break;
10170                 case RTE_FLOW_ACTION_TYPE_METER:
10171                         mtr = actions->conf;
10172                         if (!flow->meter) {
10173                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
10174                                                             attr, error);
10175                                 if (!fm)
10176                                         return rte_flow_error_set(error,
10177                                                 rte_errno,
10178                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10179                                                 NULL,
10180                                                 "meter not found "
10181                                                 "or invalid parameters");
10182                                 flow->meter = fm->idx;
10183                         }
10184                         /* Set the meter action. */
10185                         if (!fm) {
10186                                 fm = mlx5_ipool_get(priv->sh->ipool
10187                                                 [MLX5_IPOOL_MTR], flow->meter);
10188                                 if (!fm)
10189                                         return rte_flow_error_set(error,
10190                                                 rte_errno,
10191                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10192                                                 NULL,
10193                                                 "meter not found "
10194                                                 "or invalid parameters");
10195                         }
10196                         dev_flow->dv.actions[actions_n++] =
10197                                 fm->mfts->meter_action;
10198                         action_flags |= MLX5_FLOW_ACTION_METER;
10199                         break;
10200                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
10201                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
10202                                                               actions, error))
10203                                 return -rte_errno;
10204                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
10205                         break;
10206                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
10207                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
10208                                                               actions, error))
10209                                 return -rte_errno;
10210                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
10211                         break;
10212                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
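                        /*
                         * Only the sample sub-actions are translated here;
                         * the DV sample (or destination array) action itself
                         * is created at the END action, once the number of
                         * destinations is known, into the reserved slot.
                         */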
10213                         sample_act_pos = actions_n;
10214                         ret = flow_dv_translate_action_sample(dev,
10215                                                               actions,
10216                                                               dev_flow, attr,
10217                                                               &num_of_dest,
10218                                                               sample_actions,
10219                                                               &sample_res,
10220                                                               error);
10221                         if (ret < 0)
10222                                 return ret;
10223                         actions_n++;
10224                         action_flags |= MLX5_FLOW_ACTION_SAMPLE;
10225                         /* Put the encap action into the group if it works with a port id action. */
10226                         if ((action_flags & MLX5_FLOW_ACTION_ENCAP) &&
10227                             (action_flags & MLX5_FLOW_ACTION_PORT_ID))
10228                                 sample_act->action_flags |=
10229                                                         MLX5_FLOW_ACTION_ENCAP;
10230                         break;
10231                 case RTE_FLOW_ACTION_TYPE_END:
10232                         actions_end = true;
10233                         if (mhdr_res->actions_num) {
10234                                 /* Create the modify-header action if needed. */
10235                                 if (flow_dv_modify_hdr_resource_register
10236                                         (dev, mhdr_res, dev_flow, error))
10237                                         return -rte_errno;
10238                                 dev_flow->dv.actions[modify_action_position] =
10239                                         handle->dvh.modify_hdr->action;
10240                         }
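                        /*
                         * The counter object is created only now, from the
                         * count/age configuration saved during the scan.
                         */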
10241                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
10242                                 flow->counter =
10243                                         flow_dv_translate_create_counter(dev,
10244                                                 dev_flow, count, age);
10245
10246                                 if (!flow->counter)
10247                                         return rte_flow_error_set
10248                                                 (error, rte_errno,
10249                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10250                                                 NULL,
10251                                                 "cannot create counter"
10252                                                 " object.");
10253                                 dev_flow->dv.actions[actions_n] =
10254                                           (flow_dv_counter_get_by_idx(dev,
10255                                           flow->counter, NULL))->action;
10256                                 actions_n++;
10257                         }
10258                         if (action_flags & MLX5_FLOW_ACTION_SAMPLE) {
10259                                 ret = flow_dv_create_action_sample(dev,
10260                                                           dev_flow,
10261                                                           num_of_dest,
10262                                                           &sample_res,
10263                                                           &mdest_res,
10264                                                           sample_actions,
10265                                                           action_flags,
10266                                                           error);
10267                                 if (ret < 0)
10268                                         return rte_flow_error_set
10269                                                 (error, rte_errno,
10270                                                 RTE_FLOW_ERROR_TYPE_ACTION,
10271                                                 NULL,
10272                                                 "cannot create sample action");
10273                                 if (num_of_dest > 1) {
10274                                         dev_flow->dv.actions[sample_act_pos] =
10275                                         dev_flow->dv.dest_array_res->action;
10276                                 } else {
10277                                         dev_flow->dv.actions[sample_act_pos] =
10278                                         dev_flow->dv.sample_res->verbs_action;
10279                                 }
10280                         }
10281                         break;
10282                 default:
10283                         break;
10284                 }
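                /*
                 * Reserve a slot in the DV action array for the merged
                 * modify-header action at the first action contributing to
                 * it; the slot is filled at the END action.
                 */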
10285                 if (mhdr_res->actions_num &&
10286                     modify_action_position == UINT32_MAX)
10287                         modify_action_position = actions_n++;
10288         }
10289         /*
10290          * For multiple destinations (sample action with ratio=1), the
10291          * encap action and the port id action will be combined into a
10292          * group action, so the original actions must be removed from the
10293          * flow and only the sample action used instead.
10294          */
10295         if (num_of_dest > 1 && sample_act->dr_port_id_action) {
10296                 int i;
10297                 void *temp_actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS] = {0};
10298
10299                 for (i = 0; i < actions_n; i++) {
10300                         if ((sample_act->dr_encap_action &&
10301                                 sample_act->dr_encap_action ==
10302                                 dev_flow->dv.actions[i]) ||
10303                                 (sample_act->dr_port_id_action &&
10304                                 sample_act->dr_port_id_action ==
10305                                 dev_flow->dv.actions[i]))
10306                                 continue;
10307                         temp_actions[tmp_actions_n++] = dev_flow->dv.actions[i];
10308                 }
10309                 memcpy((void *)dev_flow->dv.actions,
10310                                 (void *)temp_actions,
10311                                 tmp_actions_n * sizeof(void *));
10312                 actions_n = tmp_actions_n;
10313         }
10314         dev_flow->dv.actions_n = actions_n;
10315         dev_flow->act_flags = action_flags;
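        /*
         * Second pass: translate each pattern item into the matcher mask
         * and value. Note the local int "tunnel" shadows the tunnel offload
         * pointer declared above for the rest of this loop.
         */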
10316         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
10317                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
10318                 int item_type = items->type;
10319
10320                 if (!mlx5_flow_os_item_supported(item_type))
10321                         return rte_flow_error_set(error, ENOTSUP,
10322                                                   RTE_FLOW_ERROR_TYPE_ITEM,
10323                                                   NULL, "item not supported");
10324                 switch (item_type) {
10325                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
10326                         flow_dv_translate_item_port_id
10327                                 (dev, match_mask, match_value, items, attr);
10328                         last_item = MLX5_FLOW_ITEM_PORT_ID;
10329                         break;
10330                 case RTE_FLOW_ITEM_TYPE_ETH:
10331                         flow_dv_translate_item_eth(match_mask, match_value,
10332                                                    items, tunnel,
10333                                                    dev_flow->dv.group);
10334                         matcher.priority = action_flags &
10335                                         MLX5_FLOW_ACTION_DEFAULT_MISS &&
10336                                         !dev_flow->external ?
10337                                         MLX5_PRIORITY_MAP_L3 :
10338                                         MLX5_PRIORITY_MAP_L2;
10339                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
10340                                              MLX5_FLOW_LAYER_OUTER_L2;
10341                         break;
10342                 case RTE_FLOW_ITEM_TYPE_VLAN:
10343                         flow_dv_translate_item_vlan(dev_flow,
10344                                                     match_mask, match_value,
10345                                                     items, tunnel,
10346                                                     dev_flow->dv.group);
10347                         matcher.priority = MLX5_PRIORITY_MAP_L2;
10348                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
10349                                               MLX5_FLOW_LAYER_INNER_VLAN) :
10350                                              (MLX5_FLOW_LAYER_OUTER_L2 |
10351                                               MLX5_FLOW_LAYER_OUTER_VLAN);
10352                         break;
10353                 case RTE_FLOW_ITEM_TYPE_IPV4:
10354                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10355                                                   &item_flags, &tunnel);
10356                         flow_dv_translate_item_ipv4(match_mask, match_value,
10357                                                     items, tunnel,
10358                                                     dev_flow->dv.group);
10359                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10360                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
10361                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
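                        /*
                         * Track the next protocol announced by the L3 header
                         * so the following items can be classified as inner
                         * (tunnel) or outer; 0xff means unknown.
                         */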
10362                         if (items->mask != NULL &&
10363                             ((const struct rte_flow_item_ipv4 *)
10364                              items->mask)->hdr.next_proto_id) {
10365                                 next_protocol =
10366                                         ((const struct rte_flow_item_ipv4 *)
10367                                          (items->spec))->hdr.next_proto_id;
10368                                 next_protocol &=
10369                                         ((const struct rte_flow_item_ipv4 *)
10370                                          (items->mask))->hdr.next_proto_id;
10371                         } else {
10372                                 /* Reset for inner layer. */
10373                                 next_protocol = 0xff;
10374                         }
10375                         break;
10376                 case RTE_FLOW_ITEM_TYPE_IPV6:
10377                         mlx5_flow_tunnel_ip_check(items, next_protocol,
10378                                                   &item_flags, &tunnel);
10379                         flow_dv_translate_item_ipv6(match_mask, match_value,
10380                                                     items, tunnel,
10381                                                     dev_flow->dv.group);
10382                         matcher.priority = MLX5_PRIORITY_MAP_L3;
10383                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
10384                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
10385                         if (items->mask != NULL &&
10386                             ((const struct rte_flow_item_ipv6 *)
10387                              items->mask)->hdr.proto) {
10388                                 next_protocol =
10389                                         ((const struct rte_flow_item_ipv6 *)
10390                                          items->spec)->hdr.proto;
10391                                 next_protocol &=
10392                                         ((const struct rte_flow_item_ipv6 *)
10393                                          items->mask)->hdr.proto;
10394                         } else {
10395                                 /* Reset for inner layer. */
10396                                 next_protocol = 0xff;
10397                         }
10398                         break;
10399                 case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
10400                         flow_dv_translate_item_ipv6_frag_ext(match_mask,
10401                                                              match_value,
10402                                                              items, tunnel);
10403                         last_item = tunnel ?
10404                                         MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT :
10405                                         MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT;
10406                         if (items->mask != NULL &&
10407                             ((const struct rte_flow_item_ipv6_frag_ext *)
10408                              items->mask)->hdr.next_header) {
10409                                 next_protocol =
10410                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10411                                  items->spec)->hdr.next_header;
10412                                 next_protocol &=
10413                                 ((const struct rte_flow_item_ipv6_frag_ext *)
10414                                  items->mask)->hdr.next_header;
10415                         } else {
10416                                 /* Reset for inner layer. */
10417                                 next_protocol = 0xff;
10418                         }
10419                         break;
10420                 case RTE_FLOW_ITEM_TYPE_TCP:
10421                         flow_dv_translate_item_tcp(match_mask, match_value,
10422                                                    items, tunnel);
10423                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10424                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
10425                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
10426                         break;
10427                 case RTE_FLOW_ITEM_TYPE_UDP:
10428                         flow_dv_translate_item_udp(match_mask, match_value,
10429                                                    items, tunnel);
10430                         matcher.priority = MLX5_PRIORITY_MAP_L4;
10431                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
10432                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
10433                         break;
10434                 case RTE_FLOW_ITEM_TYPE_GRE:
10435                         flow_dv_translate_item_gre(match_mask, match_value,
10436                                                    items, tunnel);
10437                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10438                         last_item = MLX5_FLOW_LAYER_GRE;
10439                         break;
10440                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
10441                         flow_dv_translate_item_gre_key(match_mask,
10442                                                        match_value, items);
10443                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
10444                         break;
10445                 case RTE_FLOW_ITEM_TYPE_NVGRE:
10446                         flow_dv_translate_item_nvgre(match_mask, match_value,
10447                                                      items, tunnel);
10448                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10449                         last_item = MLX5_FLOW_LAYER_GRE;
10450                         break;
10451                 case RTE_FLOW_ITEM_TYPE_VXLAN:
10452                         flow_dv_translate_item_vxlan(match_mask, match_value,
10453                                                      items, tunnel);
10454                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10455                         last_item = MLX5_FLOW_LAYER_VXLAN;
10456                         break;
10457                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
10458                         flow_dv_translate_item_vxlan_gpe(match_mask,
10459                                                          match_value, items,
10460                                                          tunnel);
10461                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10462                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
10463                         break;
10464                 case RTE_FLOW_ITEM_TYPE_GENEVE:
10465                         flow_dv_translate_item_geneve(match_mask, match_value,
10466                                                       items, tunnel);
10467                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10468                         last_item = MLX5_FLOW_LAYER_GENEVE;
10469                         break;
10470                 case RTE_FLOW_ITEM_TYPE_MPLS:
10471                         flow_dv_translate_item_mpls(match_mask, match_value,
10472                                                     items, last_item, tunnel);
10473                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10474                         last_item = MLX5_FLOW_LAYER_MPLS;
10475                         break;
10476                 case RTE_FLOW_ITEM_TYPE_MARK:
10477                         flow_dv_translate_item_mark(dev, match_mask,
10478                                                     match_value, items);
10479                         last_item = MLX5_FLOW_ITEM_MARK;
10480                         break;
10481                 case RTE_FLOW_ITEM_TYPE_META:
10482                         flow_dv_translate_item_meta(dev, match_mask,
10483                                                     match_value, attr, items);
10484                         last_item = MLX5_FLOW_ITEM_METADATA;
10485                         break;
10486                 case RTE_FLOW_ITEM_TYPE_ICMP:
10487                         flow_dv_translate_item_icmp(match_mask, match_value,
10488                                                     items, tunnel);
10489                         last_item = MLX5_FLOW_LAYER_ICMP;
10490                         break;
10491                 case RTE_FLOW_ITEM_TYPE_ICMP6:
10492                         flow_dv_translate_item_icmp6(match_mask, match_value,
10493                                                       items, tunnel);
10494                         last_item = MLX5_FLOW_LAYER_ICMP6;
10495                         break;
10496                 case RTE_FLOW_ITEM_TYPE_TAG:
10497                         flow_dv_translate_item_tag(dev, match_mask,
10498                                                    match_value, items);
10499                         last_item = MLX5_FLOW_ITEM_TAG;
10500                         break;
10501                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
10502                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
10503                                                         match_value, items);
10504                         last_item = MLX5_FLOW_ITEM_TAG;
10505                         break;
10506                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
10507                         flow_dv_translate_item_tx_queue(dev, match_mask,
10508                                                         match_value,
10509                                                         items);
10510                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
10511                         break;
10512                 case RTE_FLOW_ITEM_TYPE_GTP:
10513                         flow_dv_translate_item_gtp(match_mask, match_value,
10514                                                    items, tunnel);
10515                         matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
10516                         last_item = MLX5_FLOW_LAYER_GTP;
10517                         break;
10518                 case RTE_FLOW_ITEM_TYPE_ECPRI:
10519                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
10520                                 /* Create it only the first time it is used. */
10521                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
10522                                 if (ret)
10523                                         return rte_flow_error_set
10524                                                 (error, -ret,
10525                                                 RTE_FLOW_ERROR_TYPE_ITEM,
10526                                                 NULL,
10527                                                 "cannot create eCPRI parser");
10528                         }
10529                         /* Adjust the matcher mask and device flow value sizes. */
10530                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
10531                         dev_flow->dv.value.size =
10532                                         MLX5_ST_SZ_BYTES(fte_match_param);
10533                         flow_dv_translate_item_ecpri(dev, match_mask,
10534                                                      match_value, items);
10535                         /* No other protocol should follow eCPRI layer. */
10536                         last_item = MLX5_FLOW_LAYER_ECPRI;
10537                         break;
10538                 default:
10539                         break;
10540                 }
10541                 item_flags |= last_item;
10542         }
10543         /*
10544          * When E-Switch mode is enabled, we have two cases where we need
10545          * to set the source port manually.
10546          * The first is the case of a NIC steering rule, and the second is
10547          * an E-Switch rule where no port_id item was found. In both cases
10548          * the source port is set according to the current port in use.
10549          */
10550         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
10551             (priv->representor || priv->master)) {
10552                 if (flow_dv_translate_item_port_id(dev, match_mask,
10553                                                    match_value, NULL, attr))
10554                         return -rte_errno;
10555         }
10556 #ifdef RTE_LIBRTE_MLX5_DEBUG
10557         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
10558                                               dev_flow->dv.value.buf));
10559 #endif
10560         /*
10561          * Layers may be already initialized from prefix flow if this dev_flow
10562          * is the suffix flow.
10563          */
10564         handle->layers |= item_flags;
10565         if (action_flags & MLX5_FLOW_ACTION_RSS)
10566                 flow_dv_hashfields_set(dev_flow, rss_desc);
10567         /* Register matcher. */
10568         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
10569                                     matcher.mask.size);
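        /*
         * The matcher priority is finalized through an OS-specific wrapper,
         * since the flow priority adjustment differs per operating system.
         */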
10570         matcher.priority = mlx5_os_flow_adjust_priority(dev,
10571                                                         priority,
10572                                                         matcher.priority);
10573         /* The reserved field does not need to be set to 0 here. */
10574         tbl_key.domain = attr->transfer;
10575         tbl_key.direction = attr->egress;
10576         tbl_key.table_id = dev_flow->dv.group;
10577         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow,
10578                                      tunnel, attr->group, error))
10579                 return -rte_errno;
10580         return 0;
10581 }
10582
10583 /**
10584  * Set hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10585  * and tunnel.
10586  *
10587  * @param[in, out] action
10588  *   Shared RSS action holding hash RX queue objects.
10589  * @param[in] hash_fields
10590  *   Defines combination of packet fields to participate in RX hash.
10591  * @param[in] tunnel
10592  *   Tunnel type.
10593  * @param[in] hrxq_idx
10594  *   Hash RX queue index to set.
10595  *
10596  * @return
10597  *   0 on success, otherwise negative errno value.
10598  */
10599 static int
10600 __flow_dv_action_rss_hrxq_set(struct mlx5_shared_action_rss *action,
10601                               const uint64_t hash_fields,
10602                               const int tunnel,
10603                               uint32_t hrxq_idx)
10604 {
10605         uint32_t *hrxqs = tunnel ? action->hrxq : action->hrxq_tunnel;
10606
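        /*
         * Each supported hash-fields combination has a fixed slot in the
         * array; the indexing must match __flow_dv_action_rss_hrxq_lookup().
         */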
10607         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10608         case MLX5_RSS_HASH_IPV4:
10609                 hrxqs[0] = hrxq_idx;
10610                 return 0;
10611         case MLX5_RSS_HASH_IPV4_TCP:
10612                 hrxqs[1] = hrxq_idx;
10613                 return 0;
10614         case MLX5_RSS_HASH_IPV4_UDP:
10615                 hrxqs[2] = hrxq_idx;
10616                 return 0;
10617         case MLX5_RSS_HASH_IPV6:
10618                 hrxqs[3] = hrxq_idx;
10619                 return 0;
10620         case MLX5_RSS_HASH_IPV6_TCP:
10621                 hrxqs[4] = hrxq_idx;
10622                 return 0;
10623         case MLX5_RSS_HASH_IPV6_UDP:
10624                 hrxqs[5] = hrxq_idx;
10625                 return 0;
10626         case MLX5_RSS_HASH_NONE:
10627                 hrxqs[6] = hrxq_idx;
10628                 return 0;
10629         default:
10630                 return -1;
10631         }
10632 }
10633
10634 /**
10635  * Look up a hash RX queue by hash fields (see enum ibv_rx_hash_fields)
10636  * and tunnel.
10637  *
10638  * @param[in] dev
10639  *   Pointer to the Ethernet device structure.
10640  * @param[in] idx
10641  *   Shared RSS action ID holding hash RX queue objects.
10642  * @param[in] hash_fields
10643  *   Defines combination of packet fields to participate in RX hash.
10644  * @param[in] tunnel
10645  *   Tunnel type.
10646  *
10647  * @return
10648  *   Valid hash RX queue index, otherwise 0.
10649  */
10650 static uint32_t
10651 __flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
10652                                  const uint64_t hash_fields,
10653                                  const int tunnel)
10654 {
10655         struct mlx5_priv *priv = dev->data->dev_private;
10656         struct mlx5_shared_action_rss *shared_rss =
10657             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
10658         const uint32_t *hrxqs = tunnel ? shared_rss->hrxq :
10659                                                         shared_rss->hrxq_tunnel;
10660
10661         switch (hash_fields & ~IBV_RX_HASH_INNER) {
10662         case MLX5_RSS_HASH_IPV4:
10663                 return hrxqs[0];
10664         case MLX5_RSS_HASH_IPV4_TCP:
10665                 return hrxqs[1];
10666         case MLX5_RSS_HASH_IPV4_UDP:
10667                 return hrxqs[2];
10668         case MLX5_RSS_HASH_IPV6:
10669                 return hrxqs[3];
10670         case MLX5_RSS_HASH_IPV6_TCP:
10671                 return hrxqs[4];
10672         case MLX5_RSS_HASH_IPV6_UDP:
10673                 return hrxqs[5];
10674         case MLX5_RSS_HASH_NONE:
10675                 return hrxqs[6];
10676         default:
10677                 return 0;
10678         }
10679 }
10680
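/*
 * This is the read-side counterpart of __flow_dv_action_rss_hrxq_set():
 * the same hash_fields/tunnel pair always resolves to the same slot.
 * Sketch (assuming idx is a valid shared RSS ipool index):
 *
 *	uint32_t hrxq_idx = __flow_dv_action_rss_hrxq_lookup
 *				(dev, idx, MLX5_RSS_HASH_IPV6_TCP, 1);
 *
 *	if (!hrxq_idx)
 *		DRV_LOG(DEBUG, "no hash RX queue for these hash fields");
 */
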
10681 /**
10682  * Retrieve the hash RX queue suitable for the *flow*.
10683  * If a shared action is configured for the *flow*, the suitable hash RX
10684  * queue will be retrieved from the attached shared action.
10685  *
10686  * @param[in] dev
10687  *   Pointer to the Ethernet device structure.
10688  * @param[in] dev_flow
10689  *   Pointer to the sub flow.
10690  * @param[in] rss_desc
10691  *   Pointer to the RSS descriptor.
10692  * @param[out] hrxq
10693  *   Pointer to retrieved hash RX queue object.
10694  *
10695  * @return
10696  *   Valid hash RX queue index, otherwise 0 and rte_errno is set.
10697  */
10698 static uint32_t
10699 __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
10700                        struct mlx5_flow_rss_desc *rss_desc,
10701                        struct mlx5_hrxq **hrxq)
10702 {
10703         struct mlx5_priv *priv = dev->data->dev_private;
10704         uint32_t hrxq_idx;
10705
10706         if (rss_desc->shared_rss) {
10707                 hrxq_idx = __flow_dv_action_rss_hrxq_lookup
10708                                 (dev, rss_desc->shared_rss,
10709                                  dev_flow->hash_fields,
10710                                  !!(dev_flow->handle->layers &
10711                                     MLX5_FLOW_LAYER_TUNNEL));
10712                 if (hrxq_idx)
10713                         *hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
10714                                                hrxq_idx);
10715         } else {
10716                 *hrxq = flow_dv_hrxq_prepare(dev, dev_flow, rss_desc,
10717                                              &hrxq_idx);
10718         }
10719         return hrxq_idx;
10720 }
10721
10722 /**
10723  * Apply the flow to the NIC, lock free
10724  * (the mutex should be acquired by the caller).
10725  *
10726  * @param[in] dev
10727  *   Pointer to the Ethernet device structure.
10728  * @param[in, out] flow
10729  *   Pointer to flow structure.
10730  * @param[out] error
10731  *   Pointer to error structure.
10732  *
10733  * @return
10734  *   0 on success, a negative errno value otherwise and rte_errno is set.
10735  */
10736 static int
10737 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
10738               struct rte_flow_error *error)
10739 {
10740         struct mlx5_flow_dv_workspace *dv;
10741         struct mlx5_flow_handle *dh;
10742         struct mlx5_flow_handle_dv *dv_h;
10743         struct mlx5_flow *dev_flow;
10744         struct mlx5_priv *priv = dev->data->dev_private;
10745         uint32_t handle_idx;
10746         int n;
10747         int err;
10748         int idx;
10749         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
10750         struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc;
10751
10752         MLX5_ASSERT(wks);
10753         if (rss_desc->shared_rss) {
10754                 dh = wks->flows[wks->flow_idx - 1].handle;
10755                 MLX5_ASSERT(dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS);
10756                 dh->rix_srss = rss_desc->shared_rss;
10757         }
10758         for (idx = wks->flow_idx - 1; idx >= 0; idx--) {
10759                 dev_flow = &wks->flows[idx];
10760                 dv = &dev_flow->dv;
10761                 dh = dev_flow->handle;
10762                 dv_h = &dh->dvh;
10763                 n = dv->actions_n;
10764                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
10765                         if (dv->transfer) {
10766                                 dv->actions[n++] = priv->sh->esw_drop_action;
10767                         } else {
10768                                 MLX5_ASSERT(priv->drop_queue.hrxq);
10769                                 dv->actions[n++] =
10770                                                 priv->drop_queue.hrxq->action;
10771                         }
10772                 } else if ((dh->fate_action == MLX5_FLOW_FATE_QUEUE &&
10773                            !dv_h->rix_sample && !dv_h->rix_dest_array) ||
10774                             (dh->fate_action == MLX5_FLOW_FATE_SHARED_RSS)) {
10775                         struct mlx5_hrxq *hrxq = NULL;
10776                         uint32_t hrxq_idx = __flow_dv_rss_get_hrxq
10777                                         (dev, dev_flow, rss_desc, &hrxq);
10778                         if (!hrxq) {
10779                                 rte_flow_error_set
10780                                         (error, rte_errno,
10781                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10782                                          "cannot get hash queue");
10783                                 goto error;
10784                         }
10785                         if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
10786                                 dh->rix_hrxq = hrxq_idx;
10787                         dv->actions[n++] = hrxq->action;
10788                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
10789                         if (!priv->sh->default_miss_action) {
10790                                 rte_flow_error_set
10791                                         (error, rte_errno,
10792                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
10793                                          "default miss action was not created.");
10794                                 goto error;
10795                         }
10796                         dv->actions[n++] = priv->sh->default_miss_action;
10797                 }
10798                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
10799                                                (void *)&dv->value, n,
10800                                                dv->actions, &dh->drv_flow);
10801                 if (err) {
10802                         rte_flow_error_set(error, errno,
10803                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
10804                                            NULL,
10805                                            "hardware refuses to create flow");
10806                         goto error;
10807                 }
10808                 if (priv->vmwa_context &&
10809                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
10810                         /*
10811                          * The rule contains the VLAN pattern.
10812                          * For VF we are going to create VLAN
10813                          * interface to make hypervisor set correct
10814                          * e-Switch vport context.
10815                          */
10816                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
10817                 }
10818         }
10819         return 0;
10820 error:
10821         err = rte_errno; /* Save rte_errno before cleanup. */
10822         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
10823                        handle_idx, dh, next) {
10824                 /* hrxq is union, don't clear it if the flag is not set. */
10825                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE && dh->rix_hrxq) {
10826                         mlx5_hrxq_release(dev, dh->rix_hrxq);
10827                         dh->rix_hrxq = 0;
10828                 }
10829                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
10830                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
10831         }
10832         if (rss_desc->shared_rss)
10833                 wks->flows[wks->flow_idx - 1].handle->rix_srss = 0;
10834         rte_errno = err; /* Restore rte_errno. */
10835         return -rte_errno;
10836 }
10837
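/*
 * Note on the error path above: only the resources acquired inside
 * flow_dv_apply() are rolled back - hash RX queue references (the
 * rix_hrxq union member is cleared only for MLX5_FLOW_FATE_QUEUE) and
 * VF VLAN interfaces. Matchers and other cached actions are left for
 * flow_dv_destroy(). rte_errno is saved before and restored after the
 * cleanup so the caller observes the original failure cause.
 */
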
10838 void
10839 flow_dv_matcher_remove_cb(struct mlx5_cache_list *list __rte_unused,
10840                           struct mlx5_cache_entry *entry)
10841 {
10842         struct mlx5_flow_dv_matcher *cache = container_of(entry, typeof(*cache),
10843                                                           entry);
10844
10845         claim_zero(mlx5_flow_os_destroy_flow_matcher(cache->matcher_object));
10846         mlx5_free(cache);
10847 }
10848
10849 /**
10850  * Release the flow matcher.
10851  *
10852  * @param dev
10853  *   Pointer to Ethernet device.
10854  * @param handle
10855  *   Pointer to mlx5_flow_handle.
10856  *
10857  * @return
10858  *   1 while a reference on it exists, 0 when freed.
10859  */
10860 static int
10861 flow_dv_matcher_release(struct rte_eth_dev *dev,
10862                         struct mlx5_flow_handle *handle)
10863 {
10864         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
10865         struct mlx5_flow_tbl_data_entry *tbl = container_of(matcher->tbl,
10866                                                             typeof(*tbl), tbl);
10867         int ret;
10868
10869         MLX5_ASSERT(matcher->matcher_object);
10870         ret = mlx5_cache_unregister(&tbl->matchers, &matcher->entry);
10871         flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl->tbl);
10872         return ret;
10873 }
10874
10875 /**
10876  * Release encap_decap resource.
10877  *
10878  * @param list
10879  *   Pointer to the hash list.
10880  * @param entry
10881  *   Pointer to exist resource entry object.
10882  */
10883 void
10884 flow_dv_encap_decap_remove_cb(struct mlx5_hlist *list,
10885                               struct mlx5_hlist_entry *entry)
10886 {
10887         struct mlx5_dev_ctx_shared *sh = list->ctx;
10888         struct mlx5_flow_dv_encap_decap_resource *res =
10889                 container_of(entry, typeof(*res), entry);
10890
10891         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10892         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], res->idx);
10893 }
10894
10895 /**
10896  * Release an encap/decap resource.
10897  *
10898  * @param dev
10899  *   Pointer to Ethernet device.
10900  * @param encap_decap_idx
10901  *   Index of encap decap resource.
10902  *
10903  * @return
10904  *   1 while a reference on it exists, 0 when freed.
10905  */
10906 static int
10907 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
10908                                      uint32_t encap_decap_idx)
10909 {
10910         struct mlx5_priv *priv = dev->data->dev_private;
10911         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
10912
10913         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
10914                                         encap_decap_idx);
10915         if (!cache_resource)
10916                 return 0;
10917         MLX5_ASSERT(cache_resource->action);
10918         return mlx5_hlist_unregister(priv->sh->encaps_decaps,
10919                                      &cache_resource->entry);
10920 }
10921
10922 /**
10923  * Release a jump-to-table action resource.
10924  *
10925  * @param dev
10926  *   Pointer to Ethernet device.
10927  * @param handle
10928  *   Pointer to mlx5_flow_handle.
10929  *
10930  * @return
10931  *   1 while a reference on it exists, 0 when freed.
10932  */
10933 static int
10934 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
10935                                   struct mlx5_flow_handle *handle)
10936 {
10937         struct mlx5_priv *priv = dev->data->dev_private;
10938         struct mlx5_flow_tbl_data_entry *tbl_data;
10939
10940         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
10941                              handle->rix_jump);
10942         if (!tbl_data)
10943                 return 0;
10944         return flow_dv_tbl_resource_release(MLX5_SH(dev), &tbl_data->tbl);
10945 }
10946
10947 void
10948 flow_dv_modify_remove_cb(struct mlx5_hlist *list __rte_unused,
10949                          struct mlx5_hlist_entry *entry)
10950 {
10951         struct mlx5_flow_dv_modify_hdr_resource *res =
10952                 container_of(entry, typeof(*res), entry);
10953
10954         claim_zero(mlx5_flow_os_destroy_flow_action(res->action));
10955         mlx5_free(entry);
10956 }
10957
10958 /**
10959  * Release a modify-header resource.
10960  *
10961  * @param dev
10962  *   Pointer to Ethernet device.
10963  * @param handle
10964  *   Pointer to mlx5_flow_handle.
10965  *
10966  * @return
10967  *   1 while a reference on it exists, 0 when freed.
10968  */
10969 static int
10970 flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
10971                                     struct mlx5_flow_handle *handle)
10972 {
10973         struct mlx5_priv *priv = dev->data->dev_private;
10974         struct mlx5_flow_dv_modify_hdr_resource *entry = handle->dvh.modify_hdr;
10975
10976         MLX5_ASSERT(entry->action);
10977         return mlx5_hlist_unregister(priv->sh->modify_cmds, &entry->entry);
10978 }
10979
10980 void
10981 flow_dv_port_id_remove_cb(struct mlx5_cache_list *list,
10982                           struct mlx5_cache_entry *entry)
10983 {
10984         struct mlx5_dev_ctx_shared *sh = list->ctx;
10985         struct mlx5_flow_dv_port_id_action_resource *cache =
10986                         container_of(entry, typeof(*cache), entry);
10987
10988         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
10989         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID], cache->idx);
10990 }
10991
10992 /**
10993  * Release port ID action resource.
10994  *
10995  * @param dev
10996  *   Pointer to Ethernet device.
10997  * @param port_id
10998  *   Index of the port ID action resource.
10999  *
11000  * @return
11001  *   1 while a reference on it exists, 0 when freed.
11002  */
11003 static int
11004 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
11005                                         uint32_t port_id)
11006 {
11007         struct mlx5_priv *priv = dev->data->dev_private;
11008         struct mlx5_flow_dv_port_id_action_resource *cache;
11009
11010         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID], port_id);
11011         if (!cache)
11012                 return 0;
11013         MLX5_ASSERT(cache->action);
11014         return mlx5_cache_unregister(&priv->sh->port_id_action_list,
11015                                      &cache->entry);
11016 }
11017
11018 /**
11019  * Release shared RSS action resource.
11020  *
11021  * @param dev
11022  *   Pointer to Ethernet device.
11023  * @param srss
11024  *   Shared RSS action index.
11025  */
11026 static void
11027 flow_dv_shared_rss_action_release(struct rte_eth_dev *dev, uint32_t srss)
11028 {
11029         struct mlx5_priv *priv = dev->data->dev_private;
11030         struct mlx5_shared_action_rss *shared_rss;
11031
11032         shared_rss = mlx5_ipool_get
11033                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], srss);
11034         __atomic_sub_fetch(&shared_rss->refcnt, 1, __ATOMIC_RELAXED);
11035 }
11036
11037 void
11038 flow_dv_push_vlan_remove_cb(struct mlx5_cache_list *list,
11039                             struct mlx5_cache_entry *entry)
11040 {
11041         struct mlx5_dev_ctx_shared *sh = list->ctx;
11042         struct mlx5_flow_dv_push_vlan_action_resource *cache =
11043                         container_of(entry, typeof(*cache), entry);
11044
11045         claim_zero(mlx5_flow_os_destroy_flow_action(cache->action));
11046         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN], cache->idx);
11047 }
11048
11049 /**
11050  * Release push vlan action resource.
11051  *
11052  * @param dev
11053  *   Pointer to Ethernet device.
11054  * @param handle
11055  *   Pointer to mlx5_flow_handle.
11056  *
11057  * @return
11058  *   1 while a reference on it exists, 0 when freed.
11059  */
11060 static int
11061 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
11062                                           struct mlx5_flow_handle *handle)
11063 {
11064         struct mlx5_priv *priv = dev->data->dev_private;
11065         struct mlx5_flow_dv_push_vlan_action_resource *cache;
11066         uint32_t idx = handle->dvh.rix_push_vlan;
11067
11068         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
11069         if (!cache)
11070                 return 0;
11071         MLX5_ASSERT(cache->action);
11072         return mlx5_cache_unregister(&priv->sh->push_vlan_action_list,
11073                                      &cache->entry);
11074 }
11075
11076 /**
11077  * Release the fate resource.
11078  *
11079  * @param dev
11080  *   Pointer to Ethernet device.
11081  * @param handle
11082  *   Pointer to mlx5_flow_handle.
11083  */
11084 static void
11085 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
11086                                struct mlx5_flow_handle *handle)
11087 {
11088         if (!handle->rix_fate)
11089                 return;
11090         switch (handle->fate_action) {
11091         case MLX5_FLOW_FATE_QUEUE:
11092                 mlx5_hrxq_release(dev, handle->rix_hrxq);
11093                 break;
11094         case MLX5_FLOW_FATE_JUMP:
11095                 flow_dv_jump_tbl_resource_release(dev, handle);
11096                 break;
11097         case MLX5_FLOW_FATE_PORT_ID:
11098                 flow_dv_port_id_action_resource_release(dev,
11099                                 handle->rix_port_id_action);
11100                 break;
11101         case MLX5_FLOW_FATE_SHARED_RSS:
11102                 flow_dv_shared_rss_action_release(dev, handle->rix_srss);
11103                 break;
11104         default:
11105                 DRV_LOG(DEBUG, "Incorrect fate action:%d", handle->fate_action);
11106                 break;
11107         }
11108         handle->rix_fate = 0;
11109 }
11110
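/*
 * handle->rix_fate overlays rix_hrxq/rix_jump/rix_port_id_action/
 * rix_srss in a union, so the dispatch above must follow
 * handle->fate_action: releasing through the wrong member would feed
 * an index from one ipool into another.
 */
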
11111 void
11112 flow_dv_sample_remove_cb(struct mlx5_cache_list *list __rte_unused,
11113                          struct mlx5_cache_entry *entry)
11114 {
11115         struct mlx5_flow_dv_sample_resource *cache_resource =
11116                         container_of(entry, typeof(*cache_resource), entry);
11117         struct rte_eth_dev *dev = cache_resource->dev;
11118         struct mlx5_priv *priv = dev->data->dev_private;
11119
11120         if (cache_resource->verbs_action)
11121                 claim_zero(mlx5_flow_os_destroy_flow_action
11122                                 (cache_resource->verbs_action));
11123         if (cache_resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB) {
11124                 if (cache_resource->default_miss)
11125                         claim_zero(mlx5_flow_os_destroy_flow_action
11126                           (cache_resource->default_miss));
11127         }
11128         if (cache_resource->normal_path_tbl)
11129                 flow_dv_tbl_resource_release(MLX5_SH(dev),
11130                         cache_resource->normal_path_tbl);
11131         flow_dv_sample_sub_actions_release(dev,
11132                                 &cache_resource->sample_idx);
11133         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11134                         cache_resource->idx);
11135         DRV_LOG(DEBUG, "sample resource %p: removed",
11136                 (void *)cache_resource);
11137 }
11138
11139 /**
11140  * Release a sample resource.
11141  *
11142  * @param dev
11143  *   Pointer to Ethernet device.
11144  * @param handle
11145  *   Pointer to mlx5_flow_handle.
11146  *
11147  * @return
11148  *   1 while a reference on it exists, 0 when freed.
11149  */
11150 static int
11151 flow_dv_sample_resource_release(struct rte_eth_dev *dev,
11152                                      struct mlx5_flow_handle *handle)
11153 {
11154         struct mlx5_priv *priv = dev->data->dev_private;
11155         struct mlx5_flow_dv_sample_resource *cache_resource;
11156
11157         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_SAMPLE],
11158                          handle->dvh.rix_sample);
11159         if (!cache_resource)
11160                 return 0;
11161         MLX5_ASSERT(cache_resource->verbs_action);
11162         return mlx5_cache_unregister(&priv->sh->sample_action_list,
11163                                      &cache_resource->entry);
11164 }
11165
11166 void
11167 flow_dv_dest_array_remove_cb(struct mlx5_cache_list *list __rte_unused,
11168                              struct mlx5_cache_entry *entry)
11169 {
11170         struct mlx5_flow_dv_dest_array_resource *cache_resource =
11171                         container_of(entry, typeof(*cache_resource), entry);
11172         struct rte_eth_dev *dev = cache_resource->dev;
11173         struct mlx5_priv *priv = dev->data->dev_private;
11174         uint32_t i = 0;
11175
11176         MLX5_ASSERT(cache_resource->action);
11177         if (cache_resource->action)
11178                 claim_zero(mlx5_flow_os_destroy_flow_action
11179                                         (cache_resource->action));
11180         for (; i < cache_resource->num_of_dest; i++)
11181                 flow_dv_sample_sub_actions_release(dev,
11182                                 &cache_resource->sample_idx[i]);
11183         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11184                         cache_resource->idx);
11185         DRV_LOG(DEBUG, "destination array resource %p: removed",
11186                 (void *)cache_resource);
11187 }
11188
11189 /**
11190  * Release a destination array resource.
11191  *
11192  * @param dev
11193  *   Pointer to Ethernet device.
11194  * @param handle
11195  *   Pointer to mlx5_flow_handle.
11196  *
11197  * @return
11198  *   1 while a reference on it exists, 0 when freed.
11199  */
11200 static int
11201 flow_dv_dest_array_resource_release(struct rte_eth_dev *dev,
11202                                     struct mlx5_flow_handle *handle)
11203 {
11204         struct mlx5_priv *priv = dev->data->dev_private;
11205         struct mlx5_flow_dv_dest_array_resource *cache;
11206
11207         cache = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DEST_ARRAY],
11208                                handle->dvh.rix_dest_array);
11209         if (!cache)
11210                 return 0;
11211         MLX5_ASSERT(cache->action);
11212         return mlx5_cache_unregister(&priv->sh->dest_array_list,
11213                                      &cache->entry);
11214 }
11215
11216 /**
11217  * Remove the flow from the NIC but keep it in memory.
11218  * Lock free (the mutex should be acquired by the caller).
11219  *
11220  * @param[in] dev
11221  *   Pointer to Ethernet device.
11222  * @param[in, out] flow
11223  *   Pointer to flow structure.
11224  */
11225 static void
11226 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
11227 {
11228         struct mlx5_flow_handle *dh;
11229         uint32_t handle_idx;
11230         struct mlx5_priv *priv = dev->data->dev_private;
11231
11232         if (!flow)
11233                 return;
11234         handle_idx = flow->dev_handles;
11235         while (handle_idx) {
11236                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11237                                     handle_idx);
11238                 if (!dh)
11239                         return;
11240                 if (dh->drv_flow) {
11241                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
11242                         dh->drv_flow = NULL;
11243                 }
11244                 if (dh->fate_action == MLX5_FLOW_FATE_QUEUE)
11245                         flow_dv_fate_resource_release(dev, dh);
11246                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
11247                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
11248                 handle_idx = dh->next.next;
11249         }
11250 }
11251
11252 /**
11253  * Remove the flow from the NIC and the memory.
11254  * Lock free (the mutex should be acquired by the caller).
11255  *
11256  * @param[in] dev
11257  *   Pointer to the Ethernet device structure.
11258  * @param[in, out] flow
11259  *   Pointer to flow structure.
11260  */
11261 static void
11262 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
11263 {
11264         struct mlx5_flow_handle *dev_handle;
11265         struct mlx5_priv *priv = dev->data->dev_private;
11266
11267         if (!flow)
11268                 return;
11269         flow_dv_remove(dev, flow);
11270         if (flow->counter) {
11271                 flow_dv_counter_free(dev, flow->counter);
11272                 flow->counter = 0;
11273         }
11274         if (flow->meter) {
11275                 struct mlx5_flow_meter *fm;
11276
11277                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
11278                                     flow->meter);
11279                 if (fm)
11280                         mlx5_flow_meter_detach(fm);
11281                 flow->meter = 0;
11282         }
11283         if (flow->age)
11284                 flow_dv_aso_age_release(dev, flow->age);
11285         while (flow->dev_handles) {
11286                 uint32_t tmp_idx = flow->dev_handles;
11287
11288                 dev_handle = mlx5_ipool_get(priv->sh->ipool
11289                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
11290                 if (!dev_handle)
11291                         return;
11292                 flow->dev_handles = dev_handle->next.next;
11293                 if (dev_handle->dvh.matcher)
11294                         flow_dv_matcher_release(dev, dev_handle);
11295                 if (dev_handle->dvh.rix_sample)
11296                         flow_dv_sample_resource_release(dev, dev_handle);
11297                 if (dev_handle->dvh.rix_dest_array)
11298                         flow_dv_dest_array_resource_release(dev, dev_handle);
11299                 if (dev_handle->dvh.rix_encap_decap)
11300                         flow_dv_encap_decap_resource_release(dev,
11301                                 dev_handle->dvh.rix_encap_decap);
11302                 if (dev_handle->dvh.modify_hdr)
11303                         flow_dv_modify_hdr_resource_release(dev, dev_handle);
11304                 if (dev_handle->dvh.rix_push_vlan)
11305                         flow_dv_push_vlan_action_resource_release(dev,
11306                                                                   dev_handle);
11307                 if (dev_handle->dvh.rix_tag)
11308                         flow_dv_tag_release(dev,
11309                                             dev_handle->dvh.rix_tag);
11310                 flow_dv_fate_resource_release(dev, dev_handle);
11311                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
11312                            tmp_idx);
11313         }
11314 }
11315
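/*
 * flow_dv_destroy() drops per-handle resources in dependency order:
 * the matcher first (it pins a table), then sample/destination-array
 * resources, encap/decap, modify-header, push-VLAN and tag caches,
 * and finally the fate resource before the handle itself returns to
 * the MLX5_IPOOL_MLX5_FLOW pool.
 */
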
11316 /**
11317  * Release array of hash RX queue objects.
11318  * Helper function.
11319  *
11320  * @param[in] dev
11321  *   Pointer to the Ethernet device structure.
11322  * @param[in, out] hrxqs
11323  *   Array of hash RX queue objects.
11324  *
11325  * @return
11326  *   Total number of references to hash RX queue objects in *hrxqs* array
11327  *   after this operation.
11328  */
11329 static int
11330 __flow_dv_hrxqs_release(struct rte_eth_dev *dev,
11331                         uint32_t (*hrxqs)[MLX5_RSS_HASH_FIELDS_LEN])
11332 {
11333         size_t i;
11334         int remaining = 0;
11335
11336         for (i = 0; i < RTE_DIM(*hrxqs); i++) {
11337                 int ret = mlx5_hrxq_release(dev, (*hrxqs)[i]);
11338
11339                 if (!ret)
11340                         (*hrxqs)[i] = 0;
11341                 remaining += ret;
11342         }
11343         return remaining;
11344 }
11345
11346 /**
11347  * Release all hash RX queue objects representing shared RSS action.
11348  *
11349  * @param[in] dev
11350  *   Pointer to the Ethernet device structure.
11351  * @param[in, out] action
11352  *   Shared RSS action to remove hash RX queue objects from.
11353  *
11354  * @return
11355  *   Total number of references to hash RX queue objects stored in *action*
11356  *   after this operation.
11357  *   Expected to be 0 if no external references are held.
11358  */
11359 static int
11360 __flow_dv_action_rss_hrxqs_release(struct rte_eth_dev *dev,
11361                                  struct mlx5_shared_action_rss *action)
11362 {
11363         return __flow_dv_hrxqs_release(dev, &action->hrxq) +
11364                 __flow_dv_hrxqs_release(dev, &action->hrxq_tunnel);
11365 }
11366
11367 /**
11368  * Setup shared RSS action.
11369  * Prepare set of hash RX queue objects sufficient to handle all valid
11370  * hash_fields combinations (see enum ibv_rx_hash_fields).
11371  *
11372  * @param[in] dev
11373  *   Pointer to the Ethernet device structure.
11374  * @param[in] action_idx
11375  *   Shared RSS action ipool index.
11376  * @param[in, out] action
11377  *   Partially initialized shared RSS action.
11378  * @param[out] error
11379  *   Perform verbose error reporting if not NULL. Initialized in case of
11380  *   error only.
11381  *
11382  * @return
11383  *   0 on success, otherwise negative errno value.
11384  */
11385 static int
11386 __flow_dv_action_rss_setup(struct rte_eth_dev *dev,
11387                            uint32_t action_idx,
11388                            struct mlx5_shared_action_rss *action,
11389                            struct rte_flow_error *error)
11390 {
11391         struct mlx5_flow_rss_desc rss_desc = { 0 };
11392         size_t i;
11393         int err;
11394
11395         if (mlx5_ind_table_obj_setup(dev, action->ind_tbl)) {
11396                 return rte_flow_error_set(error, rte_errno,
11397                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11398                                           "cannot setup indirection table");
11399         }
11400         memcpy(rss_desc.key, action->origin.key, MLX5_RSS_HASH_KEY_LEN);
11401         rss_desc.key_len = MLX5_RSS_HASH_KEY_LEN;
11402         rss_desc.const_q = action->origin.queue;
11403         rss_desc.queue_num = action->origin.queue_num;
11404         /* Set non-zero value to indicate a shared RSS. */
11405         rss_desc.shared_rss = action_idx;
11406         rss_desc.ind_tbl = action->ind_tbl;
11407         for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
11408                 uint32_t hrxq_idx;
11409                 uint64_t hash_fields = mlx5_rss_hash_fields[i];
11410                 int tunnel;
11411
11412                 for (tunnel = 0; tunnel < 2; tunnel++) {
11413                         rss_desc.tunnel = tunnel;
11414                         rss_desc.hash_fields = hash_fields;
11415                         hrxq_idx = mlx5_hrxq_get(dev, &rss_desc);
11416                         if (!hrxq_idx) {
11417                                 rte_flow_error_set
11418                                         (error, rte_errno,
11419                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11420                                          "cannot get hash queue");
11421                                 goto error_hrxq_new;
11422                         }
11423                         err = __flow_dv_action_rss_hrxq_set
11424                                 (action, hash_fields, tunnel, hrxq_idx);
11425                         MLX5_ASSERT(!err);
11426                 }
11427         }
11428         return 0;
11429 error_hrxq_new:
11430         err = rte_errno;
11431         __flow_dv_action_rss_hrxqs_release(dev, action);
11432         if (!mlx5_ind_table_obj_release(dev, action->ind_tbl, true))
11433                 action->ind_tbl = NULL;
11434         rte_errno = err;
11435         return -rte_errno;
11436 }
11437
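/*
 * The setup loop above materializes MLX5_RSS_HASH_FIELDS_LEN * 2 hash
 * RX queues: the seven hash-field combinations handled by
 * __flow_dv_action_rss_hrxq_set(), each in a non-tunnel and a tunnel
 * flavor. A shared RSS action can therefore serve any matching flow
 * without creating queue objects on the datapath.
 */
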
11438 /**
11439  * Create shared RSS action.
11440  *
11441  * @param[in] dev
11442  *   Pointer to the Ethernet device structure.
11443  * @param[in] conf
11444  *   Shared action configuration.
11445  * @param[in] rss
11446  *   RSS action specification used to create shared action.
11447  * @param[out] error
11448  *   Perform verbose error reporting if not NULL. Initialized in case of
11449  *   error only.
11450  *
11451  * @return
11452  *   A valid shared action ID in case of success, 0 otherwise and
11453  *   rte_errno is set.
11454  */
11455 static uint32_t
11456 __flow_dv_action_rss_create(struct rte_eth_dev *dev,
11457                             const struct rte_flow_shared_action_conf *conf,
11458                             const struct rte_flow_action_rss *rss,
11459                             struct rte_flow_error *error)
11460 {
11461         struct mlx5_priv *priv = dev->data->dev_private;
11462         struct mlx5_shared_action_rss *shared_action = NULL;
11463         void *queue = NULL;
11464         struct rte_flow_action_rss *origin;
11465         const uint8_t *rss_key;
11466         uint32_t queue_size = rss->queue_num * sizeof(uint16_t);
11467         uint32_t idx;
11468
11469         RTE_SET_USED(conf);
11470         queue = mlx5_malloc(0, RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11471                             0, SOCKET_ID_ANY);
11472         shared_action = mlx5_ipool_zmalloc
11473                          (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], &idx);
11474         if (!shared_action || !queue) {
11475                 rte_flow_error_set(error, ENOMEM,
11476                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11477                                    "cannot allocate resource memory");
11478                 goto error_rss_init;
11479         }
11480         if (idx > (1u << MLX5_SHARED_ACTION_TYPE_OFFSET)) {
11481                 rte_flow_error_set(error, E2BIG,
11482                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11483                                    "rss action number out of range");
11484                 goto error_rss_init;
11485         }
11486         shared_action->ind_tbl = mlx5_malloc(MLX5_MEM_ZERO,
11487                                              sizeof(*shared_action->ind_tbl),
11488                                              0, SOCKET_ID_ANY);
11489         if (!shared_action->ind_tbl) {
11490                 rte_flow_error_set(error, ENOMEM,
11491                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
11492                                    "cannot allocate resource memory");
11493                 goto error_rss_init;
11494         }
11495         memcpy(queue, rss->queue, queue_size);
11496         shared_action->ind_tbl->queues = queue;
11497         shared_action->ind_tbl->queues_n = rss->queue_num;
11498         origin = &shared_action->origin;
11499         origin->func = rss->func;
11500         origin->level = rss->level;
11501         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
11502         origin->types = !rss->types ? ETH_RSS_IP : rss->types;
11503         /* NULL RSS key indicates default RSS key. */
11504         rss_key = !rss->key ? rss_hash_default_key : rss->key;
11505         memcpy(shared_action->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
11506         origin->key = &shared_action->key[0];
11507         origin->key_len = MLX5_RSS_HASH_KEY_LEN;
11508         origin->queue = queue;
11509         origin->queue_num = rss->queue_num;
11510         if (__flow_dv_action_rss_setup(dev, idx, shared_action, error))
11511                 goto error_rss_init;
11512         rte_spinlock_init(&shared_action->action_rss_sl);
11513         __atomic_add_fetch(&shared_action->refcnt, 1, __ATOMIC_RELAXED);
11514         rte_spinlock_lock(&priv->shared_act_sl);
11515         ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11516                      &priv->rss_shared_actions, idx, shared_action, next);
11517         rte_spinlock_unlock(&priv->shared_act_sl);
11518         return idx;
11519 error_rss_init:
11520         if (shared_action) {
11521                 if (shared_action->ind_tbl)
11522                         mlx5_free(shared_action->ind_tbl);
11523                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11524                                 idx);
11525         }
11526         if (queue)
11527                 mlx5_free(queue);
11528         return 0;
11529 }
11530
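/*
 * On success the raw ipool index is returned (never 0); the caller,
 * flow_dv_action_create(), folds the MLX5_SHARED_ACTION_TYPE_RSS tag
 * into the upper bits, which is why the E2BIG check above keeps the
 * index within (1u << MLX5_SHARED_ACTION_TYPE_OFFSET).
 */
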
11531 /**
11532  * Destroy the shared RSS action.
11533  * Release related hash RX queue objects.
11534  *
11535  * @param[in] dev
11536  *   Pointer to the Ethernet device structure.
11537  * @param[in] idx
11538  *   The shared RSS action object ID to be removed.
11539  * @param[out] error
11540  *   Perform verbose error reporting if not NULL. Initialized in case of
11541  *   error only.
11542  *
11543  * @return
11544  *   0 on success, otherwise negative errno value.
11545  */
11546 static int
11547 __flow_dv_action_rss_release(struct rte_eth_dev *dev, uint32_t idx,
11548                              struct rte_flow_error *error)
11549 {
11550         struct mlx5_priv *priv = dev->data->dev_private;
11551         struct mlx5_shared_action_rss *shared_rss =
11552             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11553         uint32_t old_refcnt = 1;
11554         int remaining;
11555         uint16_t *queue = NULL;
11556
11557         if (!shared_rss)
11558                 return rte_flow_error_set(error, EINVAL,
11559                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11560                                           "invalid shared action");
11561         remaining = __flow_dv_action_rss_hrxqs_release(dev, shared_rss);
11562         if (remaining)
11563                 return rte_flow_error_set(error, EBUSY,
11564                                           RTE_FLOW_ERROR_TYPE_ACTION,
11565                                           NULL,
11566                                           "shared rss hrxq has references");
11567         queue = shared_rss->ind_tbl->queues;
11568         remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
11569         if (remaining)
11570                 return rte_flow_error_set(error, EBUSY,
11571                                           RTE_FLOW_ERROR_TYPE_ACTION,
11572                                           NULL,
11573                                           "shared rss indirection table has"
11574                                           " references");
11575         if (!__atomic_compare_exchange_n(&shared_rss->refcnt, &old_refcnt,
11576                                          0, 0, __ATOMIC_ACQUIRE,
11577                                          __ATOMIC_RELAXED))
11578                 return rte_flow_error_set(error, EBUSY,
11579                                           RTE_FLOW_ERROR_TYPE_ACTION,
11580                                           NULL,
11581                                           "shared rss has references");
11582         mlx5_free(queue);
11583         rte_spinlock_lock(&priv->shared_act_sl);
11584         ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11585                      &priv->rss_shared_actions, idx, shared_rss, next);
11586         rte_spinlock_unlock(&priv->shared_act_sl);
11587         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
11588                         idx);
11589         return 0;
11590 }
11591
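/*
 * Teardown order above matters: the hash RX queues are released first,
 * then the indirection table, and only then is the reference counter
 * CAS-ed from 1 to 0; whichever step still finds live references
 * aborts with EBUSY and leaves the shared action intact.
 */
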
11592 /**
11593  * Create a shared action, lock free
11594  * (the mutex should be acquired by the caller).
11595  * Dispatcher for action type specific call.
11596  *
11597  * @param[in] dev
11598  *   Pointer to the Ethernet device structure.
11599  * @param[in] conf
11600  *   Shared action configuration.
11601  * @param[in] action
11602  *   Action specification used to create shared action.
11603  * @param[out] error
11604  *   Perform verbose error reporting if not NULL. Initialized in case of
11605  *   error only.
11606  *
11607  * @return
11608  *   A valid shared action handle in case of success, NULL otherwise and
11609  *   rte_errno is set.
11610  */
11611 static struct rte_flow_shared_action *
11612 flow_dv_action_create(struct rte_eth_dev *dev,
11613                       const struct rte_flow_shared_action_conf *conf,
11614                       const struct rte_flow_action *action,
11615                       struct rte_flow_error *err)
11616 {
11617         uint32_t idx = 0;
11618         uint32_t ret = 0;
11619
11620         switch (action->type) {
11621         case RTE_FLOW_ACTION_TYPE_RSS:
11622                 ret = __flow_dv_action_rss_create(dev, conf, action->conf, err);
11623                 idx = (MLX5_SHARED_ACTION_TYPE_RSS <<
11624                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11625                 break;
11626         case RTE_FLOW_ACTION_TYPE_AGE:
11627                 ret = flow_dv_translate_create_aso_age(dev, action->conf, err);
11628                 idx = (MLX5_SHARED_ACTION_TYPE_AGE <<
11629                        MLX5_SHARED_ACTION_TYPE_OFFSET) | ret;
11630                 if (ret) {
11631                         struct mlx5_aso_age_action *aso_age =
11632                                               flow_aso_age_get_by_idx(dev, ret);
11633
11634                         if (!aso_age->age_params.context)
11635                                 aso_age->age_params.context =
11636                                                          (void *)(uintptr_t)idx;
11637                 }
11638                 break;
11639         default:
11640                 rte_flow_error_set(err, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
11641                                    NULL, "action type not supported");
11642                 break;
11643         }
11644         return ret ? (struct rte_flow_shared_action *)(uintptr_t)idx : NULL;
11645 }
11646
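/*
 * Shared action handles are plain integers, not pointers: the action
 * type lives in the bits above MLX5_SHARED_ACTION_TYPE_OFFSET and the
 * ipool index in the bits below. Decoding sketch, as done by
 * flow_dv_action_destroy()/update()/query() below:
 *
 *	uint32_t act_idx = (uint32_t)(uintptr_t)action;
 *	uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
 *	uint32_t idx = act_idx &
 *		       ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 */
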
11647 /**
11648  * Destroy the shared action.
11649  * Release action-related resources on the NIC and in memory.
11650  * Lock free (the mutex should be acquired by the caller).
11651  * Dispatcher for action type specific call.
11652  *
11653  * @param[in] dev
11654  *   Pointer to the Ethernet device structure.
11655  * @param[in] action
11656  *   The shared action object to be removed.
11657  * @param[out] error
11658  *   Perform verbose error reporting if not NULL. Initialized in case of
11659  *   error only.
11660  *
11661  * @return
11662  *   0 on success, otherwise negative errno value.
11663  */
11664 static int
11665 flow_dv_action_destroy(struct rte_eth_dev *dev,
11666                        struct rte_flow_shared_action *action,
11667                        struct rte_flow_error *error)
11668 {
11669         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11670         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11671         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11672         int ret;
11673
11674         switch (type) {
11675         case MLX5_SHARED_ACTION_TYPE_RSS:
11676                 return __flow_dv_action_rss_release(dev, idx, error);
11677         case MLX5_SHARED_ACTION_TYPE_AGE:
11678                 ret = flow_dv_aso_age_release(dev, idx);
11679                 if (ret)
11680                         /*
11681                          * In this case, the last flow holding a reference
11682                          * will actually release the age action.
11683                          */
11684                         DRV_LOG(DEBUG, "Shared age action %" PRIu32 " was"
11685                                 " released with references %d.", idx, ret);
11686                 return 0;
11687         default:
11688                 return rte_flow_error_set(error, ENOTSUP,
11689                                           RTE_FLOW_ERROR_TYPE_ACTION,
11690                                           NULL,
11691                                           "action type not supported");
11692         }
11693 }
11694
11695 /**
11696  * Update the shared RSS action configuration in place.
11697  *
11698  * @param[in] dev
11699  *   Pointer to the Ethernet device structure.
11700  * @param[in] idx
11701  *   The shared RSS action object ID to be updated.
11702  * @param[in] action_conf
11703  *   RSS action specification used to modify *shared_rss*.
11704  * @param[out] error
11705  *   Perform verbose error reporting if not NULL. Initialized in case of
11706  *   error only.
11707  *
11708  * @return
11709  *   0 on success, otherwise negative errno value.
11710  * @note Currently only the update of RSS queues is supported.
11711  */
11712 static int
11713 __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
11714                             const struct rte_flow_action_rss *action_conf,
11715                             struct rte_flow_error *error)
11716 {
11717         struct mlx5_priv *priv = dev->data->dev_private;
11718         struct mlx5_shared_action_rss *shared_rss =
11719             mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
11720         int ret = 0;
11721         void *queue = NULL;
11722         uint16_t *queue_old = NULL;
11723         uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
11724
11725         if (!shared_rss)
11726                 return rte_flow_error_set(error, EINVAL,
11727                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11728                                           "invalid shared action to update");
11729         queue = mlx5_malloc(MLX5_MEM_ZERO,
11730                             RTE_ALIGN_CEIL(queue_size, sizeof(void *)),
11731                             0, SOCKET_ID_ANY);
11732         if (!queue)
11733                 return rte_flow_error_set(error, ENOMEM,
11734                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11735                                           NULL,
11736                                           "cannot allocate resource memory");
11737         memcpy(queue, action_conf->queue, queue_size);
11738         MLX5_ASSERT(shared_rss->ind_tbl);
11739         rte_spinlock_lock(&shared_rss->action_rss_sl);
11740         queue_old = shared_rss->ind_tbl->queues;
11741         ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
11742                                         queue, action_conf->queue_num, true);
11743         if (ret) {
11744                 mlx5_free(queue);
11745                 ret = rte_flow_error_set(error, rte_errno,
11746                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
11747                                           "cannot update indirection table");
11748         } else {
11749                 mlx5_free(queue_old);
11750                 shared_rss->origin.queue = queue;
11751                 shared_rss->origin.queue_num = action_conf->queue_num;
11752         }
11753         rte_spinlock_unlock(&shared_rss->action_rss_sl);
11754         return ret;
11755 }
11756
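/*
 * Note the queue array swap above: the new array is committed to
 * shared_rss->origin only after mlx5_ind_table_obj_modify() succeeds;
 * on failure the fresh copy is freed and the old array stays attached,
 * so concurrent users always see a consistent queue list.
 */
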
11757 /**
11758  * Update the shared action configuration in place, lock free
11759  * (the mutex should be acquired by the caller).
11760  *
11761  * @param[in] dev
11762  *   Pointer to the Ethernet device structure.
11763  * @param[in] action
11764  *   The shared action object to be updated.
11765  * @param[in] action_conf
11766  *   Action specification used to modify *action*.
11767  *   *action_conf* must be of a type matching the type of *action*,
11768  *   otherwise it is considered invalid.
11769  * @param[out] error
11770  *   Perform verbose error reporting if not NULL. Initialized in case of
11771  *   error only.
11772  *
11773  * @return
11774  *   0 on success, otherwise negative errno value.
11775  */
11776 static int
11777 flow_dv_action_update(struct rte_eth_dev *dev,
11778                         struct rte_flow_shared_action *action,
11779                         const void *action_conf,
11780                         struct rte_flow_error *err)
11781 {
11782         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11783         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11784         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11785
11786         switch (type) {
11787         case MLX5_SHARED_ACTION_TYPE_RSS:
11788                 return __flow_dv_action_rss_update(dev, idx, action_conf, err);
11789         default:
11790                 return rte_flow_error_set(err, ENOTSUP,
11791                                           RTE_FLOW_ERROR_TYPE_ACTION,
11792                                           NULL,
11793                                           "action type update not supported");
11794         }
11795 }
11796
11797 static int
11798 flow_dv_action_query(struct rte_eth_dev *dev,
11799                      const struct rte_flow_shared_action *action, void *data,
11800                      struct rte_flow_error *error)
11801 {
11802         struct mlx5_age_param *age_param;
11803         struct rte_flow_query_age *resp;
11804         uint32_t act_idx = (uint32_t)(uintptr_t)action;
11805         uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
11806         uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
11807
11808         switch (type) {
11809         case MLX5_SHARED_ACTION_TYPE_AGE:
11810                 age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
11811                 resp = data;
11812                 resp->aged = __atomic_load_n(&age_param->state,
11813                                               __ATOMIC_RELAXED) == AGE_TMOUT ?
11814                                                                           1 : 0;
11815                 resp->sec_since_last_hit_valid = !resp->aged;
11816                 if (resp->sec_since_last_hit_valid)
11817                         resp->sec_since_last_hit = __atomic_load_n
11818                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11819                 return 0;
11820         default:
11821                 return rte_flow_error_set(error, ENOTSUP,
11822                                           RTE_FLOW_ERROR_TYPE_ACTION,
11823                                           NULL,
11824                                           "action type query not supported");
11825         }
11826 }
11827
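/*
 * Usage sketch (illustrative; handle and error are caller-provided):
 *
 *	struct rte_flow_query_age age = { 0 };
 *
 *	if (!flow_dv_action_query(dev, handle, &age, &error) && age.aged)
 *		DRV_LOG(DEBUG, "shared AGE action already expired");
 */
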
11828 /**
11829  * Query a DV flow rule for its statistics via DevX.
11830  *
11831  * @param[in] dev
11832  *   Pointer to Ethernet device.
11833  * @param[in] flow
11834  *   Pointer to the sub flow.
11835  * @param[out] data
11836  *   Data retrieved by the query.
11837  * @param[out] error
11838  *   Perform verbose error reporting if not NULL.
11839  *
11840  * @return
11841  *   0 on success, a negative errno value otherwise and rte_errno is set.
11842  */
11843 static int
11844 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
11845                     void *data, struct rte_flow_error *error)
11846 {
11847         struct mlx5_priv *priv = dev->data->dev_private;
11848         struct rte_flow_query_count *qc = data;
11849
11850         if (!priv->config.devx)
11851                 return rte_flow_error_set(error, ENOTSUP,
11852                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11853                                           NULL,
11854                                           "counters are not supported");
11855         if (flow->counter) {
11856                 uint64_t pkts, bytes;
11857                 struct mlx5_flow_counter *cnt;
11858
11859                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter,
11860                                                  NULL);
11861                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
11862                                                &bytes);
11863
11864                 if (err)
11865                         return rte_flow_error_set(error, -err,
11866                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11867                                         NULL, "cannot read counters");
11868                 qc->hits_set = 1;
11869                 qc->bytes_set = 1;
11870                 qc->hits = pkts - cnt->hits;
11871                 qc->bytes = bytes - cnt->bytes;
11872                 if (qc->reset) {
11873                         cnt->hits = pkts;
11874                         cnt->bytes = bytes;
11875                 }
11876                 return 0;
11877         }
11878         return rte_flow_error_set(error, EINVAL,
11879                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11880                                   NULL,
11881                                   "counters are not available");
11882 }
11883
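/*
 * Counter readings are cumulative in hardware; the query above reports
 * deltas against the values cached at the last reset (cnt->hits and
 * cnt->bytes) and, when qc->reset is set, re-bases that cache, e.g. a
 * second query issued right after a reset reports hits == 0.
 */
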
11884 /**
11885  * Query a flow rule AGE action for aging information.
11886  *
11887  * @param[in] dev
11888  *   Pointer to Ethernet device.
11889  * @param[in] flow
11890  *   Pointer to the sub flow.
11891  * @param[out] data
11892  *   Data retrieved by the query.
11893  * @param[out] error
11894  *   Perform verbose error reporting if not NULL.
11895  *
11896  * @return
11897  *   0 on success, a negative errno value otherwise and rte_errno is set.
11898  */
11899 static int
11900 flow_dv_query_age(struct rte_eth_dev *dev, struct rte_flow *flow,
11901                   void *data, struct rte_flow_error *error)
11902 {
11903         struct rte_flow_query_age *resp = data;
11904         struct mlx5_age_param *age_param;
11905
11906         if (flow->age) {
11907                 struct mlx5_aso_age_action *act =
11908                                      flow_aso_age_get_by_idx(dev, flow->age);
11909
11910                 age_param = &act->age_params;
11911         } else if (flow->counter) {
11912                 age_param = flow_dv_counter_idx_get_age(dev, flow->counter);
11913
11914                 if (!age_param || !age_param->timeout)
11915                         return rte_flow_error_set
11916                                         (error, EINVAL,
11917                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11918                                          NULL, "cannot read age data");
11919         } else {
11920                 return rte_flow_error_set(error, EINVAL,
11921                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
11922                                           NULL, "age data not available");
11923         }
11924         resp->aged = __atomic_load_n(&age_param->state, __ATOMIC_RELAXED) ==
11925                                      AGE_TMOUT ? 1 : 0;
11926         resp->sec_since_last_hit_valid = !resp->aged;
11927         if (resp->sec_since_last_hit_valid)
11928                 resp->sec_since_last_hit = __atomic_load_n
11929                              (&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
11930         return 0;
11931 }
11932
11933 /**
11934  * Query a flow.
11935  *
11936  * @see rte_flow_query()
11937  * @see rte_flow_ops
11938  */
11939 static int
11940 flow_dv_query(struct rte_eth_dev *dev,
11941               struct rte_flow *flow __rte_unused,
11942               const struct rte_flow_action *actions __rte_unused,
11943               void *data __rte_unused,
11944               struct rte_flow_error *error __rte_unused)
11945 {
11946         int ret = -EINVAL;
11947
11948         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
11949                 switch (actions->type) {
11950                 case RTE_FLOW_ACTION_TYPE_VOID:
11951                         break;
11952                 case RTE_FLOW_ACTION_TYPE_COUNT:
11953                         ret = flow_dv_query_count(dev, flow, data, error);
11954                         break;
11955                 case RTE_FLOW_ACTION_TYPE_AGE:
11956                         ret = flow_dv_query_age(dev, flow, data, error);
11957                         break;
11958                 default:
11959                         return rte_flow_error_set(error, ENOTSUP,
11960                                                   RTE_FLOW_ERROR_TYPE_ACTION,
11961                                                   actions,
11962                                                   "action not supported");
11963                 }
11964         }
11965         return ret;
11966 }
11967
11968 /**
11969  * Destroy the meter table set.
11970  * Lock free (the mutex must be acquired by the caller).
11971  *
11972  * @param[in] dev
11973  *   Pointer to Ethernet device.
11974  * @param[in] tbl
11975  *   Pointer to the meter table set.
11976  *
11977  * @return
11978  *   Always 0.
11979  */
11980 static int
11981 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
11982                         struct mlx5_meter_domains_infos *tbl)
11983 {
11984         struct mlx5_priv *priv = dev->data->dev_private;
11985         struct mlx5_meter_domains_infos *mtd =
11986                                 (struct mlx5_meter_domains_infos *)tbl;
11987
11988         if (!mtd || !priv->config.dv_flow_en)
11989                 return 0;
11990         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
11991                 claim_zero(mlx5_flow_os_destroy_flow
11992                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
11993         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
11994                 claim_zero(mlx5_flow_os_destroy_flow
11995                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
11996         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
11997                 claim_zero(mlx5_flow_os_destroy_flow
11998                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
11999         if (mtd->egress.color_matcher)
12000                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12001                            (mtd->egress.color_matcher));
12002         if (mtd->egress.any_matcher)
12003                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12004                            (mtd->egress.any_matcher));
12005         if (mtd->egress.tbl)
12006                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.tbl);
12007         if (mtd->egress.sfx_tbl)
12008                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->egress.sfx_tbl);
12009         if (mtd->ingress.color_matcher)
12010                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12011                            (mtd->ingress.color_matcher));
12012         if (mtd->ingress.any_matcher)
12013                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12014                            (mtd->ingress.any_matcher));
12015         if (mtd->ingress.tbl)
12016                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->ingress.tbl);
12017         if (mtd->ingress.sfx_tbl)
12018                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12019                                              mtd->ingress.sfx_tbl);
12020         if (mtd->transfer.color_matcher)
12021                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12022                            (mtd->transfer.color_matcher));
12023         if (mtd->transfer.any_matcher)
12024                 claim_zero(mlx5_flow_os_destroy_flow_matcher
12025                            (mtd->transfer.any_matcher));
12026         if (mtd->transfer.tbl)
12027                 flow_dv_tbl_resource_release(MLX5_SH(dev), mtd->transfer.tbl);
12028         if (mtd->transfer.sfx_tbl)
12029                 flow_dv_tbl_resource_release(MLX5_SH(dev),
12030                                              mtd->transfer.sfx_tbl);
12031         if (mtd->drop_actn)
12032                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
12033         mlx5_free(mtd);
12034         return 0;
12035 }
12036
12037 /* Number of meter flow actions, count and jump or count and drop. */
12038 #define METER_ACTIONS 2
12039
12040 /**
12041  * Create the meter table and suffix table for the specified domain.
12042  *
12043  * @param[in] dev
12044  *   Pointer to Ethernet device.
12045  * @param[in,out] mtb
12046  *   Pointer to DV meter table set.
12047  * @param[in] egress
12048  *   Nonzero to create the egress domain tables.
12049  * @param[in] transfer
12050  *   Nonzero to create the FDB (transfer) domain tables.
12051  * @param[in] color_reg_c_idx
12052  *   Reg C index for color match.
12053  *
12054  * @return
12055  *   0 on success, -1 otherwise and rte_errno is set.
12056  */
12057 static int
12058 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
12059                            struct mlx5_meter_domains_infos *mtb,
12060                            uint8_t egress, uint8_t transfer,
12061                            uint32_t color_reg_c_idx)
12062 {
12063         struct mlx5_priv *priv = dev->data->dev_private;
12064         struct mlx5_dev_ctx_shared *sh = priv->sh;
12065         struct mlx5_flow_dv_match_params mask = {
12066                 .size = sizeof(mask.buf),
12067         };
12068         struct mlx5_flow_dv_match_params value = {
12069                 .size = sizeof(value.buf),
12070         };
12071         struct mlx5dv_flow_matcher_attr dv_attr = {
12072                 .type = IBV_FLOW_ATTR_NORMAL,
12073                 .priority = 0,
12074                 .match_criteria_enable = 0,
12075                 .match_mask = (void *)&mask,
12076         };
12077         void *actions[METER_ACTIONS];
12078         struct mlx5_meter_domain_info *dtb;
12079         struct rte_flow_error error;
12080         int i = 0;
12081         int ret;
12082
12083         if (transfer)
12084                 dtb = &mtb->transfer;
12085         else if (egress)
12086                 dtb = &mtb->egress;
12087         else
12088                 dtb = &mtb->ingress;
12089         /* Create the meter table with METER level. */
12090         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
12091                                             egress, transfer, false, NULL, 0,
12092                                             0, &error);
12093         if (!dtb->tbl) {
12094                 DRV_LOG(ERR, "Failed to create meter policer table.");
12095                 return -1;
12096         }
12097         /* Create the meter suffix table with SUFFIX level. */
12098         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
12099                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
12100                                             egress, transfer, false, NULL, 0,
12101                                             0, &error);
12102         if (!dtb->sfx_tbl) {
12103                 DRV_LOG(ERR, "Failed to create meter suffix table.");
12104                 return -1;
12105         }
12106         /* Create matchers, Any and Color. */
12107         dv_attr.priority = 3;
12108         dv_attr.match_criteria_enable = 0;
12109         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12110                                                &dtb->any_matcher);
12111         if (ret) {
12112                 DRV_LOG(ERR, "Failed to create meter"
12113                              " policer default matcher.");
12114                 goto error_exit;
12115         }
12116         dv_attr.priority = 0;
12117         dv_attr.match_criteria_enable =
12118                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
12119         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
12120                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
12121         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
12122                                                &dtb->color_matcher);
12123         if (ret) {
12124                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
12125                 goto error_exit;
12126         }
12127         if (mtb->count_actns[RTE_MTR_DROPPED])
12128                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
12129         actions[i++] = mtb->drop_actn;
12130         /* Default rule: lowest priority, match any, actions: drop. */
12131         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
12132                                        actions,
12133                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
12134         if (ret) {
12135                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
12136                 goto error_exit;
12137         }
12138         return 0;
12139 error_exit:
12140         return -1;
12141 }
12142
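/*
 * Sketch of how the color matcher created above is consumed (see
 * flow_dv_create_policer_forward_rule() below, whose variable names are
 * reused here): the matcher carries a full mask on the color register, so
 * each per-color rule only rewrites the match value, e.g. for green packets:
 *
 *	flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
 *			       rte_col_2_mlx5_col(RTE_COLOR_GREEN),
 *			       UINT8_MAX);
 */
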
12143 /**
12144  * Create the needed meter and suffix tables.
12145  * Lock free (the mutex must be acquired by the caller).
12146  *
12147  * @param[in] dev
12148  *   Pointer to Ethernet device.
12149  * @param[in] fm
12150  *   Pointer to the flow meter.
12151  *
12152  * @return
12153  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
12154  */
12155 static struct mlx5_meter_domains_infos *
12156 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
12157                        const struct mlx5_flow_meter *fm)
12158 {
12159         struct mlx5_priv *priv = dev->data->dev_private;
12160         struct mlx5_meter_domains_infos *mtb;
12161         int ret;
12162         int i;
12163
12164         if (!priv->mtr_en) {
12165                 rte_errno = ENOTSUP;
12166                 return NULL;
12167         }
12168         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
12169         if (!mtb) {
12170                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
12171                 return NULL;
12172         }
12173         /* Create meter count actions. */
12174         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
12175                 struct mlx5_flow_counter *cnt;
12176                 if (!fm->policer_stats.cnt[i])
12177                         continue;
12178                 cnt = flow_dv_counter_get_by_idx(dev,
12179                       fm->policer_stats.cnt[i], NULL);
12180                 mtb->count_actns[i] = cnt->action;
12181         }
12182         /* Create drop action. */
12183         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
12184         if (ret) {
12185                 DRV_LOG(ERR, "Failed to create drop action.");
12186                 goto error_exit;
12187         }
12188         /* Egress meter table. */
12189         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
12190         if (ret) {
12191                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
12192                 goto error_exit;
12193         }
12194         /* Ingress meter table. */
12195         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
12196         if (ret) {
12197                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
12198                 goto error_exit;
12199         }
12200         /* FDB meter table. */
12201         if (priv->config.dv_esw_en) {
12202                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
12203                                                  priv->mtr_color_reg);
12204                 if (ret) {
12205                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
12206                         goto error_exit;
12207                 }
12208         }
12209         return mtb;
12210 error_exit:
12211         flow_dv_destroy_mtr_tbl(dev, mtb);
12212         return NULL;
12213 }
12214
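/*
 * Application-side sketch (illustrative only, not part of the driver): the
 * meter tables built above back the generic rte_mtr API. port_id, profile_id
 * and mtr_id are assumptions of this example; CIR/CBS/EBS values are in
 * bytes. The resulting mtr_id is then referenced from flow rules through
 * RTE_FLOW_ACTION_TYPE_METER.
 *
 *	struct rte_mtr_meter_profile profile = {
 *		.alg = RTE_MTR_SRTCM_RFC2697,
 *		.srtcm_rfc2697 = { .cir = 1000000, .cbs = 65536, .ebs = 65536 },
 *	};
 *	struct rte_mtr_params params = {
 *		.meter_profile_id = profile_id,
 *		.meter_enable = 1,
 *		.action = { MTR_POLICER_ACTION_COLOR_GREEN,
 *			    MTR_POLICER_ACTION_COLOR_YELLOW,
 *			    MTR_POLICER_ACTION_DROP },
 *	};
 *	struct rte_mtr_error mtr_error;
 *
 *	if (rte_mtr_meter_profile_add(port_id, profile_id, &profile,
 *				      &mtr_error) == 0)
 *		rte_mtr_create(port_id, mtr_id, &params, 0, &mtr_error);
 */
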
12215 /**
12216  * Destroy the policer rules of a domain.
12217  *
12218  * @param[in] dt
12219  *   Pointer to domain table.
12220  */
12221 static void
12222 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
12223 {
12224         int i;
12225
12226         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12227                 if (dt->policer_rules[i]) {
12228                         claim_zero(mlx5_flow_os_destroy_flow
12229                                    (dt->policer_rules[i]));
12230                         dt->policer_rules[i] = NULL;
12231                 }
12232         }
12233         if (dt->jump_actn) {
12234                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
12235                 dt->jump_actn = NULL;
12236         }
12237 }
12238
12239 /**
12240  * Destroy policer rules.
12241  *
12242  * @param[in] dev
12243  *   Pointer to Ethernet device.
12244  * @param[in] fm
12245  *   Pointer to flow meter structure.
12246  * @param[in] attr
12247  *   Pointer to flow attributes.
12248  *
12249  * @return
12250  *   Always 0.
12251  */
12252 static int
12253 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
12254                               const struct mlx5_flow_meter *fm,
12255                               const struct rte_flow_attr *attr)
12256 {
12257         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
12258
12259         if (!mtb)
12260                 return 0;
12261         if (attr->egress)
12262                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
12263         if (attr->ingress)
12264                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
12265         if (attr->transfer)
12266                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
12267         return 0;
12268 }
12269
12270 /**
12271  * Create the meter policer rules for the specified domain.
12272  *
12273  * @param[in] fm
12274  *   Pointer to flow meter structure.
12275  * @param[in] dtb
12276  *   Pointer to the DV meter domain table.
12277  * @param[in] mtr_reg_c
12278  *   Color match REG_C.
12279  *
12280  * @return
12281  *   0 on success, -1 otherwise.
12282  */
12283 static int
12284 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
12285                                     struct mlx5_meter_domain_info *dtb,
12286                                     uint8_t mtr_reg_c)
12287 {
12288         struct mlx5_flow_dv_match_params matcher = {
12289                 .size = sizeof(matcher.buf),
12290         };
12291         struct mlx5_flow_dv_match_params value = {
12292                 .size = sizeof(value.buf),
12293         };
12294         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12295         void *actions[METER_ACTIONS];
12296         int i;
12297         int ret = 0;
12298
12299         /* Create jump action. */
12300         if (!dtb->jump_actn)
12301                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
12302                                 (dtb->sfx_tbl->obj, &dtb->jump_actn);
12303         if (ret) {
12304                 DRV_LOG(ERR, "Failed to create policer jump action.");
12305                 goto error;
12306         }
12307         for (i = 0; i < RTE_MTR_DROPPED; i++) {
12308                 int j = 0;
12309
12310                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
12311                                        rte_col_2_mlx5_col(i), UINT8_MAX);
12312                 if (mtb->count_actns[i])
12313                         actions[j++] = mtb->count_actns[i];
12314                 if (fm->action[i] == MTR_POLICER_ACTION_DROP)
12315                         actions[j++] = mtb->drop_actn;
12316                 else
12317                         actions[j++] = dtb->jump_actn;
12318                 ret = mlx5_flow_os_create_flow(dtb->color_matcher,
12319                                                (void *)&value, j, actions,
12320                                                &dtb->policer_rules[i]);
12321                 if (ret) {
12322                         DRV_LOG(ERR, "Failed to create policer rule.");
12323                         goto error;
12324                 }
12325         }
12326         return 0;
12327 error:
12328         rte_errno = errno;
12329         return -1;
12330 }
12331
12332 /**
12333  * Create policer rules.
12334  *
12335  * @param[in] dev
12336  *   Pointer to Ethernet device.
12337  * @param[in] fm
12338  *   Pointer to flow meter structure.
12339  * @param[in] attr
12340  *   Pointer to flow attributes.
12341  *
12342  * @return
12343  *   0 on success, -1 otherwise.
12344  */
12345 static int
12346 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
12347                              struct mlx5_flow_meter *fm,
12348                              const struct rte_flow_attr *attr)
12349 {
12350         struct mlx5_priv *priv = dev->data->dev_private;
12351         struct mlx5_meter_domains_infos *mtb = fm->mfts;
12352         int ret;
12353
12354         if (attr->egress) {
12355                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
12356                                                 priv->mtr_color_reg);
12357                 if (ret) {
12358                         DRV_LOG(ERR, "Failed to create egress policer.");
12359                         goto error;
12360                 }
12361         }
12362         if (attr->ingress) {
12363                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
12364                                                 priv->mtr_color_reg);
12365                 if (ret) {
12366                         DRV_LOG(ERR, "Failed to create ingress policer.");
12367                         goto error;
12368                 }
12369         }
12370         if (attr->transfer) {
12371                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
12372                                                 priv->mtr_color_reg);
12373                 if (ret) {
12374                         DRV_LOG(ERR, "Failed to create transfer policer.");
12375                         goto error;
12376                 }
12377         }
12378         return 0;
12379 error:
12380         flow_dv_destroy_policer_rules(dev, fm, attr);
12381         return -1;
12382 }
12383
12384 /**
12385  * Validate the batch counter support in root table.
12386  *
12387  * Create a simple flow with an invalid counter and a drop action on the root
12388  * table to check whether batch counters with offset are supported there.
12389  *
12390  * @param[in] dev
12391  *   Pointer to rte_eth_dev structure.
12392  *
12393  * @return
12394  *   0 on success, a negative errno value otherwise and rte_errno is set.
12395  */
12396 int
12397 mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev)
12398 {
12399         struct mlx5_priv *priv = dev->data->dev_private;
12400         struct mlx5_dev_ctx_shared *sh = priv->sh;
12401         struct mlx5_flow_dv_match_params mask = {
12402                 .size = sizeof(mask.buf),
12403         };
12404         struct mlx5_flow_dv_match_params value = {
12405                 .size = sizeof(value.buf),
12406         };
12407         struct mlx5dv_flow_matcher_attr dv_attr = {
12408                 .type = IBV_FLOW_ATTR_NORMAL,
12409                 .priority = 0,
12410                 .match_criteria_enable = 0,
12411                 .match_mask = (void *)&mask,
12412         };
12413         void *actions[2] = { 0 };
12414         struct mlx5_flow_tbl_resource *tbl = NULL;
12415         struct mlx5_devx_obj *dcs = NULL;
12416         void *matcher = NULL;
12417         void *flow = NULL;
12418         int ret = -1;
12419
12420         tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL, 0, 0, NULL);
12421         if (!tbl)
12422                 goto err;
12423         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
12424         if (!dcs)
12425                 goto err;
12426         ret = mlx5_flow_os_create_flow_action_count(dcs->obj, UINT16_MAX,
12427                                                     &actions[0]);
12428         if (ret)
12429                 goto err;
12430         actions[1] = priv->drop_queue.hrxq->action;
12431         dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
12432         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
12433                                                &matcher);
12434         if (ret)
12435                 goto err;
12436         ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 2,
12437                                        actions, &flow);
12438 err:
12439         /*
12440          * If the batch counter with offset is not supported, the driver does
12441          * not validate the invalid offset value and flow creation should
12442          * succeed, meaning batch counters are unsupported in the root table.
12443          *
12444          * Otherwise, if flow creation fails, counter offset is supported.
12445          */
12446         if (flow) {
12447                 DRV_LOG(INFO, "Batch counter is not supported in root "
12448                               "table. Switch to fallback mode.");
12449                 rte_errno = ENOTSUP;
12450                 ret = -rte_errno;
12451                 claim_zero(mlx5_flow_os_destroy_flow(flow));
12452         } else {
12453                 /* Check the matcher to ensure failure was at flow creation. */
12454                 if (!matcher || errno != EINVAL)
12455                         DRV_LOG(ERR, "Unexpected error in counter offset "
12456                                      "support detection");
12457                 ret = 0;
12458         }
12459         if (actions[0])
12460                 claim_zero(mlx5_flow_os_destroy_flow_action(actions[0]));
12461         if (matcher)
12462                 claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
12463         if (tbl)
12464                 flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
12465         if (dcs)
12466                 claim_zero(mlx5_devx_cmd_destroy(dcs));
12467         return ret;
12468 }
12469
12470 /**
12471  * Query a devx counter.
12472  *
12473  * @param[in] dev
12474  *   Pointer to the Ethernet device structure.
12475  * @param[in] counter
12476  *   Index of the flow counter.
12477  * @param[in] clear
12478  *   Set to clear the counter statistics.
12479  * @param[out] pkts
12480  *   The statistics value of packets.
12481  * @param[out] bytes
12482  *   The statistics value of bytes.
12483  *
12484  * @return
12485  *   0 on success, -1 otherwise.
12486  */
12487 static int
12488 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
12489                       uint64_t *pkts, uint64_t *bytes)
12490 {
12491         struct mlx5_priv *priv = dev->data->dev_private;
12492         struct mlx5_flow_counter *cnt;
12493         uint64_t inn_pkts, inn_bytes;
12494         int ret;
12495
12496         if (!priv->config.devx)
12497                 return -1;
12498
12499         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
12500         if (ret)
12501                 return -1;
12502         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
12503         *pkts = inn_pkts - cnt->hits;
12504         *bytes = inn_bytes - cnt->bytes;
12505         if (clear) {
12506                 cnt->hits = inn_pkts;
12507                 cnt->bytes = inn_bytes;
12508         }
12509         return 0;
12510 }
12511
12512 /**
12513  * Get aged-out flows.
12514  *
12515  * @param[in] dev
12516  *   Pointer to the Ethernet device structure.
12517  * @param[in] context
12518  *   The address of an array of pointers to the aged-out flow contexts.
12519  * @param[in] nb_contexts
12520  *   The length of the context array.
12521  * @param[out] error
12522  *   Perform verbose error reporting if not NULL. Initialized in case of
12523  *   error only.
12524  *
12525  * @return
12526  *   The number of contexts retrieved on success, a negative errno value
12527  *   otherwise. If nb_contexts is 0, the total number of aged-out contexts
12528  *   is returned. If nb_contexts is not 0, the number of aged-out flows
12529  *   reported in the context array is returned.
12531  */
12532 static int
12533 flow_get_aged_flows(struct rte_eth_dev *dev,
12534                     void **context,
12535                     uint32_t nb_contexts,
12536                     struct rte_flow_error *error)
12537 {
12538         struct mlx5_priv *priv = dev->data->dev_private;
12539         struct mlx5_age_info *age_info;
12540         struct mlx5_age_param *age_param;
12541         struct mlx5_flow_counter *counter;
12542         struct mlx5_aso_age_action *act;
12543         int nb_flows = 0;
12544
12545         if (nb_contexts && !context)
12546                 return rte_flow_error_set(error, EINVAL,
12547                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12548                                           NULL, "empty context");
12549         age_info = GET_PORT_AGE_INFO(priv);
12550         rte_spinlock_lock(&age_info->aged_sl);
12551         LIST_FOREACH(act, &age_info->aged_aso, next) {
12552                 nb_flows++;
12553                 if (nb_contexts) {
12554                         context[nb_flows - 1] =
12555                                                 act->age_params.context;
12556                         if (!(--nb_contexts))
12557                                 break;
12558                 }
12559         }
12560         TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
12561                 nb_flows++;
12562                 if (nb_contexts) {
12563                         age_param = MLX5_CNT_TO_AGE(counter);
12564                         context[nb_flows - 1] = age_param->context;
12565                         if (!(--nb_contexts))
12566                                 break;
12567                 }
12568         }
12569         rte_spinlock_unlock(&age_info->aged_sl);
12570         MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
12571         return nb_flows;
12572 }
12573
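/*
 * Usage sketch (illustrative only, not part of the driver): the expected
 * two-step retrieval pattern for flow_get_aged_flows() above, typically run
 * from an RTE_ETH_EVENT_FLOW_AGED event callback. port_id is an assumption
 * of this example and handle_aged_flow() is a hypothetical application
 * helper.
 *
 *	struct rte_flow_error error;
 *	int total = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 *	if (total > 0) {
 *		void **contexts = calloc(total, sizeof(*contexts));
 *		int n = rte_flow_get_aged_flows(port_id, contexts, total,
 *						&error);
 *		int i;
 *
 *		for (i = 0; i < n; i++)
 *			handle_aged_flow(contexts[i]);
 *		free(contexts);
 *	}
 */
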
12574 /*
12575  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
12576  */
12577 static uint32_t
12578 flow_dv_counter_allocate(struct rte_eth_dev *dev)
12579 {
12580         return flow_dv_counter_alloc(dev, 0);
12581 }
12582
12583 /**
12584  * Validate shared action.
12585  * Dispatcher for action type specific validation.
12586  *
12587  * @param[in] dev
12588  *   Pointer to the Ethernet device structure.
12589  * @param[in] conf
12590  *   Shared action configuration.
12591  * @param[in] action
12592  *   The shared action object to validate.
12593  * @param[out] err
12594  *   Perform verbose error reporting if not NULL. Initialized in case of
12595  *   error only.
12596  *
12597  * @return
12598  *   0 on success, otherwise negative errno value.
12599  */
12600 static int
12601 flow_dv_action_validate(struct rte_eth_dev *dev,
12602                         const struct rte_flow_shared_action_conf *conf,
12603                         const struct rte_flow_action *action,
12604                         struct rte_flow_error *err)
12605 {
12606         struct mlx5_priv *priv = dev->data->dev_private;
12607
12608         RTE_SET_USED(conf);
12609         switch (action->type) {
12610         case RTE_FLOW_ACTION_TYPE_RSS:
12611                 return mlx5_validate_action_rss(dev, action, err);
12612         case RTE_FLOW_ACTION_TYPE_AGE:
12613                 if (!priv->sh->aso_age_mng)
12614                         return rte_flow_error_set(err, ENOTSUP,
12615                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
12616                                                 NULL,
12617                                              "shared age action not supported");
12618                 return flow_dv_validate_action_age(0, action, dev, err);
12619         default:
12620                 return rte_flow_error_set(err, ENOTSUP,
12621                                           RTE_FLOW_ERROR_TYPE_ACTION,
12622                                           NULL,
12623                                           "action type not supported");
12624         }
12625 }
12626
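/*
 * Usage sketch (illustrative only, not part of the driver): creating a
 * shared AGE action that passes the validation dispatcher above, then
 * referencing it from a flow rule. port_id and flow_context are assumptions
 * of this example.
 *
 *	const struct rte_flow_shared_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action_age age_conf = {
 *		.timeout = 10,
 *		.context = flow_context,
 *	};
 *	const struct rte_flow_action age = {
 *		.type = RTE_FLOW_ACTION_TYPE_AGE,
 *		.conf = &age_conf,
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow_shared_action *shared =
 *		rte_flow_shared_action_create(port_id, &conf, &age, &error);
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
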
12627 static int
12628 flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
12629 {
12630         struct mlx5_priv *priv = dev->data->dev_private;
12631         int ret = 0;
12632
12633         if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
12634                 ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
12635                                                 flags);
12636                 if (ret != 0)
12637                         return ret;
12638         }
12639         if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
12640                 ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
12641                 if (ret != 0)
12642                         return ret;
12643         }
12644         if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
12645                 ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
12646                 if (ret != 0)
12647                         return ret;
12648         }
12649         return 0;
12650 }
12651
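/*
 * Usage sketch (illustrative only, not part of the driver): assuming the
 * experimental rte_pmd_mlx5_sync_flow() entry point exported through
 * rte_pmd_mlx5.h, flow_dv_sync_domain() is reached as follows; port_id is
 * an assumption of this example.
 *
 *	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX | MLX5_DOMAIN_BIT_NIC_TX |
 *			   MLX5_DOMAIN_BIT_FDB;
 *
 *	if (rte_pmd_mlx5_sync_flow(port_id, domains) != 0)
 *		printf("flow domain sync failed\n");
 */
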
12652 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
12653         .validate = flow_dv_validate,
12654         .prepare = flow_dv_prepare,
12655         .translate = flow_dv_translate,
12656         .apply = flow_dv_apply,
12657         .remove = flow_dv_remove,
12658         .destroy = flow_dv_destroy,
12659         .query = flow_dv_query,
12660         .create_mtr_tbls = flow_dv_create_mtr_tbl,
12661         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
12662         .create_policer_rules = flow_dv_create_policer_rules,
12663         .destroy_policer_rules = flow_dv_destroy_policer_rules,
12664         .counter_alloc = flow_dv_counter_allocate,
12665         .counter_free = flow_dv_counter_free,
12666         .counter_query = flow_dv_counter_query,
12667         .get_aged_flows = flow_get_aged_flows,
12668         .action_validate = flow_dv_action_validate,
12669         .action_create = flow_dv_action_create,
12670         .action_destroy = flow_dv_action_destroy,
12671         .action_update = flow_dv_action_update,
12672         .action_query = flow_dv_action_query,
12673         .sync_domain = flow_dv_sync_domain,
12674 };
12675
12676 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
12677