net/mlx5: convert control path memory to unified malloc
drivers/net/mlx5/mlx5_flow_dv.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>

#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
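
/*
 * In the 802.1Q tag control information (TCI) word, PCP occupies bits
 * 15:13 and VID bits 11:0, so the masks above evaluate to 0xe000 and
 * 0x0fff respectively; bit 12 (DEI) is never touched by the VLAN
 * helpers in this file.
 */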

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};
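
/*
 * Keeping the bit-fields and the flat "attr" word in one union lets
 * flow_dv_attr_init() reset all layer flags in a single assignment
 * (attr->attr = 0) when a tunnel item restarts outer-layer detection.
 */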

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
                             struct mlx5_flow_tbl_resource *tbl);

static int
flow_dv_default_miss_resource_release(struct rte_eth_dev *dev);

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() guarantees that multiple L3/L4 layers can only occur
 * in tunnel mode. For tunnel mode, the items to be modified are the
 * outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. The
         * prefix flow's layer flags must be reused here because, after
         * the flow split, the suffix flow may not carry the user-defined
         * items.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size here is in bits, unlike in the other tables (bytes). */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

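/**
 * Mark the flow as an IP-in-IP or IPv6 encapsulation tunnel when the
 * IPv4/IPv6 item's next protocol matches one of these tunnel types.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item (only used by the debug assertion).
 * @param[in] next_protocol
 *   Effective next protocol value taken from the item.
 * @param[in,out] item_flags
 *   Item flags to update with the detected tunnel layer.
 * @param[in,out] tunnel
 *   Set to 1 when a tunnel encapsulation is detected.
 */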
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

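/**
 * Release the synchronizing object taken by flow_dv_shared_lock().
 * As with the lock, the mutex is only engaged if the dv context is
 * actually shared, i.e. for a multiport IB device with representors.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */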
static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
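
/*
 * For example, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP with vlan_pcp = 5
 * turns the TCI into (tci & ~0xe000) | (5 << 13): only the three PCP
 * bits change, while OF_SET_VLAN_VID rewrites only the low 12 VID bits.
 */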

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}
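
/*
 * For example, with data = {0x12, 0x34, 0x56} and size = 3 the first
 * two bytes are read as a big-endian 16-bit word (0x1234) and the third
 * byte is appended in the low bits, giving 0x123456 on any host.
 */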

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value sets the same offset as the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present; it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}
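
/*
 * Example of the width deduction above: a mask of 0x00ffff00 gives
 * off_b = rte_bsf32(0x00ffff00) = 8 and size_b = 32 - 8 -
 * __builtin_clz(0x00ffff00) = 16, i.e. a command touching only bits
 * 23:8 of the field; a full 0xffffffff mask maps to length 0, the
 * encoding this code uses for the whole 32-bit width.
 */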

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
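
/*
 * Worked example of the decrement emulation above: to decrement the
 * sequence number by 3, value becomes 3 * UINT32_MAX, whose low 32 bits
 * are 0xfffffffd, and adding 0xfffffffd to a 32-bit sequence number is
 * the same as subtracting 3 from it.
 */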

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NONE] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NONE);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NONE);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores byte ordering here because
                         * there is no endianness conversion in the
                         * datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
                {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0].id = reg_to_field[reg];
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * In datapath code there are no endianness
         * conversions for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
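
/*
 * Mask arithmetic above: RTE_IPV4_HDR_DSCP_MASK (0xfc) covers the upper
 * six bits of the ToS byte, so shifting it right by 2 leaves 0x3f,
 * matching the right-aligned 6-bit DSCP value carried in conf->dscp.
 */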

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits of IPv6 are not byte aligned,
         * rdma-core only accepts a byte-aligned DSCP value placed in
         * bits 0 to 5, for compatibility with IPv4. There is therefore
         * no need to shift the bits in the IPv6 case; rdma-core expects
         * the byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
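
/*
 * Mask arithmetic for the IPv6 case: RTE_IPV6_HDR_DSCP_MASK is
 * 0x0fc00000 (DSCP in bits 27:22 of vtc_flow), so shifting it right by
 * 22 yields the same right-aligned 0x3f mask as the IPv4 variant above.
 */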
1360
1361 /**
1362  * Validate MARK item.
1363  *
1364  * @param[in] dev
1365  *   Pointer to the rte_eth_dev structure.
1366  * @param[in] item
1367  *   Item specification.
1368  * @param[in] attr
1369  *   Attributes of flow that includes this item.
1370  * @param[out] error
1371  *   Pointer to error structure.
1372  *
1373  * @return
1374  *   0 on success, a negative errno value otherwise and rte_errno is set.
1375  */
1376 static int
1377 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1378                            const struct rte_flow_item *item,
1379                            const struct rte_flow_attr *attr __rte_unused,
1380                            struct rte_flow_error *error)
1381 {
1382         struct mlx5_priv *priv = dev->data->dev_private;
1383         struct mlx5_dev_config *config = &priv->config;
1384         const struct rte_flow_item_mark *spec = item->spec;
1385         const struct rte_flow_item_mark *mask = item->mask;
1386         const struct rte_flow_item_mark nic_mask = {
1387                 .id = priv->sh->dv_mark_mask,
1388         };
1389         int ret;
1390
1391         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1392                 return rte_flow_error_set(error, ENOTSUP,
1393                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1394                                           "extended metadata feature"
1395                                           " isn't enabled");
1396         if (!mlx5_flow_ext_mreg_supported(dev))
1397                 return rte_flow_error_set(error, ENOTSUP,
1398                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1399                                           "extended metadata register"
1400                                           " isn't supported");
1401         if (!nic_mask.id)
1402                 return rte_flow_error_set(error, ENOTSUP,
1403                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1404                                           "extended metadata register"
1405                                           " isn't available");
1406         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1407         if (ret < 0)
1408                 return ret;
1409         if (!spec)
1410                 return rte_flow_error_set(error, EINVAL,
1411                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1412                                           item->spec,
1413                                           "data cannot be empty");
1414         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1415                 return rte_flow_error_set(error, EINVAL,
1416                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1417                                           &spec->id,
1418                                           "mark id exceeds the limit");
1419         if (!mask)
1420                 mask = &nic_mask;
1421         if (!mask->id)
1422                 return rte_flow_error_set(error, EINVAL,
1423                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1424                                         "mask cannot be zero");
1425
1426         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1427                                         (const uint8_t *)&nic_mask,
1428                                         sizeof(struct rte_flow_item_mark),
1429                                         error);
1430         if (ret < 0)
1431                 return ret;
1432         return 0;
1433 }
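
/*
 * Illustrative sketch, not part of the driver: a pattern an application
 * could pass to hit the MARK item validation above. The function name
 * and the id/mask values are examples only; the real upper bound on the
 * id comes from priv->sh->dv_mark_mask.
 */
static __rte_unused void
mlx5_flow_dv_example_mark_item(void)
{
	static const struct rte_flow_item_mark mark_spec = { .id = 0x123 };
	static const struct rte_flow_item_mark mark_mask = { .id = 0xffffff };
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_MARK,
			.spec = &mark_spec,
			.mask = &mark_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	(void)pattern;
}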
1434
1435 /**
1436  * Validate META item.
1437  *
1438  * @param[in] dev
1439  *   Pointer to the rte_eth_dev structure.
1440  * @param[in] item
1441  *   Item specification.
1442  * @param[in] attr
1443  *   Attributes of flow that includes this item.
1444  * @param[out] error
1445  *   Pointer to error structure.
1446  *
1447  * @return
1448  *   0 on success, a negative errno value otherwise and rte_errno is set.
1449  */
1450 static int
1451 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1452                            const struct rte_flow_item *item,
1453                            const struct rte_flow_attr *attr,
1454                            struct rte_flow_error *error)
1455 {
1456         struct mlx5_priv *priv = dev->data->dev_private;
1457         struct mlx5_dev_config *config = &priv->config;
1458         const struct rte_flow_item_meta *spec = item->spec;
1459         const struct rte_flow_item_meta *mask = item->mask;
1460         struct rte_flow_item_meta nic_mask = {
1461                 .data = UINT32_MAX
1462         };
1463         int reg;
1464         int ret;
1465
1466         if (!spec)
1467                 return rte_flow_error_set(error, EINVAL,
1468                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1469                                           item->spec,
1470                                           "data cannot be empty");
1471         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1472                 if (!mlx5_flow_ext_mreg_supported(dev))
1473                         return rte_flow_error_set(error, ENOTSUP,
1474                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1475                                           "extended metadata register"
1476                                           " isn't supported");
1477                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1478                 if (reg < 0)
1479                         return reg;
1480                 if (reg == REG_B)
1481                         return rte_flow_error_set(error, ENOTSUP,
1482                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1483                                           "match on reg_b "
1484                                           "isn't supported");
1485                 if (reg != REG_A)
1486                         nic_mask.data = priv->sh->dv_meta_mask;
1487         } else if (attr->transfer) {
1488                 return rte_flow_error_set(error, ENOTSUP,
1489                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
1490                                         "extended metadata feature "
1491                                         "should be enabled when "
1492                                         "meta item is requested "
1493                                         "with e-switch mode");
1494         }
1495         if (!mask)
1496                 mask = &rte_flow_item_meta_mask;
1497         if (!mask->data)
1498                 return rte_flow_error_set(error, EINVAL,
1499                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1500                                         "mask cannot be zero");
1501
1502         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1503                                         (const uint8_t *)&nic_mask,
1504                                         sizeof(struct rte_flow_item_meta),
1505                                         error);
1506         return ret;
1507 }
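
/*
 * Illustrative sketch, not part of the driver: a META item as the
 * validator above expects it, i.e. with a non-empty spec and a non-zero
 * mask. Requires extended metadata (dv_xmeta_en) on this PMD; the
 * function name and data value are examples only.
 */
static __rte_unused void
mlx5_flow_dv_example_meta_item(void)
{
	static const struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
	static const struct rte_flow_item_meta meta_mask = {
		.data = UINT32_MAX,
	};
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_META,
			.spec = &meta_spec,
			.mask = &meta_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	(void)pattern;
}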
1508
1509 /**
1510  * Validate TAG item.
1511  *
1512  * @param[in] dev
1513  *   Pointer to the rte_eth_dev structure.
1514  * @param[in] item
1515  *   Item specification.
1516  * @param[in] attr
1517  *   Attributes of flow that includes this item.
1518  * @param[out] error
1519  *   Pointer to error structure.
1520  *
1521  * @return
1522  *   0 on success, a negative errno value otherwise and rte_errno is set.
1523  */
1524 static int
1525 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1526                           const struct rte_flow_item *item,
1527                           const struct rte_flow_attr *attr __rte_unused,
1528                           struct rte_flow_error *error)
1529 {
1530         const struct rte_flow_item_tag *spec = item->spec;
1531         const struct rte_flow_item_tag *mask = item->mask;
1532         const struct rte_flow_item_tag nic_mask = {
1533                 .data = RTE_BE32(UINT32_MAX),
1534                 .index = 0xff,
1535         };
1536         int ret;
1537
1538         if (!mlx5_flow_ext_mreg_supported(dev))
1539                 return rte_flow_error_set(error, ENOTSUP,
1540                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1541                                           "extensive metadata register"
1542                                           " isn't supported");
1543         if (!spec)
1544                 return rte_flow_error_set(error, EINVAL,
1545                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1546                                           item->spec,
1547                                           "data cannot be empty");
1548         if (!mask)
1549                 mask = &rte_flow_item_tag_mask;
1550         if (!mask->data)
1551                 return rte_flow_error_set(error, EINVAL,
1552                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1553                                         "mask cannot be zero");
1554
1555         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1556                                         (const uint8_t *)&nic_mask,
1557                                         sizeof(struct rte_flow_item_tag),
1558                                         error);
1559         if (ret < 0)
1560                 return ret;
1561         if (mask->index != 0xff)
1562                 return rte_flow_error_set(error, EINVAL,
1563                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1564                                           "partial mask for tag index"
1565                                           " is not supported");
1566         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1567         if (ret < 0)
1568                 return ret;
1569         MLX5_ASSERT(ret != REG_NONE);
1570         return 0;
1571 }
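
/*
 * Illustrative sketch, not part of the driver: a TAG item that passes
 * the checks above. Note the validator insists on a full 0xff index
 * mask. The function name and data/index values are examples only.
 */
static __rte_unused void
mlx5_flow_dv_example_tag_item(void)
{
	static const struct rte_flow_item_tag tag_spec = {
		.data = 0x22,
		.index = 1,
	};
	static const struct rte_flow_item_tag tag_mask = {
		.data = UINT32_MAX,
		.index = 0xff, /* Partial index masks are rejected. */
	};
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_TAG,
			.spec = &tag_spec,
			.mask = &tag_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	(void)pattern;
}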
1572
1573 /**
1574  * Validate vport item.
1575  *
1576  * @param[in] dev
1577  *   Pointer to the rte_eth_dev structure.
1578  * @param[in] item
1579  *   Item specification.
1580  * @param[in] attr
1581  *   Attributes of flow that includes this item.
1582  * @param[in] item_flags
1583  *   Bit-fields that holds the items detected until now.
1584  * @param[out] error
1585  *   Pointer to error structure.
1586  *
1587  * @return
1588  *   0 on success, a negative errno value otherwise and rte_errno is set.
1589  */
1590 static int
1591 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1592                               const struct rte_flow_item *item,
1593                               const struct rte_flow_attr *attr,
1594                               uint64_t item_flags,
1595                               struct rte_flow_error *error)
1596 {
1597         const struct rte_flow_item_port_id *spec = item->spec;
1598         const struct rte_flow_item_port_id *mask = item->mask;
1599         const struct rte_flow_item_port_id switch_mask = {
1600                         .id = 0xffffffff,
1601         };
1602         struct mlx5_priv *esw_priv;
1603         struct mlx5_priv *dev_priv;
1604         int ret;
1605
1606         if (!attr->transfer)
1607                 return rte_flow_error_set(error, EINVAL,
1608                                           RTE_FLOW_ERROR_TYPE_ITEM,
1609                                           NULL,
1610                                           "match on port id is valid only"
1611                                           " when transfer flag is enabled");
1612         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1613                 return rte_flow_error_set(error, ENOTSUP,
1614                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1615                                           "multiple source ports are not"
1616                                           " supported");
1617         if (!mask)
1618                 mask = &switch_mask;
1619         if (mask->id != 0xffffffff)
1620                 return rte_flow_error_set(error, ENOTSUP,
1621                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1622                                            mask,
1623                                            "no support for partial mask on"
1624                                            " \"id\" field");
1625         ret = mlx5_flow_item_acceptable
1626                                 (item, (const uint8_t *)mask,
1627                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1628                                  sizeof(struct rte_flow_item_port_id),
1629                                  error);
1630         if (ret)
1631                 return ret;
1632         if (!spec)
1633                 return 0;
1634         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1635         if (!esw_priv)
1636                 return rte_flow_error_set(error, rte_errno,
1637                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1638                                           "failed to obtain E-Switch info for"
1639                                           " port");
1640         dev_priv = mlx5_dev_to_eswitch_info(dev);
1641         if (!dev_priv)
1642                 return rte_flow_error_set(error, rte_errno,
1643                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1644                                           NULL,
1645                                           "failed to obtain E-Switch info");
1646         if (esw_priv->domain_id != dev_priv->domain_id)
1647                 return rte_flow_error_set(error, EINVAL,
1648                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1649                                           "cannot match on a port from a"
1650                                           " different E-Switch");
1651         return 0;
1652 }
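
/*
 * Illustrative sketch, not part of the driver: matching on a source
 * port is only valid together with attr.transfer, as enforced above.
 * The function name is hypothetical; the port id comes from the caller.
 */
static __rte_unused void
mlx5_flow_dv_example_port_id_item(uint32_t port)
{
	const struct rte_flow_attr attr = { .transfer = 1 };
	const struct rte_flow_item_port_id pid_spec = { .id = port };
	const struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
			.spec = &pid_spec,
			/* A NULL mask defaults to the full id mask. */
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	(void)attr;
	(void)pattern;
}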
1653
1654 /**
1655  * Validate VLAN item.
1656  *
1657  * @param[in] item
1658  *   Item specification.
1659  * @param[in] item_flags
1660  *   Bit-fields that holds the items detected until now.
1661  * @param[in] dev
1662  *   Ethernet device flow is being created on.
1663  * @param[out] error
1664  *   Pointer to error structure.
1665  *
1666  * @return
1667  *   0 on success, a negative errno value otherwise and rte_errno is set.
1668  */
1669 static int
1670 flow_dv_validate_item_vlan(const struct rte_flow_item *item,
1671                            uint64_t item_flags,
1672                            struct rte_eth_dev *dev,
1673                            struct rte_flow_error *error)
1674 {
1675         const struct rte_flow_item_vlan *mask = item->mask;
1676         const struct rte_flow_item_vlan nic_mask = {
1677                 .tci = RTE_BE16(UINT16_MAX),
1678                 .inner_type = RTE_BE16(UINT16_MAX),
1679         };
1680         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1681         int ret;
1682         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1683                                         MLX5_FLOW_LAYER_INNER_L4) :
1684                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1685                                         MLX5_FLOW_LAYER_OUTER_L4);
1686         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1687                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1688
1689         if (item_flags & vlanm)
1690                 return rte_flow_error_set(error, EINVAL,
1691                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1692                                           "multiple VLAN layers not supported");
1693         else if ((item_flags & l34m) != 0)
1694                 return rte_flow_error_set(error, EINVAL,
1695                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1696                                           "VLAN cannot follow L3/L4 layer");
1697         if (!mask)
1698                 mask = &rte_flow_item_vlan_mask;
1699         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1700                                         (const uint8_t *)&nic_mask,
1701                                         sizeof(struct rte_flow_item_vlan),
1702                                         error);
1703         if (ret)
1704                 return ret;
1705         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1706                 struct mlx5_priv *priv = dev->data->dev_private;
1707
1708                 if (priv->vmwa_context) {
1709                         /*
1710                          * Non-NULL context means we have a virtual machine
1711                          * and SR-IOV enabled, we have to create VLAN interface
1712                          * to make hypervisor to setup E-Switch vport
1713                          * context correctly. We avoid creating the multiple
1714                          * VLAN interfaces, so we cannot support VLAN tag mask.
1715                          */
1716                         return rte_flow_error_set(error, EINVAL,
1717                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1718                                                   item,
1719                                                   "VLAN tag mask is not"
1720                                                   " supported in virtual"
1721                                                   " environment");
1722                 }
1723         }
1724         return 0;
1725 }
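
/*
 * Illustrative sketch, not part of the driver: a VLAN item with the
 * full 12-bit VID mask, which avoids the partial-TCI-mask rejection in
 * virtualized (vmwa_context) setups noted above. The function name is
 * hypothetical.
 */
static __rte_unused void
mlx5_flow_dv_example_vlan_item(uint16_t vid)
{
	const struct rte_flow_item_vlan vlan_spec = {
		.tci = RTE_BE16(vid & 0x0fff),
	};
	static const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0x0fff), /* Full VID, no PCP/DEI bits. */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{
			.type = RTE_FLOW_ITEM_TYPE_VLAN,
			.spec = &vlan_spec,
			.mask = &vlan_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	(void)pattern;
}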
1726
1727 /*
1728  * GTP flags are contained in 1 byte of the format:
1729  * -------------------------------------------
1730  * | bit   | 0 - 2   | 3  | 4   | 5 | 6 | 7  |
1731  * |-----------------------------------------|
1732  * | value | Version | PT | Res | E | S | PN |
1733  * -------------------------------------------
1734  *
1735  * Matching is supported only for GTP flags E, S, PN.
1736  */
1737 #define MLX5_GTP_FLAGS_MASK     0x07
1738
1739 /**
1740  * Validate GTP item.
1741  *
1742  * @param[in] dev
1743  *   Pointer to the rte_eth_dev structure.
1744  * @param[in] item
1745  *   Item specification.
1746  * @param[in] item_flags
1747  *   Bit-fields that holds the items detected until now.
1748  * @param[out] error
1749  *   Pointer to error structure.
1750  *
1751  * @return
1752  *   0 on success, a negative errno value otherwise and rte_errno is set.
1753  */
1754 static int
1755 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1756                           const struct rte_flow_item *item,
1757                           uint64_t item_flags,
1758                           struct rte_flow_error *error)
1759 {
1760         struct mlx5_priv *priv = dev->data->dev_private;
1761         const struct rte_flow_item_gtp *spec = item->spec;
1762         const struct rte_flow_item_gtp *mask = item->mask;
1763         const struct rte_flow_item_gtp nic_mask = {
1764                 .v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
1765                 .msg_type = 0xff,
1766                 .teid = RTE_BE32(0xffffffff),
1767         };
1768
1769         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1770                 return rte_flow_error_set(error, ENOTSUP,
1771                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1772                                           "GTP support is not enabled");
1773         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1774                 return rte_flow_error_set(error, ENOTSUP,
1775                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1776                                           "multiple tunnel layers not"
1777                                           " supported");
1778         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1779                 return rte_flow_error_set(error, EINVAL,
1780                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1781                                           "no outer UDP layer found");
1782         if (!mask)
1783                 mask = &rte_flow_item_gtp_mask;
1784         if (spec && spec->v_pt_rsv_flags & ~MLX5_GTP_FLAGS_MASK)
1785                 return rte_flow_error_set(error, ENOTSUP,
1786                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1787                                           "Match is supported for GTP"
1788                                           " flags only");
1789         return mlx5_flow_item_acceptable
1790                 (item, (const uint8_t *)mask,
1791                  (const uint8_t *)&nic_mask,
1792                  sizeof(struct rte_flow_item_gtp),
1793                  error);
1794 }
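
/*
 * Illustrative sketch, not part of the driver: a GTP item under an
 * outer UDP layer, matching the TEID and one of the E/S/PN flags
 * permitted by MLX5_GTP_FLAGS_MASK above. The TEID and flag values are
 * arbitrary examples; the function name is hypothetical.
 */
static __rte_unused void
mlx5_flow_dv_example_gtp_item(void)
{
	static const struct rte_flow_item_gtp gtp_spec = {
		.v_pt_rsv_flags = 0x01, /* PN flag, within the mask. */
		.teid = RTE_BE32(1234),
	};
	static const struct rte_flow_item_gtp gtp_mask = {
		.v_pt_rsv_flags = MLX5_GTP_FLAGS_MASK,
		.teid = RTE_BE32(UINT32_MAX),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{
			.type = RTE_FLOW_ITEM_TYPE_GTP,
			.spec = &gtp_spec,
			.mask = &gtp_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	(void)pattern;
}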
1795
1796 /**
1797  * Validate the pop VLAN action.
1798  *
1799  * @param[in] dev
1800  *   Pointer to the rte_eth_dev structure.
1801  * @param[in] action_flags
1802  *   Holds the actions detected until now.
1803  * @param[in] action
1804  *   Pointer to the pop vlan action.
1805  * @param[in] item_flags
1806  *   The items found in this flow rule.
1807  * @param[in] attr
1808  *   Pointer to flow attributes.
1809  * @param[out] error
1810  *   Pointer to error structure.
1811  *
1812  * @return
1813  *   0 on success, a negative errno value otherwise and rte_errno is set.
1814  */
1815 static int
1816 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1817                                  uint64_t action_flags,
1818                                  const struct rte_flow_action *action,
1819                                  uint64_t item_flags,
1820                                  const struct rte_flow_attr *attr,
1821                                  struct rte_flow_error *error)
1822 {
1823         const struct mlx5_priv *priv = dev->data->dev_private;
1824
1827         if (!priv->sh->pop_vlan_action)
1828                 return rte_flow_error_set(error, ENOTSUP,
1829                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1830                                           NULL,
1831                                           "pop vlan action is not supported");
1832         if (attr->egress)
1833                 return rte_flow_error_set(error, ENOTSUP,
1834                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1835                                           NULL,
1836                                           "pop vlan action not supported for "
1837                                           "egress");
1838         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1839                 return rte_flow_error_set(error, ENOTSUP,
1840                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1841                                           "no support for multiple VLAN "
1842                                           "actions");
1843         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1844                 return rte_flow_error_set(error, ENOTSUP,
1845                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1846                                           NULL,
1847                                           "cannot pop vlan without a "
1848                                           "match on (outer) vlan in the flow");
1849         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1850                 return rte_flow_error_set(error, EINVAL,
1851                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1852                                           "wrong action order, port_id should "
1853                                           "be after pop VLAN action");
1854         if (!attr->transfer && priv->representor)
1855                 return rte_flow_error_set(error, ENOTSUP,
1856                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1857                                           "pop vlan action for VF representor "
1858                                           "not supported on NIC table");
1859         return 0;
1860 }
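
/*
 * Illustrative sketch, not part of the driver: an action list that
 * respects the ordering rule above, i.e. port_id only after the pop.
 * The rule would also need an (outer) VLAN match in its pattern and
 * the transfer attribute; the function name is hypothetical.
 */
static __rte_unused void
mlx5_flow_dv_example_pop_vlan_actions(uint32_t port)
{
	const struct rte_flow_action_port_id pid = { .id = port };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}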
1861
1862 /**
1863  * Get VLAN default info from vlan match info.
1864  *
1865  * @param[in] items
1866  *   The list of item specifications.
1867  * @param[out] vlan
1868  *   Pointer to the VLAN info to fill.
1869  */
1873 static void
1874 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1875                                   struct rte_vlan_hdr *vlan)
1876 {
1877         const struct rte_flow_item_vlan nic_mask = {
1878                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1879                                 MLX5DV_FLOW_VLAN_VID_MASK),
1880                 .inner_type = RTE_BE16(0xffff),
1881         };
1882
1883         if (items == NULL)
1884                 return;
1885         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1886                 int type = items->type;
1887
1888                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1889                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1890                         break;
1891         }
1892         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1893                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1894                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1895
1896                 /* If VLAN item in pattern doesn't contain data, return here. */
1897                 if (!vlan_v)
1898                         return;
1899                 if (!vlan_m)
1900                         vlan_m = &nic_mask;
1901                 /* Only full match values are accepted */
1902                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1903                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1904                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1905                         vlan->vlan_tci |=
1906                                 rte_be_to_cpu_16(vlan_v->tci &
1907                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1908                 }
1909                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1910                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1911                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1912                         vlan->vlan_tci |=
1913                                 rte_be_to_cpu_16(vlan_v->tci &
1914                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1915                 }
1916                 if (vlan_m->inner_type == nic_mask.inner_type)
1917                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1918                                                            vlan_m->inner_type);
1919         }
1920 }
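
/*
 * Illustrative sketch, not part of the driver: the TCI decomposition
 * the function above performs, written out on a CPU-order TCI using the
 * VLAN header macros defined at the top of this file. The function name
 * is hypothetical.
 */
static __rte_unused void
mlx5_flow_dv_example_tci_layout(uint16_t tci)
{
	/* PCP occupies TCI bits 15-13, VID occupies bits 11-0. */
	unsigned int pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
			   MLX5DV_FLOW_VLAN_PCP_SHIFT;
	unsigned int vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;

	(void)pcp;
	(void)vid;
}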
1921
1922 /**
1923  * Validate the push VLAN action.
1924  *
1925  * @param[in] dev
1926  *   Pointer to the rte_eth_dev structure.
1927  * @param[in] action_flags
1928  *   Holds the actions detected until now.
1929  * @param[in] vlan_m
1930  *   Pointer to the VLAN item mask from the pattern, or NULL.
1931  * @param[in] action
1932  *   Pointer to the action structure.
1933  * @param[in] attr
1934  *   Pointer to flow attributes
1935  * @param[out] error
1936  *   Pointer to error structure.
1937  *
1938  * @return
1939  *   0 on success, a negative errno value otherwise and rte_errno is set.
1940  */
1941 static int
1942 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
1943                                   uint64_t action_flags,
1944                                   const struct rte_flow_item_vlan *vlan_m,
1945                                   const struct rte_flow_action *action,
1946                                   const struct rte_flow_attr *attr,
1947                                   struct rte_flow_error *error)
1948 {
1949         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1950         const struct mlx5_priv *priv = dev->data->dev_private;
1951
1952         if (!attr->transfer && attr->ingress)
1953                 return rte_flow_error_set(error, ENOTSUP,
1954                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1955                                           NULL,
1956                                           "push VLAN action not supported for "
1957                                           "ingress");
1958         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1959             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1960                 return rte_flow_error_set(error, EINVAL,
1961                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1962                                           "invalid vlan ethertype");
1963         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1964                 return rte_flow_error_set(error, ENOTSUP,
1965                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1966                                           "no support for multiple VLAN "
1967                                           "actions");
1968         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1969                 return rte_flow_error_set(error, EINVAL,
1970                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1971                                           "wrong action order, port_id should "
1972                                           "be after push VLAN");
1973         if (!attr->transfer && priv->representor)
1974                 return rte_flow_error_set(error, ENOTSUP,
1975                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1976                                           "push vlan action for VF representor "
1977                                           "not supported on NIC table");
1978         if (vlan_m &&
1979             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) &&
1980             (vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) !=
1981                 MLX5DV_FLOW_VLAN_PCP_MASK_BE &&
1982             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP) &&
1983             !(mlx5_flow_find_action
1984                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP)))
1985                 return rte_flow_error_set(error, EINVAL,
1986                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1987                                           "not full match mask on VLAN PCP and "
1988                                           "there is no of_set_vlan_pcp action, "
1989                                           "push VLAN action cannot figure out "
1990                                           "PCP value");
1991         if (vlan_m &&
1992             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) &&
1993             (vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) !=
1994                 MLX5DV_FLOW_VLAN_VID_MASK_BE &&
1995             !(action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID) &&
1996             !(mlx5_flow_find_action
1997                 (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID)))
1998                 return rte_flow_error_set(error, EINVAL,
1999                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2000                                           "not full match mask on VLAN VID and "
2001                                           "there is no of_set_vlan_vid action, "
2002                                           "push VLAN action cannot figure out "
2003                                           "VID value");
2005         return 0;
2006 }
2007
2008 /**
2009  * Validate the set VLAN PCP.
2010  *
2011  * @param[in] action_flags
2012  *   Holds the actions detected until now.
2013  * @param[in] actions
2014  *   Pointer to the list of actions remaining in the flow rule.
2015  * @param[out] error
2016  *   Pointer to error structure.
2017  *
2018  * @return
2019  *   0 on success, a negative errno value otherwise and rte_errno is set.
2020  */
2021 static int
2022 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
2023                                      const struct rte_flow_action actions[],
2024                                      struct rte_flow_error *error)
2025 {
2026         const struct rte_flow_action *action = actions;
2027         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
2028
2029         if (conf->vlan_pcp > 7)
2030                 return rte_flow_error_set(error, EINVAL,
2031                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2032                                           "VLAN PCP value is too big");
2033         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
2034                 return rte_flow_error_set(error, ENOTSUP,
2035                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2036                                           "set VLAN PCP action must follow "
2037                                           "the push VLAN action");
2038         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
2039                 return rte_flow_error_set(error, ENOTSUP,
2040                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2041                                           "Multiple VLAN PCP modifications "
2042                                           "are not supported");
2043         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2044                 return rte_flow_error_set(error, EINVAL,
2045                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2046                                           "wrong action order, port_id should "
2047                                           "be after set VLAN PCP");
2048         return 0;
2049 }
2050
2051 /**
2052  * Validate the set VLAN VID.
2053  *
2054  * @param[in] item_flags
2055  *   Holds the items detected in this rule.
2056  * @param[in] action_flags
2057  *   Holds the actions detected until now.
2058  * @param[in] actions
2059  *   Pointer to the list of actions remaining in the flow rule.
2060  * @param[out] error
2061  *   Pointer to error structure.
2062  *
2063  * @return
2064  *   0 on success, a negative errno value otherwise and rte_errno is set.
2065  */
2066 static int
2067 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
2068                                      uint64_t action_flags,
2069                                      const struct rte_flow_action actions[],
2070                                      struct rte_flow_error *error)
2071 {
2072         const struct rte_flow_action *action = actions;
2073         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
2074
2075         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
2076                 return rte_flow_error_set(error, EINVAL,
2077                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2078                                           "VLAN VID value is too big");
2079         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
2080             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
2081                 return rte_flow_error_set(error, ENOTSUP,
2082                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2083                                           "set VLAN VID action must follow push"
2084                                           " VLAN action or match on VLAN item");
2085         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
2086                 return rte_flow_error_set(error, ENOTSUP,
2087                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2088                                           "Multiple VLAN VID modifications are "
2089                                           "not supported");
2090         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
2091                 return rte_flow_error_set(error, EINVAL,
2092                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2093                                           "wrong action order, port_id should "
2094                                           "be after set VLAN VID");
2095         return 0;
2096 }
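
/*
 * Illustrative sketch, not part of the driver: a push VLAN sequence
 * that satisfies the validators above, where both set actions follow
 * the push. The PCP/VID values are arbitrary examples and the function
 * name is hypothetical.
 */
static __rte_unused void
mlx5_flow_dv_example_push_vlan_actions(void)
{
	static const struct rte_flow_action_of_push_vlan push = {
		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
	};
	static const struct rte_flow_action_of_set_vlan_pcp pcp = {
		.vlan_pcp = 3, /* Must be <= 7. */
	};
	static const struct rte_flow_action_of_set_vlan_vid vid = {
		.vlan_vid = RTE_BE16(100), /* Must be <= 0xFFE. */
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}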
2097
2098 /**
2099  * Validate the FLAG action.
2100  *
2101  * @param[in] dev
2102  *   Pointer to the rte_eth_dev structure.
2103  * @param[in] action_flags
2104  *   Holds the actions detected until now.
2105  * @param[in] attr
2106  *   Pointer to flow attributes
2107  * @param[out] error
2108  *   Pointer to error structure.
2109  *
2110  * @return
2111  *   0 on success, a negative errno value otherwise and rte_errno is set.
2112  */
2113 static int
2114 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
2115                              uint64_t action_flags,
2116                              const struct rte_flow_attr *attr,
2117                              struct rte_flow_error *error)
2118 {
2119         struct mlx5_priv *priv = dev->data->dev_private;
2120         struct mlx5_dev_config *config = &priv->config;
2121         int ret;
2122
2123         /* Fall back if no extended metadata register support. */
2124         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2125                 return mlx5_flow_validate_action_flag(action_flags, attr,
2126                                                       error);
2127         /* Extensive metadata mode requires registers. */
2128         if (!mlx5_flow_ext_mreg_supported(dev))
2129                 return rte_flow_error_set(error, ENOTSUP,
2130                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2131                                           "no metadata registers "
2132                                           "to support flag action");
2133         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2134                 return rte_flow_error_set(error, ENOTSUP,
2135                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2136                                           "extended metadata register"
2137                                           " isn't available");
2138         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2139         if (ret < 0)
2140                 return ret;
2141         MLX5_ASSERT(ret > 0);
2142         if (action_flags & MLX5_FLOW_ACTION_MARK)
2143                 return rte_flow_error_set(error, EINVAL,
2144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2145                                           "can't mark and flag in same flow");
2146         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2147                 return rte_flow_error_set(error, EINVAL,
2148                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2149                                           "can't have 2 flag"
2150                                           " actions in same flow");
2151         return 0;
2152 }
2153
2154 /**
2155  * Validate MARK action.
2156  *
2157  * @param[in] dev
2158  *   Pointer to the rte_eth_dev structure.
2159  * @param[in] action
2160  *   Pointer to action.
2161  * @param[in] action_flags
2162  *   Holds the actions detected until now.
2163  * @param[in] attr
2164  *   Pointer to flow attributes
2165  * @param[out] error
2166  *   Pointer to error structure.
2167  *
2168  * @return
2169  *   0 on success, a negative errno value otherwise and rte_errno is set.
2170  */
2171 static int
2172 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2173                              const struct rte_flow_action *action,
2174                              uint64_t action_flags,
2175                              const struct rte_flow_attr *attr,
2176                              struct rte_flow_error *error)
2177 {
2178         struct mlx5_priv *priv = dev->data->dev_private;
2179         struct mlx5_dev_config *config = &priv->config;
2180         const struct rte_flow_action_mark *mark = action->conf;
2181         int ret;
2182
2183         /* Fall back if no extended metadata register support. */
2184         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2185                 return mlx5_flow_validate_action_mark(action, action_flags,
2186                                                       attr, error);
2187         /* Extensive metadata mode requires registers. */
2188         if (!mlx5_flow_ext_mreg_supported(dev))
2189                 return rte_flow_error_set(error, ENOTSUP,
2190                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2191                                           "no metadata registers "
2192                                           "to support mark action");
2193         if (!priv->sh->dv_mark_mask)
2194                 return rte_flow_error_set(error, ENOTSUP,
2195                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2196                                           "extended metadata register"
2197                                           " isn't available");
2198         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2199         if (ret < 0)
2200                 return ret;
2201         MLX5_ASSERT(ret > 0);
2202         if (!mark)
2203                 return rte_flow_error_set(error, EINVAL,
2204                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2205                                           "configuration cannot be null");
2206         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2207                 return rte_flow_error_set(error, EINVAL,
2208                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2209                                           &mark->id,
2210                                           "mark id exceeds the limit");
2211         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2212                 return rte_flow_error_set(error, EINVAL,
2213                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2214                                           "can't flag and mark in same flow");
2215         if (action_flags & MLX5_FLOW_ACTION_MARK)
2216                 return rte_flow_error_set(error, EINVAL,
2217                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2218                                           "can't have 2 mark actions in same"
2219                                           " flow");
2220         return 0;
2221 }
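
/*
 * Illustrative sketch, not part of the driver: a MARK action as
 * validated above. MARK and FLAG are mutually exclusive in one flow,
 * and a real rule would also carry a fate action (QUEUE/RSS). The id
 * value and function name are examples only.
 */
static __rte_unused void
mlx5_flow_dv_example_mark_action(void)
{
	static const struct rte_flow_action_mark mark = { .id = 0x123 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}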
2222
2223 /**
2224  * Validate SET_META action.
2225  *
2226  * @param[in] dev
2227  *   Pointer to the rte_eth_dev structure.
2228  * @param[in] action
2229  *   Pointer to the action structure.
2230  * @param[in] action_flags
2231  *   Holds the actions detected until now.
2232  * @param[in] attr
2233  *   Pointer to flow attributes
2234  * @param[out] error
2235  *   Pointer to error structure.
2236  *
2237  * @return
2238  *   0 on success, a negative errno value otherwise and rte_errno is set.
2239  */
2240 static int
2241 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2242                                  const struct rte_flow_action *action,
2243                                  uint64_t action_flags __rte_unused,
2244                                  const struct rte_flow_attr *attr,
2245                                  struct rte_flow_error *error)
2246 {
2247         const struct rte_flow_action_set_meta *conf;
2248         uint32_t nic_mask = UINT32_MAX;
2249         int reg;
2250
2251         if (!mlx5_flow_ext_mreg_supported(dev))
2252                 return rte_flow_error_set(error, ENOTSUP,
2253                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2254                                           "extended metadata register"
2255                                           " isn't supported");
2256         reg = flow_dv_get_metadata_reg(dev, attr, error);
2257         if (reg < 0)
2258                 return reg;
2259         if (reg != REG_A && reg != REG_B) {
2260                 struct mlx5_priv *priv = dev->data->dev_private;
2261
2262                 nic_mask = priv->sh->dv_meta_mask;
2263         }
2264         if (!(action->conf))
2265                 return rte_flow_error_set(error, EINVAL,
2266                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2267                                           "configuration cannot be null");
2268         conf = (const struct rte_flow_action_set_meta *)action->conf;
2269         if (!conf->mask)
2270                 return rte_flow_error_set(error, EINVAL,
2271                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2272                                           "zero mask doesn't have any effect");
2273         if (conf->mask & ~nic_mask)
2274                 return rte_flow_error_set(error, EINVAL,
2275                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2276                                           "meta data must be within reg C0");
2277         return 0;
2278 }
2279
2280 /**
2281  * Validate SET_TAG action.
2282  *
2283  * @param[in] dev
2284  *   Pointer to the rte_eth_dev structure.
2285  * @param[in] action
2286  *   Pointer to the action structure.
2287  * @param[in] action_flags
2288  *   Holds the actions detected until now.
2289  * @param[in] attr
2290  *   Pointer to flow attributes
2291  * @param[out] error
2292  *   Pointer to error structure.
2293  *
2294  * @return
2295  *   0 on success, a negative errno value otherwise and rte_errno is set.
2296  */
2297 static int
2298 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2299                                 const struct rte_flow_action *action,
2300                                 uint64_t action_flags,
2301                                 const struct rte_flow_attr *attr,
2302                                 struct rte_flow_error *error)
2303 {
2304         const struct rte_flow_action_set_tag *conf;
2305         const uint64_t terminal_action_flags =
2306                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2307                 MLX5_FLOW_ACTION_RSS;
2308         int ret;
2309
2310         if (!mlx5_flow_ext_mreg_supported(dev))
2311                 return rte_flow_error_set(error, ENOTSUP,
2312                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2313                                           "extensive metadata register"
2314                                           " isn't supported");
2315         if (!(action->conf))
2316                 return rte_flow_error_set(error, EINVAL,
2317                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2318                                           "configuration cannot be null");
2319         conf = (const struct rte_flow_action_set_tag *)action->conf;
2320         if (!conf->mask)
2321                 return rte_flow_error_set(error, EINVAL,
2322                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2323                                           "zero mask doesn't have any effect");
2324         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2325         if (ret < 0)
2326                 return ret;
2327         if (!attr->transfer && attr->ingress &&
2328             (action_flags & terminal_action_flags))
2329                 return rte_flow_error_set(error, EINVAL,
2330                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2331                                           "set_tag has no effect"
2332                                           " with terminal actions");
2333         return 0;
2334 }
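
/*
 * Illustrative sketch, not part of the driver: a SET_TAG action with
 * the non-zero mask the validator above requires. The data value,
 * index and function name are examples only.
 */
static __rte_unused void
mlx5_flow_dv_example_set_tag_action(void)
{
	static const struct rte_flow_action_set_tag set_tag = {
		.data = 0xbeef,
		.mask = UINT32_MAX, /* A zero mask is rejected above. */
		.index = 0,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &set_tag },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}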
2335
2336 /**
2337  * Validate count action.
2338  *
2339  * @param[in] dev
2340  *   Pointer to rte_eth_dev structure.
2341  * @param[out] error
2342  *   Pointer to error structure.
2343  *
2344  * @return
2345  *   0 on success, a negative errno value otherwise and rte_errno is set.
2346  */
2347 static int
2348 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2349                               struct rte_flow_error *error)
2350 {
2351         struct mlx5_priv *priv = dev->data->dev_private;
2352
2353         if (!priv->config.devx)
2354                 goto notsup_err;
2355 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2356         return 0;
2357 #endif
2358 notsup_err:
2359         return rte_flow_error_set
2360                       (error, ENOTSUP,
2361                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2362                        NULL,
2363                        "count action not supported");
2364 }
2365
2366 /**
2367  * Validate the L2 encap action.
2368  *
2369  * @param[in] dev
2370  *   Pointer to the rte_eth_dev structure.
2371  * @param[in] action_flags
2372  *   Holds the actions detected until now.
2373  * @param[in] action
2374  *   Pointer to the action structure.
2375  * @param[in] attr
2376  *   Pointer to flow attributes.
2377  * @param[out] error
2378  *   Pointer to error structure.
2379  *
2380  * @return
2381  *   0 on success, a negative errno value otherwise and rte_errno is set.
2382  */
2383 static int
2384 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2385                                  uint64_t action_flags,
2386                                  const struct rte_flow_action *action,
2387                                  const struct rte_flow_attr *attr,
2388                                  struct rte_flow_error *error)
2389 {
2390         const struct mlx5_priv *priv = dev->data->dev_private;
2391
2392         if (!(action->conf))
2393                 return rte_flow_error_set(error, EINVAL,
2394                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2395                                           "configuration cannot be null");
2396         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2397                 return rte_flow_error_set(error, EINVAL,
2398                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2399                                           "can only have a single encap action "
2400                                           "in a flow");
2401         if (!attr->transfer && priv->representor)
2402                 return rte_flow_error_set(error, ENOTSUP,
2403                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2404                                           "encap action for VF representor "
2405                                           "not supported on NIC table");
2406         return 0;
2407 }
2408
2409 /**
2410  * Validate a decap action.
2411  *
2412  * @param[in] dev
2413  *   Pointer to the rte_eth_dev structure.
2414  * @param[in] action_flags
2415  *   Holds the actions detected until now.
2416  * @param[in] attr
2417  *   Pointer to flow attributes
2418  * @param[out] error
2419  *   Pointer to error structure.
2420  *
2421  * @return
2422  *   0 on success, a negative errno value otherwise and rte_errno is set.
2423  */
2424 static int
2425 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2426                               uint64_t action_flags,
2427                               const struct rte_flow_attr *attr,
2428                               struct rte_flow_error *error)
2429 {
2430         const struct mlx5_priv *priv = dev->data->dev_private;
2431
2432         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2433                 return rte_flow_error_set(error, ENOTSUP,
2434                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2435                                           action_flags &
2436                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2437                                           "have a single decap action" : "decap "
2438                                           "after encap is not supported");
2439         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2440                 return rte_flow_error_set(error, EINVAL,
2441                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2442                                           "can't have decap action after"
2443                                           " modify action");
2444         if (attr->egress)
2445                 return rte_flow_error_set(error, ENOTSUP,
2446                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2447                                           NULL,
2448                                           "decap action not supported for "
2449                                           "egress");
2450         if (!attr->transfer && priv->representor)
2451                 return rte_flow_error_set(error, ENOTSUP,
2452                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2453                                           "decap action for VF representor "
2454                                           "not supported on NIC table");
2455         return 0;
2456 }
2457
2458 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2459
2460 /**
2461  * Validate the raw encap and decap actions.
2462  *
2463  * @param[in] dev
2464  *   Pointer to the rte_eth_dev structure.
2465  * @param[in] decap
2466  *   Pointer to the decap action.
2467  * @param[in] encap
2468  *   Pointer to the encap action.
2469  * @param[in] attr
2470  *   Pointer to flow attributes
2471  * @param[in, out] action_flags
2472  *   Holds the actions detected until now.
2473  * @param[out] actions_n
2474  *   Pointer to the number of actions counter.
2475  * @param[out] error
2476  *   Pointer to error structure.
2477  *
2478  * @return
2479  *   0 on success, a negative errno value otherwise and rte_errno is set.
2480  */
2481 static int
2482 flow_dv_validate_action_raw_encap_decap
2483         (struct rte_eth_dev *dev,
2484          const struct rte_flow_action_raw_decap *decap,
2485          const struct rte_flow_action_raw_encap *encap,
2486          const struct rte_flow_attr *attr, uint64_t *action_flags,
2487          int *actions_n, struct rte_flow_error *error)
2488 {
2489         const struct mlx5_priv *priv = dev->data->dev_private;
2490         int ret;
2491
2492         if (encap && (!encap->size || !encap->data))
2493                 return rte_flow_error_set(error, EINVAL,
2494                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2495                                           "raw encap data cannot be empty");
2496         if (decap && encap) {
2497                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2498                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2499                         /* L3 encap. */
2500                         decap = NULL;
2501                 else if (encap->size <=
2502                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2503                            decap->size >
2504                            MLX5_ENCAPSULATION_DECISION_SIZE)
2505                         /* L3 decap. */
2506                         encap = NULL;
2507                 else if (encap->size >
2508                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2509                            decap->size >
2510                            MLX5_ENCAPSULATION_DECISION_SIZE)
2511                         /* 2 L2 actions: encap and decap. */
2512                         ;
2513                 else
2514                         return rte_flow_error_set(error,
2515                                 ENOTSUP,
2516                                 RTE_FLOW_ERROR_TYPE_ACTION,
2517                                 NULL, "unsupported combination: "
2518                                 "both raw decap and raw encap "
2519                                 "sizes are too small");
2520         }
2521         if (decap) {
2522                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2523                                                     error);
2524                 if (ret < 0)
2525                         return ret;
2526                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2527                 ++(*actions_n);
2528         }
2529         if (encap) {
2530                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2531                         return rte_flow_error_set(error, ENOTSUP,
2532                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2533                                                   NULL,
2534                                                   "too small raw encap size");
2535                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2536                         return rte_flow_error_set(error, EINVAL,
2537                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2538                                                   NULL,
2539                                                   "more than one encap action");
2540                 if (!attr->transfer && priv->representor)
2541                         return rte_flow_error_set
2542                                         (error, ENOTSUP,
2543                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2544                                          "encap action for VF representor "
2545                                          "not supported on NIC table");
2546                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2547                 ++(*actions_n);
2548         }
2549         return 0;
2550 }
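
/*
 * Illustrative sketch, not part of the driver: one of the size
 * combinations classified above. An empty raw decap paired with an
 * encap buffer larger than MLX5_ENCAPSULATION_DECISION_SIZE is treated
 * as an L3 encap. The header buffer is supplied by the caller; the
 * function name is hypothetical.
 */
static __rte_unused void
mlx5_flow_dv_example_raw_decap_encap(uint8_t *hdr, size_t hdr_size)
{
	const struct rte_flow_action_raw_decap decap = {
		.data = NULL,
		.size = 0,
	};
	const struct rte_flow_action_raw_encap encap = {
		.data = hdr,
		/* Expected to exceed MLX5_ENCAPSULATION_DECISION_SIZE. */
		.size = hdr_size,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	(void)actions;
}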
2551
2552 /**
2553  * Find existing encap/decap resource or create and register a new one.
2554  *
2555  * @param[in, out] dev
2556  *   Pointer to rte_eth_dev structure.
2557  * @param[in, out] resource
2558  *   Pointer to encap/decap resource.
2559  * @param[in, out] dev_flow
2560  *   Pointer to the dev_flow.
2561  * @param[out] error
2562  *   Pointer to error structure.
2563  *
2564  * @return
2565  *   0 on success, a negative errno value otherwise and rte_errno is set.
2566  */
2567 static int
2568 flow_dv_encap_decap_resource_register
2569                         (struct rte_eth_dev *dev,
2570                          struct mlx5_flow_dv_encap_decap_resource *resource,
2571                          struct mlx5_flow *dev_flow,
2572                          struct rte_flow_error *error)
2573 {
2574         struct mlx5_priv *priv = dev->data->dev_private;
2575         struct mlx5_dev_ctx_shared *sh = priv->sh;
2576         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2577         struct mlx5dv_dr_domain *domain;
2578         uint32_t idx = 0;
2579         int ret;
2580
2581         resource->flags = dev_flow->dv.group ? 0 : 1;
2582         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2583                 domain = sh->fdb_domain;
2584         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2585                 domain = sh->rx_domain;
2586         else
2587                 domain = sh->tx_domain;
2588         /* Lookup a matching resource from cache. */
2589         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], sh->encaps_decaps, idx,
2590                       cache_resource, next) {
2591                 if (resource->reformat_type == cache_resource->reformat_type &&
2592                     resource->ft_type == cache_resource->ft_type &&
2593                     resource->flags == cache_resource->flags &&
2594                     resource->size == cache_resource->size &&
2595                     !memcmp((const void *)resource->buf,
2596                             (const void *)cache_resource->buf,
2597                             resource->size)) {
2598                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2599                                 (void *)cache_resource,
2600                                 rte_atomic32_read(&cache_resource->refcnt));
2601                         rte_atomic32_inc(&cache_resource->refcnt);
2602                         dev_flow->handle->dvh.rix_encap_decap = idx;
2603                         dev_flow->dv.encap_decap = cache_resource;
2604                         return 0;
2605                 }
2606         }
2607         /* Register new encap/decap resource. */
2608         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
2609                                        &dev_flow->handle->dvh.rix_encap_decap);
2610         if (!cache_resource)
2611                 return rte_flow_error_set(error, ENOMEM,
2612                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2613                                           "cannot allocate resource memory");
2614         *cache_resource = *resource;
2615         ret = mlx5_flow_os_create_flow_action_packet_reformat
2616                                         (sh->ctx, domain, cache_resource,
2617                                          &cache_resource->action);
2618         if (ret) {
2619                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
                                     dev_flow->handle->dvh.rix_encap_decap);
2620                 return rte_flow_error_set(error, ENOMEM,
2621                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2622                                           NULL, "cannot create action");
2623         }
2624         rte_atomic32_init(&cache_resource->refcnt);
2625         rte_atomic32_inc(&cache_resource->refcnt);
2626         ILIST_INSERT(sh->ipool[MLX5_IPOOL_DECAP_ENCAP], &sh->encaps_decaps,
2627                      dev_flow->handle->dvh.rix_encap_decap, cache_resource,
2628                      next);
2629         dev_flow->dv.encap_decap = cache_resource;
2630         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2631                 (void *)cache_resource,
2632                 rte_atomic32_read(&cache_resource->refcnt));
2633         return 0;
2634 }
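/*
 * Standalone sketch (illustration only): every *_resource_register()
 * helper in this file follows the same lookup-or-create pattern with a
 * reference count per cached entry. The minimal version below uses a
 * plain list and an integer key instead of the indexed pools and the
 * reformat_type/ft_type/flags/size/buf key used by the real code.
 */
struct example_cache_entry {
	LIST_ENTRY(example_cache_entry) next;
	rte_atomic32_t refcnt;
	uint32_t key;
};
LIST_HEAD(example_cache_head, example_cache_entry);

static struct example_cache_entry *
example_cache_get(struct example_cache_head *head, uint32_t key)
{
	struct example_cache_entry *entry;

	LIST_FOREACH(entry, head, next)
		if (entry->key == key) {
			/* Cache hit: share the entry, refcnt++. */
			rte_atomic32_inc(&entry->refcnt);
			return entry;
		}
	/* Cache miss: allocate, initialize refcnt to 1 and publish. */
	entry = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*entry), 0, SOCKET_ID_ANY);
	if (!entry)
		return NULL;
	entry->key = key;
	rte_atomic32_init(&entry->refcnt);
	rte_atomic32_inc(&entry->refcnt);
	LIST_INSERT_HEAD(head, entry, next);
	return entry;
}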
2635
2636 /**
2637  * Find existing table jump resource or create and register a new one.
2638  *
2639  * @param[in, out] dev
2640  *   Pointer to rte_eth_dev structure.
2641  * @param[in, out] tbl
2642  *   Pointer to flow table resource.
2643  * @param[in, out] dev_flow
2644  *   Pointer to the dev_flow.
2645  * @param[out] error
2646  *   Pointer to error structure.
2647  *
2648  * @return
2649  *   0 on success, otherwise -errno and errno is set.
2650  */
2651 static int
2652 flow_dv_jump_tbl_resource_register
2653                         (struct rte_eth_dev *dev __rte_unused,
2654                          struct mlx5_flow_tbl_resource *tbl,
2655                          struct mlx5_flow *dev_flow,
2656                          struct rte_flow_error *error)
2657 {
2658         struct mlx5_flow_tbl_data_entry *tbl_data =
2659                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2660         int cnt, ret;
2661
2662         MLX5_ASSERT(tbl);
2663         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2664         if (!cnt) {
2665                 ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
2666                                 (tbl->obj, &tbl_data->jump.action);
2667                 if (ret)
2668                         return rte_flow_error_set(error, ENOMEM,
2669                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2670                                         NULL, "cannot create jump action");
2671                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2672                         (void *)&tbl_data->jump, cnt);
2673         } else {
2674                 /* Reusing an existing jump must not add a table reference. */
2675                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2676                 MLX5_ASSERT(tbl_data->jump.action);
2677                 DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
2678                         (void *)&tbl_data->jump, cnt);
2679         }
2680         rte_atomic32_inc(&tbl_data->jump.refcnt);
2681         dev_flow->handle->rix_jump = tbl_data->idx;
2682         dev_flow->dv.jump = &tbl_data->jump;
2683         return 0;
2684 }
2685
2686 /**
2687  * Find existing default miss resource or create and register a new one.
2688  *
2689  * @param[in, out] dev
2690  *   Pointer to rte_eth_dev structure.
2691  * @param[out] error
2692  *   Pointer to error structure.
2693  *
2694  * @return
2695  *   0 on success, otherwise -errno and errno is set.
2696  */
2697 static int
2698 flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
2699                 struct rte_flow_error *error)
2700 {
2701         struct mlx5_priv *priv = dev->data->dev_private;
2702         struct mlx5_dev_ctx_shared *sh = priv->sh;
2703         struct mlx5_flow_default_miss_resource *cache_resource =
2704                         &sh->default_miss;
2705         int cnt = rte_atomic32_read(&cache_resource->refcnt);
2706
2707         if (!cnt) {
2708                 MLX5_ASSERT(!cache_resource->action);
2709                 cache_resource->action =
2710                         mlx5_glue->dr_create_flow_action_default_miss();
2711                 if (!cache_resource->action)
2712                         return rte_flow_error_set(error, ENOMEM,
2713                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2714                                         "cannot create default miss action");
2715                 DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
2716                                 (void *)cache_resource->action, cnt);
2717         }
2718         rte_atomic32_inc(&cache_resource->refcnt);
2719         return 0;
2720 }
2721
2722 /**
2723  * Find existing port ID action resource or create and register a new one.
2724  *
2725  * @param[in, out] dev
2726  *   Pointer to rte_eth_dev structure.
2727  * @param[in, out] resource
2728  *   Pointer to port ID action resource.
2729  * @param[in, out] dev_flow
2730  *   Pointer to the dev_flow.
2731  * @param[out] error
2732  *   Pointer to error structure.
2733  *
2734  * @return
2735  *   0 on success, otherwise -errno and errno is set.
2736  */
2737 static int
2738 flow_dv_port_id_action_resource_register
2739                         (struct rte_eth_dev *dev,
2740                          struct mlx5_flow_dv_port_id_action_resource *resource,
2741                          struct mlx5_flow *dev_flow,
2742                          struct rte_flow_error *error)
2743 {
2744         struct mlx5_priv *priv = dev->data->dev_private;
2745         struct mlx5_dev_ctx_shared *sh = priv->sh;
2746         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2747         uint32_t idx = 0;
2748         int ret;
2749
2750         /* Lookup a matching resource from cache. */
2751         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PORT_ID], sh->port_id_action_list,
2752                       idx, cache_resource, next) {
2753                 if (resource->port_id == cache_resource->port_id) {
2754                 DRV_LOG(DEBUG, "port id action resource %p: "
2755                                 "refcnt %d++",
2756                                 (void *)cache_resource,
2757                                 rte_atomic32_read(&cache_resource->refcnt));
2758                         rte_atomic32_inc(&cache_resource->refcnt);
2759                         dev_flow->handle->rix_port_id_action = idx;
2760                         dev_flow->dv.port_id_action = cache_resource;
2761                         return 0;
2762                 }
2763         }
2764         /* Register new port id action resource. */
2765         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PORT_ID],
2766                                        &dev_flow->handle->rix_port_id_action);
2767         if (!cache_resource)
2768                 return rte_flow_error_set(error, ENOMEM,
2769                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2770                                           "cannot allocate resource memory");
2771         *cache_resource = *resource;
2772         ret = mlx5_flow_os_create_flow_action_dest_port
2773                                 (priv->sh->fdb_domain, resource->port_id,
2774                                  &cache_resource->action);
2775         if (ret) {
2776                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PORT_ID],
                                     dev_flow->handle->rix_port_id_action);
2777                 return rte_flow_error_set(error, ENOMEM,
2778                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2779                                           NULL, "cannot create action");
2780         }
2781         rte_atomic32_init(&cache_resource->refcnt);
2782         rte_atomic32_inc(&cache_resource->refcnt);
2783         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
2784                      dev_flow->handle->rix_port_id_action, cache_resource,
2785                      next);
2786         dev_flow->dv.port_id_action = cache_resource;
2787         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2788                 (void *)cache_resource,
2789                 rte_atomic32_read(&cache_resource->refcnt));
2790         return 0;
2791 }
2792
2793 /**
2794  * Find existing push vlan resource or create and register a new one.
2795  *
2796  * @param[in, out] dev
2797  *   Pointer to rte_eth_dev structure.
2798  * @param[in, out] resource
2799  *   Pointer to push VLAN action resource.
2800  * @param[in, out] dev_flow
2801  *   Pointer to the dev_flow.
2802  * @param[out] error
2803  *   Pointer to error structure.
2804  *
2805  * @return
2806  *   0 on success, otherwise -errno and errno is set.
2807  */
2808 static int
2809 flow_dv_push_vlan_action_resource_register
2810                        (struct rte_eth_dev *dev,
2811                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2812                         struct mlx5_flow *dev_flow,
2813                         struct rte_flow_error *error)
2814 {
2815         struct mlx5_priv *priv = dev->data->dev_private;
2816         struct mlx5_dev_ctx_shared *sh = priv->sh;
2817         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2818         struct mlx5dv_dr_domain *domain;
2819         uint32_t idx = 0;
2820         int ret;
2821
2822         /* Lookup a matching resource from cache. */
2823         ILIST_FOREACH(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2824                       sh->push_vlan_action_list, idx, cache_resource, next) {
2825                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2826                     resource->ft_type == cache_resource->ft_type) {
2827                 DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2828                                 "refcnt %d++",
2829                                 (void *)cache_resource,
2830                                 rte_atomic32_read(&cache_resource->refcnt));
2831                         rte_atomic32_inc(&cache_resource->refcnt);
2832                         dev_flow->handle->dvh.rix_push_vlan = idx;
2833                         dev_flow->dv.push_vlan_res = cache_resource;
2834                         return 0;
2835                 }
2836         }
2837         /* Register new push_vlan action resource. */
2838         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2839                                        &dev_flow->handle->dvh.rix_push_vlan);
2840         if (!cache_resource)
2841                 return rte_flow_error_set(error, ENOMEM,
2842                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2843                                           "cannot allocate resource memory");
2844         *cache_resource = *resource;
2845         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2846                 domain = sh->fdb_domain;
2847         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2848                 domain = sh->rx_domain;
2849         else
2850                 domain = sh->tx_domain;
2851         ret = mlx5_flow_os_create_flow_action_push_vlan
2852                                         (domain, resource->vlan_tag,
2853                                          &cache_resource->action);
2854         if (ret) {
2855                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
                                     dev_flow->handle->dvh.rix_push_vlan);
2856                 return rte_flow_error_set(error, ENOMEM,
2857                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2858                                           NULL, "cannot create action");
2859         }
2860         rte_atomic32_init(&cache_resource->refcnt);
2861         rte_atomic32_inc(&cache_resource->refcnt);
2862         ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
2863                      &sh->push_vlan_action_list,
2864                      dev_flow->handle->dvh.rix_push_vlan,
2865                      cache_resource, next);
2866         dev_flow->dv.push_vlan_res = cache_resource;
2867         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2868                 (void *)cache_resource,
2869                 rte_atomic32_read(&cache_resource->refcnt));
2870         return 0;
2871 }

2872 /**
2873  * Get the size of a specific rte_flow_item_type.
2874  *
2875  * @param[in] item_type
2876  *   Tested rte_flow_item_type.
2877  *
2878  * @return
2879  *   Size of the item type's structure, 0 if void or irrelevant.
2880  */
2881 static size_t
2882 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2883 {
2884         size_t retval;
2885
2886         switch (item_type) {
2887         case RTE_FLOW_ITEM_TYPE_ETH:
2888                 retval = sizeof(struct rte_flow_item_eth);
2889                 break;
2890         case RTE_FLOW_ITEM_TYPE_VLAN:
2891                 retval = sizeof(struct rte_flow_item_vlan);
2892                 break;
2893         case RTE_FLOW_ITEM_TYPE_IPV4:
2894                 retval = sizeof(struct rte_flow_item_ipv4);
2895                 break;
2896         case RTE_FLOW_ITEM_TYPE_IPV6:
2897                 retval = sizeof(struct rte_flow_item_ipv6);
2898                 break;
2899         case RTE_FLOW_ITEM_TYPE_UDP:
2900                 retval = sizeof(struct rte_flow_item_udp);
2901                 break;
2902         case RTE_FLOW_ITEM_TYPE_TCP:
2903                 retval = sizeof(struct rte_flow_item_tcp);
2904                 break;
2905         case RTE_FLOW_ITEM_TYPE_VXLAN:
2906                 retval = sizeof(struct rte_flow_item_vxlan);
2907                 break;
2908         case RTE_FLOW_ITEM_TYPE_GRE:
2909                 retval = sizeof(struct rte_flow_item_gre);
2910                 break;
2911         case RTE_FLOW_ITEM_TYPE_NVGRE:
2912                 retval = sizeof(struct rte_flow_item_nvgre);
2913                 break;
2914         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2915                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2916                 break;
2917         case RTE_FLOW_ITEM_TYPE_MPLS:
2918                 retval = sizeof(struct rte_flow_item_mpls);
2919                 break;
2920         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2921         default:
2922                 retval = 0;
2923                 break;
2924         }
2925         return retval;
2926 }
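/*
 * Usage sketch (illustration only): pre-computing the raw buffer length
 * of an RTE_FLOW_ITEM_TYPE_END-terminated item array with
 * flow_dv_get_item_len(); the conversion routine below performs the
 * equivalent accumulation while copying.
 */
static size_t
example_encap_data_len(const struct rte_flow_item *items)
{
	size_t total = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
		total += flow_dv_get_item_len(items->type);
	/* Callers must ensure the total fits MLX5_ENCAP_MAX_LEN. */
	return total;
}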
2927
2928 #define MLX5_ENCAP_IPV4_VERSION         0x40
2929 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2930 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2931 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2932 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2933 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2934 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
2935
2936 /**
2937  * Convert the encap action data from a list of rte_flow_item objects to a raw buffer.
2938  *
2939  * @param[in] items
2940  *   Pointer to rte_flow_item objects list.
2941  * @param[out] buf
2942  *   Pointer to the output buffer.
2943  * @param[out] size
2944  *   Pointer to the output buffer size.
2945  * @param[out] error
2946  *   Pointer to the error structure.
2947  *
2948  * @return
2949  *   0 on success, a negative errno value otherwise and rte_errno is set.
2950  */
2951 static int
2952 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2953                            size_t *size, struct rte_flow_error *error)
2954 {
2955         struct rte_ether_hdr *eth = NULL;
2956         struct rte_vlan_hdr *vlan = NULL;
2957         struct rte_ipv4_hdr *ipv4 = NULL;
2958         struct rte_ipv6_hdr *ipv6 = NULL;
2959         struct rte_udp_hdr *udp = NULL;
2960         struct rte_vxlan_hdr *vxlan = NULL;
2961         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2962         struct rte_gre_hdr *gre = NULL;
2963         size_t len;
2964         size_t temp_size = 0;
2965
2966         if (!items)
2967                 return rte_flow_error_set(error, EINVAL,
2968                                           RTE_FLOW_ERROR_TYPE_ACTION,
2969                                           NULL, "invalid empty data");
2970         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2971                 len = flow_dv_get_item_len(items->type);
2972                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2973                         return rte_flow_error_set(error, EINVAL,
2974                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2975                                                   (void *)items->type,
2976                                                   "items total size is too big"
2977                                                   " for encap action");
2978                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2979                 switch (items->type) {
2980                 case RTE_FLOW_ITEM_TYPE_ETH:
2981                         eth = (struct rte_ether_hdr *)&buf[temp_size];
2982                         break;
2983                 case RTE_FLOW_ITEM_TYPE_VLAN:
2984                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2985                         if (!eth)
2986                                 return rte_flow_error_set(error, EINVAL,
2987                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2988                                                 (void *)items->type,
2989                                                 "eth header not found");
2990                         if (!eth->ether_type)
2991                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2992                         break;
2993                 case RTE_FLOW_ITEM_TYPE_IPV4:
2994                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2995                         if (!vlan && !eth)
2996                                 return rte_flow_error_set(error, EINVAL,
2997                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2998                                                 (void *)items->type,
2999                                                 "neither eth nor vlan"
3000                                                 " header found");
3001                         if (vlan && !vlan->eth_proto)
3002                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3003                         else if (eth && !eth->ether_type)
3004                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
3005                         if (!ipv4->version_ihl)
3006                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
3007                                                     MLX5_ENCAP_IPV4_IHL_MIN;
3008                         if (!ipv4->time_to_live)
3009                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
3010                         break;
3011                 case RTE_FLOW_ITEM_TYPE_IPV6:
3012                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
3013                         if (!vlan && !eth)
3014                                 return rte_flow_error_set(error, EINVAL,
3015                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3016                                                 (void *)items->type,
3017                                                 "neither eth nor vlan"
3018                                                 " header found");
3019                         if (vlan && !vlan->eth_proto)
3020                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3021                         else if (eth && !eth->ether_type)
3022                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
3023                         if (!ipv6->vtc_flow)
3024                                 ipv6->vtc_flow =
3025                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
3026                         if (!ipv6->hop_limits)
3027                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
3028                         break;
3029                 case RTE_FLOW_ITEM_TYPE_UDP:
3030                         udp = (struct rte_udp_hdr *)&buf[temp_size];
3031                         if (!ipv4 && !ipv6)
3032                                 return rte_flow_error_set(error, EINVAL,
3033                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3034                                                 (void *)items->type,
3035                                                 "ip header not found");
3036                         if (ipv4 && !ipv4->next_proto_id)
3037                                 ipv4->next_proto_id = IPPROTO_UDP;
3038                         else if (ipv6 && !ipv6->proto)
3039                                 ipv6->proto = IPPROTO_UDP;
3040                         break;
3041                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3042                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
3043                         if (!udp)
3044                                 return rte_flow_error_set(error, EINVAL,
3045                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3046                                                 (void *)items->type,
3047                                                 "udp header not found");
3048                         if (!udp->dst_port)
3049                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
3050                         if (!vxlan->vx_flags)
3051                                 vxlan->vx_flags =
3052                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
3053                         break;
3054                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3055                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
3056                         if (!udp)
3057                                 return rte_flow_error_set(error, EINVAL,
3058                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3059                                                 (void *)items->type,
3060                                                 "udp header not found");
3061                         if (!vxlan_gpe->proto)
3062                                 return rte_flow_error_set(error, EINVAL,
3063                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3064                                                 (void *)items->type,
3065                                                 "next protocol not found");
3066                         if (!udp->dst_port)
3067                                 udp->dst_port =
3068                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
3069                         if (!vxlan_gpe->vx_flags)
3070                                 vxlan_gpe->vx_flags =
3071                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
3072                         break;
3073                 case RTE_FLOW_ITEM_TYPE_GRE:
3074                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3075                         gre = (struct rte_gre_hdr *)&buf[temp_size];
3076                         if (!gre->proto)
3077                                 return rte_flow_error_set(error, EINVAL,
3078                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3079                                                 (void *)items->type,
3080                                                 "next protocol not found");
3081                         if (!ipv4 && !ipv6)
3082                                 return rte_flow_error_set(error, EINVAL,
3083                                                 RTE_FLOW_ERROR_TYPE_ACTION,
3084                                                 (void *)items->type,
3085                                                 "ip header not found");
3086                         if (ipv4 && !ipv4->next_proto_id)
3087                                 ipv4->next_proto_id = IPPROTO_GRE;
3088                         else if (ipv6 && !ipv6->proto)
3089                                 ipv6->proto = IPPROTO_GRE;
3090                         break;
3091                 case RTE_FLOW_ITEM_TYPE_VOID:
3092                         break;
3093                 default:
3094                         return rte_flow_error_set(error, EINVAL,
3095                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3096                                                   (void *)items->type,
3097                                                   "unsupported item type");
3098                         break;
3099                 }
3100                 temp_size += len;
3101         }
3102         *size = temp_size;
3103         return 0;
3104 }
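/*
 * Example (illustration only): a minimal VXLAN encapsulation pattern for
 * flow_dv_convert_encap_data(). All addresses and the VNI are
 * placeholders; fields left at zero (EtherType, IP protocol, UDP
 * destination port, VXLAN flags) are filled with the defaults above.
 * The caller provides a buffer of at least MLX5_ENCAP_MAX_LEN bytes.
 */
static int
example_build_vxlan_encap(uint8_t *buf, size_t *size,
			  struct rte_flow_error *error)
{
	static const struct rte_flow_item_eth eth = {
		.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		.src.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	};
	static const struct rte_flow_item_ipv4 ipv4 = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
	};
	static const struct rte_flow_item_udp udp = { .hdr.src_port = 0 };
	static const struct rte_flow_item_vxlan vxlan = {
		.vni = { 0, 0, 42 },
	};
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return flow_dv_convert_encap_data(items, buf, size, error);
}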
3105
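/**
 * Reset the UDP checksum in the generated encapsulation header.
 *
 * The hardware recomputes the IPv4 header checksum, so IPv4 needs no
 * handling here. An IPv6/UDP checksum cannot be offloaded and is set to
 * zero, which is permitted for tunnel encapsulations (RFC 6935).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */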
3106 static int
3107 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
3108 {
3109         struct rte_ether_hdr *eth = NULL;
3110         struct rte_vlan_hdr *vlan = NULL;
3111         struct rte_ipv6_hdr *ipv6 = NULL;
3112         struct rte_udp_hdr *udp = NULL;
3113         char *next_hdr;
3114         uint16_t proto;
3115
3116         eth = (struct rte_ether_hdr *)data;
3117         next_hdr = (char *)(eth + 1);
3118         proto = RTE_BE16(eth->ether_type);
3119
3120         /* VLAN skipping */
3121         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
3122                 vlan = (struct rte_vlan_hdr *)next_hdr;
3123                 proto = RTE_BE16(vlan->eth_proto);
3124                 next_hdr += sizeof(struct rte_vlan_hdr);
3125         }
3126
3127         /* HW calculates the IPv4 checksum; no need to proceed. */
3128         if (proto == RTE_ETHER_TYPE_IPV4)
3129                 return 0;
3130
3131         /* Non-IPv4/IPv6 headers are not supported. */
3132         if (proto != RTE_ETHER_TYPE_IPV6) {
3133                 return rte_flow_error_set(error, ENOTSUP,
3134                                           RTE_FLOW_ERROR_TYPE_ACTION,
3135                                           NULL, "Cannot offload non IPv4/IPv6");
3136         }
3137
3138         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
3139
3140         /* Ignore non-UDP payload. */
3141         if (ipv6->proto != IPPROTO_UDP)
3142                 return 0;
3143
3144         udp = (struct rte_udp_hdr *)(ipv6 + 1);
3145         udp->dgram_cksum = 0;
3146
3147         return 0;
3148 }
3149
3150 /**
3151  * Convert L2 encap action to DV specification.
3152  *
3153  * @param[in] dev
3154  *   Pointer to rte_eth_dev structure.
3155  * @param[in] action
3156  *   Pointer to action structure.
3157  * @param[in, out] dev_flow
3158  *   Pointer to the mlx5_flow.
3159  * @param[in] transfer
3160  *   Mark if the flow is E-Switch flow.
3161  * @param[out] error
3162  *   Pointer to the error structure.
3163  *
3164  * @return
3165  *   0 on success, a negative errno value otherwise and rte_errno is set.
3166  */
3167 static int
3168 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
3169                                const struct rte_flow_action *action,
3170                                struct mlx5_flow *dev_flow,
3171                                uint8_t transfer,
3172                                struct rte_flow_error *error)
3173 {
3174         const struct rte_flow_item *encap_data;
3175         const struct rte_flow_action_raw_encap *raw_encap_data;
3176         struct mlx5_flow_dv_encap_decap_resource res = {
3177                 .reformat_type =
3178                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
3179                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3180                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
3181         };
3182
3183         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3184                 raw_encap_data =
3185                         (const struct rte_flow_action_raw_encap *)action->conf;
3186                 res.size = raw_encap_data->size;
3187                 memcpy(res.buf, raw_encap_data->data, res.size);
3188         } else {
3189                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3190                         encap_data =
3191                                 ((const struct rte_flow_action_vxlan_encap *)
3192                                                 action->conf)->definition;
3193                 else
3194                         encap_data =
3195                                 ((const struct rte_flow_action_nvgre_encap *)
3196                                                 action->conf)->definition;
3197                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3198                                                &res.size, error))
3199                         return -rte_errno;
3200         }
3201         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3202                 return -rte_errno;
3203         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3204                 return rte_flow_error_set(error, EINVAL,
3205                                           RTE_FLOW_ERROR_TYPE_ACTION,
3206                                           NULL, "can't create L2 encap action");
3207         return 0;
3208 }
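/*
 * Application-side sketch (illustration only): the two encap action
 * shapes handled above. A real flow uses only one encap action;
 * `definition` stands for an END-terminated item array such as the one
 * in example_build_vxlan_encap(), `raw`/`raw_len` for a pre-built
 * header buffer.
 */
static void
example_encap_action_confs(const struct rte_flow_item *definition,
			   uint8_t *raw, size_t raw_len)
{
	const struct rte_flow_action_vxlan_encap vxlan_conf = {
		.definition = (struct rte_flow_item *)(uintptr_t)definition,
	};
	const struct rte_flow_action_raw_encap raw_conf = {
		.data = raw,
		.size = raw_len,
	};
	const struct rte_flow_action vxlan_encap = {
		.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
		.conf = &vxlan_conf,
	};
	const struct rte_flow_action raw_encap = {
		.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
		.conf = &raw_conf,
	};

	(void)vxlan_encap; /* Either one would go into the action list */
	(void)raw_encap;   /* passed to rte_flow_create(). */
}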
3209
3210 /**
3211  * Convert L2 decap action to DV specification.
3212  *
3213  * @param[in] dev
3214  *   Pointer to rte_eth_dev structure.
3215  * @param[in, out] dev_flow
3216  *   Pointer to the mlx5_flow.
3217  * @param[in] transfer
3218  *   Mark if the flow is E-Switch flow.
3219  * @param[out] error
3220  *   Pointer to the error structure.
3221  *
3222  * @return
3223  *   0 on success, a negative errno value otherwise and rte_errno is set.
3224  */
3225 static int
3226 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3227                                struct mlx5_flow *dev_flow,
3228                                uint8_t transfer,
3229                                struct rte_flow_error *error)
3230 {
3231         struct mlx5_flow_dv_encap_decap_resource res = {
3232                 .size = 0,
3233                 .reformat_type =
3234                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3235                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3236                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3237         };
3238
3239         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3240                 return rte_flow_error_set(error, EINVAL,
3241                                           RTE_FLOW_ERROR_TYPE_ACTION,
3242                                           NULL, "can't create L2 decap action");
3243         return 0;
3244 }
3245
3246 /**
3247  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3248  *
3249  * @param[in] dev
3250  *   Pointer to rte_eth_dev structure.
3251  * @param[in] action
3252  *   Pointer to action structure.
3253  * @param[in, out] dev_flow
3254  *   Pointer to the mlx5_flow.
3255  * @param[in] attr
3256  *   Pointer to the flow attributes.
3257  * @param[out] error
3258  *   Pointer to the error structure.
3259  *
3260  * @return
3261  *   0 on success, a negative errno value otherwise and rte_errno is set.
3262  */
3263 static int
3264 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3265                                 const struct rte_flow_action *action,
3266                                 struct mlx5_flow *dev_flow,
3267                                 const struct rte_flow_attr *attr,
3268                                 struct rte_flow_error *error)
3269 {
3270         const struct rte_flow_action_raw_encap *encap_data;
3271         struct mlx5_flow_dv_encap_decap_resource res;
3272
3273         memset(&res, 0, sizeof(res));
3274         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3275         res.size = encap_data->size;
3276         memcpy(res.buf, encap_data->data, res.size);
3277         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3278                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3279                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3280         if (attr->transfer)
3281                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3282         else
3283                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3284                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3285         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3286                 return rte_flow_error_set(error, EINVAL,
3287                                           RTE_FLOW_ERROR_TYPE_ACTION,
3288                                           NULL, "can't create encap action");
3289         return 0;
3290 }
3291
3292 /**
3293  * Create action push VLAN.
3294  *
3295  * @param[in] dev
3296  *   Pointer to rte_eth_dev structure.
3297  * @param[in] attr
3298  *   Pointer to the flow attributes.
3299  * @param[in] vlan
3300  *   Pointer to the vlan to push to the Ethernet header.
3301  * @param[in, out] dev_flow
3302  *   Pointer to the mlx5_flow.
3303  * @param[out] error
3304  *   Pointer to the error structure.
3305  *
3306  * @return
3307  *   0 on success, a negative errno value otherwise and rte_errno is set.
3308  */
3309 static int
3310 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3311                                 const struct rte_flow_attr *attr,
3312                                 const struct rte_vlan_hdr *vlan,
3313                                 struct mlx5_flow *dev_flow,
3314                                 struct rte_flow_error *error)
3315 {
3316         struct mlx5_flow_dv_push_vlan_action_resource res;
3317
3318         memset(&res, 0, sizeof(res));
3319         res.vlan_tag =
3320                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3321                                  vlan->vlan_tci);
3322         if (attr->transfer)
3323                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3324         else
3325                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3326                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3327         return flow_dv_push_vlan_action_resource_register
3328                                             (dev, &res, dev_flow, error);
3329 }
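/*
 * Sketch (illustration only): the 32-bit vlan_tag composed above packs
 * the TPID into the upper 16 bits and the TCI (PCP/DEI/VID) into the
 * lower 16 bits, in network byte order.
 */
static inline rte_be32_t
example_push_vlan_tag(uint16_t eth_proto, uint16_t tci)
{
	return rte_cpu_to_be_32(((uint32_t)eth_proto << 16) | tci);
}

/*
 * E.g. TPID 0x8100, PCP 3, VID 100:
 * example_push_vlan_tag(RTE_ETHER_TYPE_VLAN,
 *                       (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100);
 */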
3330
3331 /**
3332  * Validate the modify-header actions.
3333  *
3334  * @param[in] action_flags
3335  *   Holds the actions detected until now.
3336  * @param[in] action
3337  *   Pointer to the modify action.
3338  * @param[out] error
3339  *   Pointer to error structure.
3340  *
3341  * @return
3342  *   0 on success, a negative errno value otherwise and rte_errno is set.
3343  */
3344 static int
3345 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3346                                    const struct rte_flow_action *action,
3347                                    struct rte_flow_error *error)
3348 {
3349         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3350                 return rte_flow_error_set(error, EINVAL,
3351                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3352                                           NULL, "action configuration not set");
3353         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3354                 return rte_flow_error_set(error, EINVAL,
3355                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3356                                           "can't have encap action before"
3357                                           " modify action");
3358         return 0;
3359 }
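/*
 * Ordering sketch (illustration only): the check above enforces that
 * header rewrites precede encapsulation, since they must apply to the
 * not-yet-encapsulated packet.
 *
 *   Accepted: SET_IPV4_SRC -> VXLAN_ENCAP -> QUEUE
 *   Rejected: VXLAN_ENCAP -> SET_IPV4_SRC -> QUEUE
 */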
3360
3361 /**
3362  * Validate the modify-header MAC address actions.
3363  *
3364  * @param[in] action_flags
3365  *   Holds the actions detected until now.
3366  * @param[in] action
3367  *   Pointer to the modify action.
3368  * @param[in] item_flags
3369  *   Holds the items detected.
3370  * @param[out] error
3371  *   Pointer to error structure.
3372  *
3373  * @return
3374  *   0 on success, a negative errno value otherwise and rte_errno is set.
3375  */
3376 static int
3377 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3378                                    const struct rte_flow_action *action,
3379                                    const uint64_t item_flags,
3380                                    struct rte_flow_error *error)
3381 {
3382         int ret = 0;
3383
3384         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3385         if (!ret) {
3386                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3387                         return rte_flow_error_set(error, EINVAL,
3388                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3389                                                   NULL,
3390                                                   "no L2 item in pattern");
3391         }
3392         return ret;
3393 }
3394
3395 /**
3396  * Validate the modify-header IPv4 address actions.
3397  *
3398  * @param[in] action_flags
3399  *   Holds the actions detected until now.
3400  * @param[in] action
3401  *   Pointer to the modify action.
3402  * @param[in] item_flags
3403  *   Holds the items detected.
3404  * @param[out] error
3405  *   Pointer to error structure.
3406  *
3407  * @return
3408  *   0 on success, a negative errno value otherwise and rte_errno is set.
3409  */
3410 static int
3411 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3412                                     const struct rte_flow_action *action,
3413                                     const uint64_t item_flags,
3414                                     struct rte_flow_error *error)
3415 {
3416         int ret = 0;
3417         uint64_t layer;
3418
3419         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3420         if (!ret) {
3421                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3422                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3423                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3424                 if (!(item_flags & layer))
3425                         return rte_flow_error_set(error, EINVAL,
3426                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3427                                                   NULL,
3428                                                   "no ipv4 item in pattern");
3429         }
3430         return ret;
3431 }
3432
3433 /**
3434  * Validate the modify-header IPv6 address actions.
3435  *
3436  * @param[in] action_flags
3437  *   Holds the actions detected until now.
3438  * @param[in] action
3439  *   Pointer to the modify action.
3440  * @param[in] item_flags
3441  *   Holds the items detected.
3442  * @param[out] error
3443  *   Pointer to error structure.
3444  *
3445  * @return
3446  *   0 on success, a negative errno value otherwise and rte_errno is set.
3447  */
3448 static int
3449 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3450                                     const struct rte_flow_action *action,
3451                                     const uint64_t item_flags,
3452                                     struct rte_flow_error *error)
3453 {
3454         int ret = 0;
3455         uint64_t layer;
3456
3457         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3458         if (!ret) {
3459                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3460                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3461                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3462                 if (!(item_flags & layer))
3463                         return rte_flow_error_set(error, EINVAL,
3464                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3465                                                   NULL,
3466                                                   "no ipv6 item in pattern");
3467         }
3468         return ret;
3469 }
3470
3471 /**
3472  * Validate the modify-header TP actions.
3473  *
3474  * @param[in] action_flags
3475  *   Holds the actions detected until now.
3476  * @param[in] action
3477  *   Pointer to the modify action.
3478  * @param[in] item_flags
3479  *   Holds the items detected.
3480  * @param[out] error
3481  *   Pointer to error structure.
3482  *
3483  * @return
3484  *   0 on success, a negative errno value otherwise and rte_errno is set.
3485  */
3486 static int
3487 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3488                                   const struct rte_flow_action *action,
3489                                   const uint64_t item_flags,
3490                                   struct rte_flow_error *error)
3491 {
3492         int ret = 0;
3493         uint64_t layer;
3494
3495         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3496         if (!ret) {
3497                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3498                                  MLX5_FLOW_LAYER_INNER_L4 :
3499                                  MLX5_FLOW_LAYER_OUTER_L4;
3500                 if (!(item_flags & layer))
3501                         return rte_flow_error_set(error, EINVAL,
3502                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3503                                                   NULL, "no transport layer "
3504                                                   "in pattern");
3505         }
3506         return ret;
3507 }
3508
3509 /**
3510  * Validate the modify-header actions of increment/decrement
3511  * TCP Sequence-number.
3512  *
3513  * @param[in] action_flags
3514  *   Holds the actions detected until now.
3515  * @param[in] action
3516  *   Pointer to the modify action.
3517  * @param[in] item_flags
3518  *   Holds the items detected.
3519  * @param[out] error
3520  *   Pointer to error structure.
3521  *
3522  * @return
3523  *   0 on success, a negative errno value otherwise and rte_errno is set.
3524  */
3525 static int
3526 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3527                                        const struct rte_flow_action *action,
3528                                        const uint64_t item_flags,
3529                                        struct rte_flow_error *error)
3530 {
3531         int ret = 0;
3532         uint64_t layer;
3533
3534         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3535         if (!ret) {
3536                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3537                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3538                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3539                 if (!(item_flags & layer))
3540                         return rte_flow_error_set(error, EINVAL,
3541                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3542                                                   NULL, "no TCP item in"
3543                                                   " pattern");
3544                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3545                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3546                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3547                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3548                         return rte_flow_error_set(error, EINVAL,
3549                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3550                                                   NULL,
3551                                                   "cannot decrease and increase"
3552                                                   " TCP sequence number"
3553                                                   " at the same time");
3554         }
3555         return ret;
3556 }
3557
3558 /**
3559  * Validate the modify-header actions of increment/decrement
3560  * TCP Acknowledgment number.
3561  *
3562  * @param[in] action_flags
3563  *   Holds the actions detected until now.
3564  * @param[in] action
3565  *   Pointer to the modify action.
3566  * @param[in] item_flags
3567  *   Holds the items detected.
3568  * @param[out] error
3569  *   Pointer to error structure.
3570  *
3571  * @return
3572  *   0 on success, a negative errno value otherwise and rte_errno is set.
3573  */
3574 static int
3575 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3576                                        const struct rte_flow_action *action,
3577                                        const uint64_t item_flags,
3578                                        struct rte_flow_error *error)
3579 {
3580         int ret = 0;
3581         uint64_t layer;
3582
3583         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3584         if (!ret) {
3585                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3586                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3587                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3588                 if (!(item_flags & layer))
3589                         return rte_flow_error_set(error, EINVAL,
3590                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3591                                                   NULL, "no TCP item in"
3592                                                   " pattern");
3593                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3594                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3595                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3596                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3597                         return rte_flow_error_set(error, EINVAL,
3598                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3599                                                   NULL,
3600                                                   "cannot decrease and increase"
3601                                                   " TCP acknowledgment number"
3602                                                   " at the same time");
3603         }
3604         return ret;
3605 }
3606
3607 /**
3608  * Validate the modify-header TTL actions.
3609  *
3610  * @param[in] action_flags
3611  *   Holds the actions detected until now.
3612  * @param[in] action
3613  *   Pointer to the modify action.
3614  * @param[in] item_flags
3615  *   Holds the items detected.
3616  * @param[out] error
3617  *   Pointer to error structure.
3618  *
3619  * @return
3620  *   0 on success, a negative errno value otherwise and rte_errno is set.
3621  */
3622 static int
3623 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3624                                    const struct rte_flow_action *action,
3625                                    const uint64_t item_flags,
3626                                    struct rte_flow_error *error)
3627 {
3628         int ret = 0;
3629         uint64_t layer;
3630
3631         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3632         if (!ret) {
3633                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3634                                  MLX5_FLOW_LAYER_INNER_L3 :
3635                                  MLX5_FLOW_LAYER_OUTER_L3;
3636                 if (!(item_flags & layer))
3637                         return rte_flow_error_set(error, EINVAL,
3638                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3639                                                   NULL,
3640                                                   "no IP protocol in pattern");
3641         }
3642         return ret;
3643 }
3644
3645 /**
3646  * Validate jump action.
3647  *
3648  * @param[in] action
3649  *   Pointer to the jump action.
3650  * @param[in] action_flags
3651  *   Holds the actions detected until now.
3652  * @param[in] attributes
3653  *   Pointer to flow attributes
3654  * @param[in] external
3655  *   Action belongs to flow rule created by request external to PMD.
3656  * @param[out] error
3657  *   Pointer to error structure.
3658  *
3659  * @return
3660  *   0 on success, a negative errno value otherwise and rte_errno is set.
3661  */
3662 static int
3663 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3664                              uint64_t action_flags,
3665                              const struct rte_flow_attr *attributes,
3666                              bool external, struct rte_flow_error *error)
3667 {
3668         uint32_t target_group, table;
3669         int ret = 0;
3670
3671         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3672                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3673                 return rte_flow_error_set(error, EINVAL,
3674                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3675                                           "can't have 2 fate actions in"
3676                                           " same flow");
3677         if (action_flags & MLX5_FLOW_ACTION_METER)
3678                 return rte_flow_error_set(error, ENOTSUP,
3679                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3680                                           "jump with meter not supported");
3681         if (!action->conf)
3682                 return rte_flow_error_set(error, EINVAL,
3683                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3684                                           NULL, "action configuration not set");
3685         target_group =
3686                 ((const struct rte_flow_action_jump *)action->conf)->group;
3687         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3688                                        true, &table, error);
3689         if (ret)
3690                 return ret;
3691         if (attributes->group == target_group)
3692                 return rte_flow_error_set(error, EINVAL,
3693                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3694                                           "target group must be other than"
3695                                           " the current flow group");
3696         return 0;
3697 }
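/*
 * Application-side sketch (illustration only): a JUMP action targeting
 * group 1. Per the validation above, the target group must differ from
 * the group of the flow that carries the action.
 */
static const struct rte_flow_action_jump example_jump_conf = {
	.group = 1,
};
static const struct rte_flow_action example_jump_action = {
	.type = RTE_FLOW_ACTION_TYPE_JUMP,
	.conf = &example_jump_conf,
};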
3698
3699 /**
3700  * Validate the port_id action.
3701  *
3702  * @param[in] dev
3703  *   Pointer to rte_eth_dev structure.
3704  * @param[in] action_flags
3705  *   Bit-fields that holds the actions detected until now.
3706  * @param[in] action
3707  *   Port_id RTE action structure.
3708  * @param[in] attr
3709  *   Attributes of flow that includes this action.
3710  * @param[out] error
3711  *   Pointer to error structure.
3712  *
3713  * @return
3714  *   0 on success, a negative errno value otherwise and rte_errno is set.
3715  */
3716 static int
3717 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3718                                 uint64_t action_flags,
3719                                 const struct rte_flow_action *action,
3720                                 const struct rte_flow_attr *attr,
3721                                 struct rte_flow_error *error)
3722 {
3723         const struct rte_flow_action_port_id *port_id;
3724         struct mlx5_priv *act_priv;
3725         struct mlx5_priv *dev_priv;
3726         uint16_t port;
3727
3728         if (!attr->transfer)
3729                 return rte_flow_error_set(error, ENOTSUP,
3730                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3731                                           NULL,
3732                                           "port id action is valid in transfer"
3733                                           " mode only");
3734         if (!action || !action->conf)
3735                 return rte_flow_error_set(error, ENOTSUP,
3736                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3737                                           NULL,
3738                                           "port id action parameters must be"
3739                                           " specified");
3740         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3741                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3742                 return rte_flow_error_set(error, EINVAL,
3743                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3744                                           "can have only one fate action in"
3745                                           " a flow");
3746         dev_priv = mlx5_dev_to_eswitch_info(dev);
3747         if (!dev_priv)
3748                 return rte_flow_error_set(error, rte_errno,
3749                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3750                                           NULL,
3751                                           "failed to obtain E-Switch info");
3752         port_id = action->conf;
3753         port = port_id->original ? dev->data->port_id : port_id->id;
3754         act_priv = mlx5_port_to_eswitch_info(port, false);
3755         if (!act_priv)
3756                 return rte_flow_error_set
3757                                 (error, rte_errno,
3758                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3759                                  "failed to obtain E-Switch port id for port");
3760         if (act_priv->domain_id != dev_priv->domain_id)
3761                 return rte_flow_error_set
3762                                 (error, EINVAL,
3763                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3764                                  "port does not belong to"
3765                                  " E-Switch being configured");
3766         return 0;
3767 }
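/*
 * Application-side sketch (illustration only): a PORT_ID fate action.
 * It is accepted only with attr->transfer set, and both ports must
 * belong to the same E-Switch domain. `id = 1` is a hypothetical
 * destination DPDK port.
 */
static const struct rte_flow_action_port_id example_port_id_conf = {
	.original = 0,
	.id = 1,
};
static const struct rte_flow_action example_port_id_action = {
	.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
	.conf = &example_port_id_conf,
};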
3768
3769 /**
3770  * Get the maximum number of modify header actions.
3771  *
3772  * @param dev
3773  *   Pointer to rte_eth_dev structure.
3774  * @param flags
3775  *   Flags bits to check if root level.
3776  *
3777  * @return
3778  *   Max number of modify header actions device can support.
3779  */
3780 static inline unsigned int
3781 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev __rte_unused,
3782                               uint64_t flags)
3783 {
3784         /*
3785          * There's no way to directly query the max capacity from FW.
3786          * The maximal value on root table should be assumed to be supported.
3787          */
3788         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3789                 return MLX5_MAX_MODIFY_NUM;
3790         else
3791                 return MLX5_ROOT_TBL_MODIFY_NUM;
3792 }
3793
3794 /**
3795  * Validate the meter action.
3796  *
3797  * @param[in] dev
3798  *   Pointer to rte_eth_dev structure.
3799  * @param[in] action_flags
3800  *   Bit-fields that holds the actions detected until now.
3801  * @param[in] action
3802  *   Pointer to the meter action.
3803  * @param[in] attr
3804  *   Attributes of flow that includes this action.
3805  * @param[out] error
3806  *   Pointer to error structure.
3807  *
3808  * @return
3809  *   0 on success, a negative errno value otherwise and rte_errno is set.
3810  */
3811 static int
3812 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3813                                 uint64_t action_flags,
3814                                 const struct rte_flow_action *action,
3815                                 const struct rte_flow_attr *attr,
3816                                 struct rte_flow_error *error)
3817 {
3818         struct mlx5_priv *priv = dev->data->dev_private;
3819         const struct rte_flow_action_meter *am = action->conf;
3820         struct mlx5_flow_meter *fm;
3821
3822         if (!am)
3823                 return rte_flow_error_set(error, EINVAL,
3824                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3825                                           "meter action conf is NULL");
3826
3827         if (action_flags & MLX5_FLOW_ACTION_METER)
3828                 return rte_flow_error_set(error, ENOTSUP,
3829                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3830                                           "meter chaining not supported");
3831         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3832                 return rte_flow_error_set(error, ENOTSUP,
3833                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3834                                           "meter with jump not supported");
3835         if (!priv->mtr_en)
3836                 return rte_flow_error_set(error, ENOTSUP,
3837                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3838                                           NULL,
3839                                           "meter action not supported");
3840         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3841         if (!fm)
3842                 return rte_flow_error_set(error, EINVAL,
3843                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3844                                           "Meter not found");
3845         if (fm->ref_cnt && (!(fm->transfer == attr->transfer ||
3846               (!fm->ingress && !attr->ingress && attr->egress) ||
3847               (!fm->egress && !attr->egress && attr->ingress))))
3848                 return rte_flow_error_set(error, EINVAL,
3849                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3850                                           "Flow attributes are either invalid "
3851                                           "or have a conflict with current "
3852                                           "meter attributes");
3853         return 0;
3854 }
3855
3856 /**
3857  * Validate the age action.
3858  *
3859  * @param[in] action_flags
3860  *   Holds the actions detected until now.
3861  * @param[in] action
3862  *   Pointer to the age action.
3863  * @param[in] dev
3864  *   Pointer to the Ethernet device structure.
3865  * @param[out] error
3866  *   Pointer to error structure.
3867  *
3868  * @return
3869  *   0 on success, a negative errno value otherwise and rte_errno is set.
3870  */
3871 static int
3872 flow_dv_validate_action_age(uint64_t action_flags,
3873                             const struct rte_flow_action *action,
3874                             struct rte_eth_dev *dev,
3875                             struct rte_flow_error *error)
3876 {
3877         struct mlx5_priv *priv = dev->data->dev_private;
3878         const struct rte_flow_action_age *age = action->conf;
3879
3880         if (!priv->config.devx || priv->counter_fallback)
3881                 return rte_flow_error_set(error, ENOTSUP,
3882                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3883                                           NULL,
3884                                           "age action not supported");
3885         if (!(action->conf))
3886                 return rte_flow_error_set(error, EINVAL,
3887                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3888                                           "configuration cannot be null");
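        /*
         * With integer division UINT16_MAX / 2 / 10 = 3276, so the
         * largest timeout accepted by the check below is 3275 seconds.
         */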
3889         if (age->timeout >= UINT16_MAX / 2 / 10)
3890                 return rte_flow_error_set(error, ENOTSUP,
3891                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
3892                                           "Max age time: 3275 seconds");
3893         if (action_flags & MLX5_FLOW_ACTION_AGE)
3894                 return rte_flow_error_set(error, EINVAL,
3895                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3896                                           "Duplicate age actions set");
3897         return 0;
3898 }
3899
3900 /**
3901  * Validate the modify-header IPv4 DSCP actions.
3902  *
3903  * @param[in] action_flags
3904  *   Holds the actions detected until now.
3905  * @param[in] action
3906  *   Pointer to the modify action.
3907  * @param[in] item_flags
3908  *   Holds the items detected.
3909  * @param[out] error
3910  *   Pointer to error structure.
3911  *
3912  * @return
3913  *   0 on success, a negative errno value otherwise and rte_errno is set.
3914  */
3915 static int
3916 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3917                                          const struct rte_flow_action *action,
3918                                          const uint64_t item_flags,
3919                                          struct rte_flow_error *error)
3920 {
3921         int ret = 0;
3922
3923         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3924         if (!ret) {
3925                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3926                         return rte_flow_error_set(error, EINVAL,
3927                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3928                                                   NULL,
3929                                                   "no ipv4 item in pattern");
3930         }
3931         return ret;
3932 }
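
/*
 * Illustrative sketch: a SET_IPV4_DSCP action is accepted by the check
 * above only when the pattern contains an IPv4 item, e.g.:
 *
 *     const struct rte_flow_action_set_dscp conf = { .dscp = 10 };
 *     const struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
 *             .conf = &conf,
 *     };
 */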
3933
3934 /**
3935  * Validate the modify-header IPv6 DSCP actions.
3936  *
3937  * @param[in] action_flags
3938  *   Holds the actions detected until now.
3939  * @param[in] action
3940  *   Pointer to the modify action.
3941  * @param[in] item_flags
3942  *   Holds the items detected.
3943  * @param[out] error
3944  *   Pointer to error structure.
3945  *
3946  * @return
3947  *   0 on success, a negative errno value otherwise and rte_errno is set.
3948  */
3949 static int
3950 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3951                                          const struct rte_flow_action *action,
3952                                          const uint64_t item_flags,
3953                                          struct rte_flow_error *error)
3954 {
3955         int ret = 0;
3956
3957         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3958         if (!ret) {
3959                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3960                         return rte_flow_error_set(error, EINVAL,
3961                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3962                                                   NULL,
3963                                                   "no ipv6 item in pattern");
3964         }
3965         return ret;
3966 }
3967
3968 /**
3969  * Find existing modify-header resource or create and register a new one.
3970  *
3971  * @param[in, out] dev
3972  *   Pointer to rte_eth_dev structure.
3973  * @param[in, out] resource
3974  *   Pointer to modify-header resource.
3975  * @param[in, out] dev_flow
3976  *   Pointer to the dev_flow.
3977  * @param[out] error
3978  *   Pointer to error structure.
3979  *
3980  * @return
3981  *   0 on success, otherwise -errno and errno is set.
3982  */
3983 static int
3984 flow_dv_modify_hdr_resource_register
3985                         (struct rte_eth_dev *dev,
3986                          struct mlx5_flow_dv_modify_hdr_resource *resource,
3987                          struct mlx5_flow *dev_flow,
3988                          struct rte_flow_error *error)
3989 {
3990         struct mlx5_priv *priv = dev->data->dev_private;
3991         struct mlx5_dev_ctx_shared *sh = priv->sh;
3992         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3993         struct mlx5dv_dr_domain *ns;
3994         uint32_t actions_len;
3995         int ret;
3996
3997         resource->flags = dev_flow->dv.group ? 0 :
3998                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3999         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
4000                                     resource->flags))
4001                 return rte_flow_error_set(error, EOVERFLOW,
4002                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
4003                                           "too many modify header items");
4004         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
4005                 ns = sh->fdb_domain;
4006         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
4007                 ns = sh->tx_domain;
4008         else
4009                 ns = sh->rx_domain;
4010         /* Lookup a matching resource from cache. */
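        /*
         * The cache key is (ft_type, actions_num, flags) plus the raw
         * encoded action bytes compared with memcmp(), so logically
         * equal modify lists are shared only when encoded identically.
         */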
4011         actions_len = resource->actions_num * sizeof(resource->actions[0]);
4012         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
4013                 if (resource->ft_type == cache_resource->ft_type &&
4014                     resource->actions_num == cache_resource->actions_num &&
4015                     resource->flags == cache_resource->flags &&
4016                     !memcmp((const void *)resource->actions,
4017                             (const void *)cache_resource->actions,
4018                             actions_len)) {
4019                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
4020                                 (void *)cache_resource,
4021                                 rte_atomic32_read(&cache_resource->refcnt));
4022                         rte_atomic32_inc(&cache_resource->refcnt);
4023                         dev_flow->handle->dvh.modify_hdr = cache_resource;
4024                         return 0;
4025                 }
4026         }
4027         /* Register new modify-header resource. */
4028         cache_resource = mlx5_malloc(MLX5_MEM_ZERO,
4029                                     sizeof(*cache_resource) + actions_len, 0,
4030                                     SOCKET_ID_ANY);
4031         if (!cache_resource)
4032                 return rte_flow_error_set(error, ENOMEM,
4033                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4034                                           "cannot allocate resource memory");
4035         *cache_resource = *resource;
4036         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
4037         ret = mlx5_flow_os_create_flow_action_modify_header
4038                                         (sh->ctx, ns, cache_resource,
4039                                          actions_len, &cache_resource->action);
4040         if (ret) {
4041                 mlx5_free(cache_resource);
4042                 return rte_flow_error_set(error, ENOMEM,
4043                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4044                                           NULL, "cannot create action");
4045         }
4046         rte_atomic32_init(&cache_resource->refcnt);
4047         rte_atomic32_inc(&cache_resource->refcnt);
4048         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
4049         dev_flow->handle->dvh.modify_hdr = cache_resource;
4050         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
4051                 (void *)cache_resource,
4052                 rte_atomic32_read(&cache_resource->refcnt));
4053         return 0;
4054 }
4055
4056 /**
4057  * Get DV flow counter by index.
4058  *
4059  * @param[in] dev
4060  *   Pointer to the Ethernet device structure.
4061  * @param[in] idx
4062  *   mlx5 flow counter index in the container.
4063  * @param[out] ppool
4064  *   mlx5 flow counter pool in the container.
4065  *
4066  * @return
4067  *   Pointer to the counter, NULL otherwise.
4068  */
4069 static struct mlx5_flow_counter *
4070 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
4071                            uint32_t idx,
4072                            struct mlx5_flow_counter_pool **ppool)
4073 {
4074         struct mlx5_priv *priv = dev->data->dev_private;
4075         struct mlx5_pools_container *cont;
4076         struct mlx5_flow_counter_pool *pool;
4077         uint32_t batch = 0, age = 0;
4078
4079         idx--;
4080         age = MLX_CNT_IS_AGE(idx);
4081         idx = age ? idx - MLX5_CNT_AGE_OFFSET : idx;
4082         if (idx >= MLX5_CNT_BATCH_OFFSET) {
4083                 idx -= MLX5_CNT_BATCH_OFFSET;
4084                 batch = 1;
4085         }
4086         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4087         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
4088         pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
4089         MLX5_ASSERT(pool);
4090         if (ppool)
4091                 *ppool = pool;
4092         return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL);
4093 }
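
/*
 * Worked example (illustrative): for a batch, non-age counter index
 * idx = MLX5_CNT_BATCH_OFFSET + 1, the decrement above removes the
 * 1-based bias, no age bit is set, the batch offset is stripped and the
 * counter resolves to pool 0, entry 0 of the batch container.
 */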
4094
4095 /**
4096  * Check the devx counter belongs to the pool.
4097  *
4098  * @param[in] pool
4099  *   Pointer to the counter pool.
4100  * @param[in] id
4101  *   The counter devx ID.
4102  *
4103  * @return
4104  *   True if counter belongs to the pool, false otherwise.
4105  */
4106 static bool
4107 flow_dv_is_counter_in_pool(struct mlx5_flow_counter_pool *pool, int id)
4108 {
4109         int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
4110                    MLX5_COUNTERS_PER_POOL;
4111
4112         if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
4113                 return true;
4114         return false;
4115 }
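
/*
 * Example (assuming MLX5_COUNTERS_PER_POOL is 512): a pool whose
 * min_dcs->id is 1234 owns the devx ID range [1024, 1535], since
 * base = (1234 / 512) * 512 = 1024.
 */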
4116
4117 /**
4118  * Get a pool by devx counter ID.
4119  *
4120  * @param[in] cont
4121  *   Pointer to the counter container.
4122  * @param[in] id
4123  *   The counter devx ID.
4124  *
4125  * @return
4126  *   The counter pool pointer if it exists, NULL otherwise.
4127  */
4128 static struct mlx5_flow_counter_pool *
4129 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
4130 {
4131         uint32_t i;
4132
4133         /* Check last used pool. */
4134         if (cont->last_pool_idx != POOL_IDX_INVALID &&
4135             flow_dv_is_counter_in_pool(cont->pools[cont->last_pool_idx], id))
4136                 return cont->pools[cont->last_pool_idx];
4137         /* ID out of range means no suitable pool in the container. */
4138         if (id > cont->max_id || id < cont->min_id)
4139                 return NULL;
4140         /*
4141          * Find the pool from the end of the container, since counter IDs
4142          * are mostly allocated in increasing sequence, so the last pool
4143          * should be the needed one.
4144          */
4145         i = rte_atomic16_read(&cont->n_valid);
4146         while (i--) {
4147                 struct mlx5_flow_counter_pool *pool = cont->pools[i];
4148
4149                 if (flow_dv_is_counter_in_pool(pool, id))
4150                         return pool;
4151         }
4152         return NULL;
4153 }
4154
4155 /**
4156  * Allocate new memory for the counter values, wrapped by all the needed
4157  * management.
4158  *
4159  * @param[in] dev
4160  *   Pointer to the Ethernet device structure.
4161  * @param[in] raws_n
4162  *   The number of raw memory areas, each for MLX5_COUNTERS_PER_POOL counters.
4163  *
4164  * @return
4165  *   The new memory management pointer on success, otherwise NULL and rte_errno
4166  *   is set.
4167  */
4168 static struct mlx5_counter_stats_mem_mng *
4169 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
4170 {
4171         struct mlx5_priv *priv = dev->data->dev_private;
4172         struct mlx5_dev_ctx_shared *sh = priv->sh;
4173         struct mlx5_devx_mkey_attr mkey_attr;
4174         struct mlx5_counter_stats_mem_mng *mem_mng;
4175         volatile struct flow_counter_stats *raw_data;
4176         int size = (sizeof(struct flow_counter_stats) *
4177                         MLX5_COUNTERS_PER_POOL +
4178                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
4179                         sizeof(struct mlx5_counter_stats_mem_mng);
4180         uint8_t *mem = mlx5_malloc(MLX5_MEM_ZERO, size, sysconf(_SC_PAGESIZE),
4181                                   SOCKET_ID_ANY);
4182         int i;
4183
4184         if (!mem) {
4185                 rte_errno = ENOMEM;
4186                 return NULL;
4187         }
4188         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
4189         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
4190         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
4191                                                  IBV_ACCESS_LOCAL_WRITE);
4192         if (!mem_mng->umem) {
4193                 rte_errno = errno;
4194                 mlx5_free(mem);
4195                 return NULL;
4196         }
4197         mkey_attr.addr = (uintptr_t)mem;
4198         mkey_attr.size = size;
4199         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
4200         mkey_attr.pd = sh->pdn;
4201         mkey_attr.log_entity_size = 0;
4202         mkey_attr.pg_access = 0;
4203         mkey_attr.klm_array = NULL;
4204         mkey_attr.klm_num = 0;
4205         /* Assign unconditionally so the flag is never left uninitialized. */
4206         mkey_attr.relaxed_ordering =
4207                 priv->config.hca_attr.relaxed_ordering_write &&
4208                 priv->config.hca_attr.relaxed_ordering_read &&
4209                 !haswell_broadwell_cpu;
4209         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
4210         if (!mem_mng->dm) {
4211                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
4212                 rte_errno = errno;
4213                 mlx5_free(mem);
4214                 return NULL;
4215         }
4216         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
4217         raw_data = (volatile struct flow_counter_stats *)mem;
4218         for (i = 0; i < raws_n; ++i) {
4219                 mem_mng->raws[i].mem_mng = mem_mng;
4220                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
4221         }
4222         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
4223         return mem_mng;
4224 }
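
/*
 * Layout of the single allocation above, front to back:
 *
 *     [raws_n * MLX5_COUNTERS_PER_POOL counter stats]
 *     [raws_n struct mlx5_counter_stats_raw headers]
 *     [struct mlx5_counter_stats_mem_mng]
 *
 * Only the leading stats area is registered as umem and mkey for DevX.
 */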
4225
4226 /**
4227  * Resize a counter container.
4228  *
4229  * @param[in] dev
4230  *   Pointer to the Ethernet device structure.
4231  * @param[in] batch
4232  *   Whether the pool is for counters that were allocated by batch command.
4233  * @param[in] age
4234  *   Whether the pool is for aging counters.
4235  *
4236  * @return
4237  *   0 on success, otherwise negative errno value and rte_errno is set.
4238  */
4239 static int
4240 flow_dv_container_resize(struct rte_eth_dev *dev,
4241                                 uint32_t batch, uint32_t age)
4242 {
4243         struct mlx5_priv *priv = dev->data->dev_private;
4244         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4245                                                                age);
4246         struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
4247         void *old_pools = cont->pools;
4248         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
4249         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
4250         void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
4251
4252         if (!pools) {
4253                 rte_errno = ENOMEM;
4254                 return -ENOMEM;
4255         }
4256         if (old_pools)
4257                 memcpy(pools, old_pools, cont->n *
4258                                        sizeof(struct mlx5_flow_counter_pool *));
4259         /*
4260          * Fallback mode queries the counter directly, no background query
4261          * resources are needed.
4262          */
4263         if (!priv->counter_fallback) {
4264                 int i;
4265
4266                 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4267                           MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4268                 if (!mem_mng) {
4269                         mlx5_free(pools);
4270                         return -ENOMEM;
4271                 }
4272                 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4273                         LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4274                                          mem_mng->raws +
4275                                          MLX5_CNT_CONTAINER_RESIZE +
4276                                          i, next);
4277         }
4278         rte_spinlock_lock(&cont->resize_sl);
4279         cont->n = resize;
4280         cont->mem_mng = mem_mng;
4281         cont->pools = pools;
4282         rte_spinlock_unlock(&cont->resize_sl);
4283         if (old_pools)
4284                 mlx5_free(old_pools);
4285         return 0;
4286 }
4287
4288 /**
4289  * Query a devx flow counter.
4290  *
4291  * @param[in] dev
4292  *   Pointer to the Ethernet device structure.
4293  * @param[in] cnt
4294  *   Index to the flow counter.
4295  * @param[out] pkts
4296  *   The statistics value of packets.
4297  * @param[out] bytes
4298  *   The statistics value of bytes.
4299  *
4300  * @return
4301  *   0 on success, otherwise a negative errno value and rte_errno is set.
4302  */
4303 static inline int
4304 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4305                      uint64_t *bytes)
4306 {
4307         struct mlx5_priv *priv = dev->data->dev_private;
4308         struct mlx5_flow_counter_pool *pool = NULL;
4309         struct mlx5_flow_counter *cnt;
4310         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4311         int offset;
4312
4313         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4314         MLX5_ASSERT(pool);
4315         if (counter < MLX5_CNT_BATCH_OFFSET) {
4316                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4317                 if (priv->counter_fallback)
4318                         return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
4319                                         0, pkts, bytes, 0, NULL, NULL, 0);
4320         }
4321
4322         rte_spinlock_lock(&pool->sl);
4323         /*
4324          * A single counter allocation may produce an ID smaller than the
4325          * minimum dcs ID covered by the raw data the host thread is reading.
4326          * In this case the new counter values must be reported as 0.
4327          */
4328         if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4329                 *pkts = 0;
4330                 *bytes = 0;
4331         } else {
4332                 offset = MLX5_CNT_ARRAY_IDX(pool, cnt);
4333                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4334                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4335         }
4336         rte_spinlock_unlock(&pool->sl);
4337         return 0;
4338 }
4339
4340 /**
4341  * Create and initialize a new counter pool.
4342  *
4343  * @param[in] dev
4344  *   Pointer to the Ethernet device structure.
4345  * @param[out] dcs
4346  *   The devX counter handle.
4347  * @param[in] batch
4348  *   Whether the pool is for counters that were allocated by batch command.
4349  * @param[in] age
4350  *   Whether the pool is for counters that were allocated for aging.
4353  *
4354  * @return
4355  *   The counter pool pointer on success, NULL otherwise and rte_errno is set.
4356  */
4357 static struct mlx5_flow_counter_pool *
4358 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4359                     uint32_t batch, uint32_t age)
4360 {
4361         struct mlx5_priv *priv = dev->data->dev_private;
4362         struct mlx5_flow_counter_pool *pool;
4363         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4364                                                                age);
4365         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4366         uint32_t size = sizeof(*pool);
4367
4368         if (cont->n == n_valid && flow_dv_container_resize(dev, batch, age))
4369                 return NULL;
4370         size += MLX5_COUNTERS_PER_POOL * CNT_SIZE;
4371         size += (batch ? 0 : MLX5_COUNTERS_PER_POOL * CNTEXT_SIZE);
4372         size += (!age ? 0 : MLX5_COUNTERS_PER_POOL * AGE_SIZE);
4373         pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY);
4374         if (!pool) {
4375                 rte_errno = ENOMEM;
4376                 return NULL;
4377         }
4378         pool->min_dcs = dcs;
4379         if (!priv->counter_fallback)
4380                 pool->raw = cont->mem_mng->raws + n_valid %
4381                                                       MLX5_CNT_CONTAINER_RESIZE;
4382         pool->raw_hw = NULL;
4383         pool->type = 0;
4384         pool->type |= (batch ? 0 :  CNT_POOL_TYPE_EXT);
4385         pool->type |= (!age ? 0 :  CNT_POOL_TYPE_AGE);
4386         pool->query_gen = 0;
4387         rte_spinlock_init(&pool->sl);
4388         TAILQ_INIT(&pool->counters[0]);
4389         TAILQ_INIT(&pool->counters[1]);
4390         TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4391         pool->index = n_valid;
4392         cont->pools[n_valid] = pool;
4393         if (!batch) {
4394                 int base = RTE_ALIGN_FLOOR(dcs->id, MLX5_COUNTERS_PER_POOL);
4395
4396                 if (base < cont->min_id)
4397                         cont->min_id = base;
4398                 if (base > cont->max_id)
4399                         cont->max_id = base + MLX5_COUNTERS_PER_POOL - 1;
4400                 cont->last_pool_idx = pool->index;
4401         }
4402         /* Pool initialization must complete before host thread access. */
4403         rte_cio_wmb();
4404         rte_atomic16_add(&cont->n_valid, 1);
4405         return pool;
4406 }
4407
4408 /**
4409  * Update the minimum dcs-id for the aged or non-aged counter pool.
4410  *
4411  * @param[in] dev
4412  *   Pointer to the Ethernet device structure.
4413  * @param[in] pool
4414  *   Current counter pool.
4415  * @param[in] batch
4416  *   Whether the pool is for counters that were allocated by batch command.
4417  * @param[in] age
4418  *   Whether the counter is for aging.
4419  */
4420 static void
4421 flow_dv_counter_update_min_dcs(struct rte_eth_dev *dev,
4422                         struct mlx5_flow_counter_pool *pool,
4423                         uint32_t batch, uint32_t age)
4424 {
4425         struct mlx5_priv *priv = dev->data->dev_private;
4426         struct mlx5_flow_counter_pool *other;
4427         struct mlx5_pools_container *cont;
4428
4429         cont = MLX5_CNT_CONTAINER(priv->sh, batch, (age ^ 0x1));
4430         other = flow_dv_find_pool_by_id(cont, pool->min_dcs->id);
4431         if (!other)
4432                 return;
4433         if (pool->min_dcs->id < other->min_dcs->id) {
4434                 rte_atomic64_set(&other->a64_dcs,
4435                         rte_atomic64_read(&pool->a64_dcs));
4436         } else {
4437                 rte_atomic64_set(&pool->a64_dcs,
4438                         rte_atomic64_read(&other->a64_dcs));
4439         }
4440 }
4441 /**
4442  * Prepare a new counter and/or a new counter pool.
4443  *
4444  * @param[in] dev
4445  *   Pointer to the Ethernet device structure.
4446  * @param[out] cnt_free
4447  *   Where to put the pointer of a new counter.
4448  *   Whether the pool is for counters that were allocated by batch command.
4449  * @param[in] age
4450  *   Whether the pool is for counters that were allocated for aging.
4451  *   Whether the pool is for counter that was allocated for aging.
4452  *
4453  * @return
4454  *   The counter pool pointer and @p cnt_free is set on success,
4455  *   NULL otherwise and rte_errno is set.
4456  */
4457 static struct mlx5_flow_counter_pool *
4458 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4459                              struct mlx5_flow_counter **cnt_free,
4460                              uint32_t batch, uint32_t age)
4461 {
4462         struct mlx5_priv *priv = dev->data->dev_private;
4463         struct mlx5_pools_container *cont;
4464         struct mlx5_flow_counter_pool *pool;
4465         struct mlx5_counters tmp_tq;
4466         struct mlx5_devx_obj *dcs = NULL;
4467         struct mlx5_flow_counter *cnt;
4468         uint32_t i;
4469
4470         cont = MLX5_CNT_CONTAINER(priv->sh, batch, age);
4471         if (!batch) {
4472                 /* bulk_bitmap must be 0 for single counter allocation. */
4473                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4474                 if (!dcs)
4475                         return NULL;
4476                 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4477                 if (!pool) {
4478                         pool = flow_dv_pool_create(dev, dcs, batch, age);
4479                         if (!pool) {
4480                                 mlx5_devx_cmd_destroy(dcs);
4481                                 return NULL;
4482                         }
4483                 } else if (dcs->id < pool->min_dcs->id) {
4484                         rte_atomic64_set(&pool->a64_dcs,
4485                                          (int64_t)(uintptr_t)dcs);
4486                 }
4487                 flow_dv_counter_update_min_dcs(dev,
4488                                                 pool, batch, age);
4489                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4490                 cnt = MLX5_POOL_GET_CNT(pool, i);
4491                 cnt->pool = pool;
4492                 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
4493                 *cnt_free = cnt;
4494                 return pool;
4495         }
4496         /* bulk_bitmap is in 128 counters units. */
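        /*
         * 0x4 below thus requests 4 * 128 = 512 counters in one batch,
         * which is expected to fill a whole pool (assuming
         * MLX5_COUNTERS_PER_POOL is 512).
         */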
4497         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4498                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4499         if (!dcs) {
4500                 rte_errno = ENODATA;
4501                 return NULL;
4502         }
4503         pool = flow_dv_pool_create(dev, dcs, batch, age);
4504         if (!pool) {
4505                 mlx5_devx_cmd_destroy(dcs);
4506                 return NULL;
4507         }
4508         TAILQ_INIT(&tmp_tq);
4509         for (i = 1; i < MLX5_COUNTERS_PER_POOL; ++i) {
4510                 cnt = MLX5_POOL_GET_CNT(pool, i);
4511                 cnt->pool = pool;
4512                 TAILQ_INSERT_HEAD(&tmp_tq, cnt, next);
4513         }
4514         rte_spinlock_lock(&cont->csl);
4515         TAILQ_CONCAT(&cont->counters, &tmp_tq, next);
4516         rte_spinlock_unlock(&cont->csl);
4517         *cnt_free = MLX5_POOL_GET_CNT(pool, 0);
4518         (*cnt_free)->pool = pool;
4519         return pool;
4520 }
4521
4522 /**
4523  * Search for an existing shared counter.
4524  *
4525  * @param[in] dev
4526  *   Pointer to the Ethernet device structure.
4527  * @param[in] id
4528  *   The shared counter ID to search.
4529  * @param[out] ppool
4530  *   mlx5 flow counter pool in the container.
4531  *
4532  * @return
4533  *   NULL if not found, otherwise pointer to the shared extended counter.
4534  */
4535 static struct mlx5_flow_counter_ext *
4536 flow_dv_counter_shared_search(struct rte_eth_dev *dev, uint32_t id,
4537                               struct mlx5_flow_counter_pool **ppool)
4538 {
4539         struct mlx5_priv *priv = dev->data->dev_private;
4540         union mlx5_l3t_data data;
4541         uint32_t cnt_idx;
4542
4543         if (mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) || !data.dword)
4544                 return NULL;
4545         cnt_idx = data.dword;
4546         /*
4547          * Shared counters don't have age info. The counter extension is
4548          * right after the counter data structure.
4549          */
4550         return (struct mlx5_flow_counter_ext *)
4551                ((flow_dv_counter_get_by_idx(dev, cnt_idx, ppool)) + 1);
4552 }
4553
4554 /**
4555  * Allocate a flow counter.
4556  *
4557  * @param[in] dev
4558  *   Pointer to the Ethernet device structure.
4559  * @param[in] shared
4560  *   Indicate if this counter is shared with other flows.
4561  * @param[in] id
4562  *   Counter identifier.
4563  * @param[in] group
4564  *   Counter flow group.
4565  * @param[in] age
4566  *   Whether the counter was allocated for aging.
4567  *
4568  * @return
4569  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4570  */
4571 static uint32_t
4572 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4573                       uint16_t group, uint32_t age)
4574 {
4575         struct mlx5_priv *priv = dev->data->dev_private;
4576         struct mlx5_flow_counter_pool *pool = NULL;
4577         struct mlx5_flow_counter *cnt_free = NULL;
4578         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4579         /*
4580          * Currently a group 0 flow counter cannot be assigned to a flow if
4581          * it is not the first one in the batch counter allocation, so it is
4582          * better to allocate counters one by one for these flows in a
4583          * separate container.
4584          * A counter can be shared between different groups, so the shared
4585          * counters are taken from the single-counter container.
4586          */
4587         uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
4588         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4589                                                                age);
4590         uint32_t cnt_idx;
4591
4592         if (!priv->config.devx) {
4593                 rte_errno = ENOTSUP;
4594                 return 0;
4595         }
4596         if (shared) {
4597                 cnt_ext = flow_dv_counter_shared_search(dev, id, &pool);
4598                 if (cnt_ext) {
4599                         if (cnt_ext->ref_cnt + 1 == 0) {
4600                                 rte_errno = E2BIG;
4601                                 return 0;
4602                         }
4603                         cnt_ext->ref_cnt++;
4604                         cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
4605                                   (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
4606                                   + 1;
4607                         return cnt_idx;
4608                 }
4609         }
4610         /* Get free counters from container. */
4611         rte_spinlock_lock(&cont->csl);
4612         cnt_free = TAILQ_FIRST(&cont->counters);
4613         if (cnt_free)
4614                 TAILQ_REMOVE(&cont->counters, cnt_free, next);
4615         rte_spinlock_unlock(&cont->csl);
4616         if (!cnt_free && !flow_dv_counter_pool_prepare(dev, &cnt_free,
4617                                                        batch, age))
4618                 goto err;
4619         pool = cnt_free->pool;
4620         if (!batch)
4621                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
4622         /* Create a DV counter action only on the first use. */
4623         if (!cnt_free->action) {
4624                 uint16_t offset;
4625                 struct mlx5_devx_obj *dcs;
4626                 int ret;
4627
4628                 if (batch) {
4629                         offset = MLX5_CNT_ARRAY_IDX(pool, cnt_free);
4630                         dcs = pool->min_dcs;
4631                 } else {
4632                         offset = 0;
4633                         dcs = cnt_ext->dcs;
4634                 }
4635                 ret = mlx5_flow_os_create_flow_action_count(dcs->obj, offset,
4636                                                             &cnt_free->action);
4637                 if (ret) {
4638                         rte_errno = errno;
4639                         goto err;
4640                 }
4641         }
4642         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4643                                 MLX5_CNT_ARRAY_IDX(pool, cnt_free));
4644         cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
4645         cnt_idx += age * MLX5_CNT_AGE_OFFSET;
4646         /* Update the counter reset values. */
4647         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4648                                  &cnt_free->bytes))
4649                 goto err;
4650         if (cnt_ext) {
4651                 cnt_ext->shared = shared;
4652                 cnt_ext->ref_cnt = 1;
4653                 cnt_ext->id = id;
4654                 if (shared) {
4655                         union mlx5_l3t_data data;
4656
4657                         data.dword = cnt_idx;
4658                         if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data))
4659                                 return 0;
4660                 }
4661         }
4662         if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
4663                 /* Start the asynchronous batch query by the host thread. */
4664                 mlx5_set_query_alarm(priv->sh);
4665         return cnt_idx;
4666 err:
4667         if (cnt_free) {
4668                 cnt_free->pool = pool;
4669                 rte_spinlock_lock(&cont->csl);
4670                 TAILQ_INSERT_TAIL(&cont->counters, cnt_free, next);
4671                 rte_spinlock_unlock(&cont->csl);
4672         }
4673         return 0;
4674 }
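
/*
 * Worked example of the index composition above (illustrative): a
 * counter at offset 7 of pool 2, allocated by batch command for aging,
 * gets MLX5_MAKE_CNT_IDX(2, 7) + MLX5_CNT_BATCH_OFFSET +
 * MLX5_CNT_AGE_OFFSET; flow_dv_counter_get_by_idx() reverses exactly
 * these steps.
 */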
4675
4676 /**
4677  * Get age param from counter index.
4678  *
4679  * @param[in] dev
4680  *   Pointer to the Ethernet device structure.
4681  * @param[in] counter
4682  *   Index to the counter handler.
4683  *
4684  * @return
4685  *   The aging parameter specified for the counter index.
4686  */
4687 static struct mlx5_age_param*
4688 flow_dv_counter_idx_get_age(struct rte_eth_dev *dev,
4689                                 uint32_t counter)
4690 {
4691         struct mlx5_flow_counter *cnt;
4692         struct mlx5_flow_counter_pool *pool = NULL;
4693
4694         flow_dv_counter_get_by_idx(dev, counter, &pool);
4695         counter = (counter - 1) % MLX5_COUNTERS_PER_POOL;
4696         cnt = MLX5_POOL_GET_CNT(pool, counter);
4697         return MLX5_CNT_TO_AGE(cnt);
4698 }
4699
4700 /**
4701  * Remove a flow counter from aged counter list.
4702  *
4703  * @param[in] dev
4704  *   Pointer to the Ethernet device structure.
4705  * @param[in] counter
4706  *   Index to the counter handler.
4707  * @param[in] cnt
4708  *   Pointer to the counter handler.
4709  */
4710 static void
4711 flow_dv_counter_remove_from_age(struct rte_eth_dev *dev,
4712                                 uint32_t counter, struct mlx5_flow_counter *cnt)
4713 {
4714         struct mlx5_age_info *age_info;
4715         struct mlx5_age_param *age_param;
4716         struct mlx5_priv *priv = dev->data->dev_private;
4717
4718         age_info = GET_PORT_AGE_INFO(priv);
4719         age_param = flow_dv_counter_idx_get_age(dev, counter);
4720         if (rte_atomic16_cmpset((volatile uint16_t *)
4721                         &age_param->state,
4722                         AGE_CANDIDATE, AGE_FREE)
4723                         != AGE_CANDIDATE) {
4724                 /*
4725                  * We need the lock even if it is an age timeout,
4726                  * since the counter may still be in process.
4727                  */
4728                 rte_spinlock_lock(&age_info->aged_sl);
4729                 TAILQ_REMOVE(&age_info->aged_counters, cnt, next);
4730                 rte_spinlock_unlock(&age_info->aged_sl);
4731         }
4732         rte_atomic16_set(&age_param->state, AGE_FREE);
4733 }
4734 /**
4735  * Release a flow counter.
4736  *
4737  * @param[in] dev
4738  *   Pointer to the Ethernet device structure.
4739  * @param[in] counter
4740  *   Index to the counter handler.
4741  */
4742 static void
4743 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
4744 {
4745         struct mlx5_priv *priv = dev->data->dev_private;
4746         struct mlx5_flow_counter_pool *pool = NULL;
4747         struct mlx5_flow_counter *cnt;
4748         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4749
4750         if (!counter)
4751                 return;
4752         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4753         MLX5_ASSERT(pool);
4754         if (counter < MLX5_CNT_BATCH_OFFSET) {
4755                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4756                 if (cnt_ext) {
4757                         if (--cnt_ext->ref_cnt)
4758                                 return;
4759                         if (cnt_ext->shared)
4760                                 mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl,
4761                                                      cnt_ext->id);
4762                 }
4763         }
4764         if (IS_AGE_POOL(pool))
4765                 flow_dv_counter_remove_from_age(dev, counter, cnt);
4766         cnt->pool = pool;
4767         /*
4768          * Put the counter back to the list to be updated in non-fallback
4769          * mode. Currently, two lists are used alternately: while one is in
4770          * query, the freed counter is added to the other list based on the
4771          * pool query_gen value. After the query finishes, the counters on
4772          * that list are moved to the global container counter list. The
4773          * lists swap when a query starts, so no lock is needed as the query
4774          * callback and the release function operate on different lists.
4775          */
4777         if (!priv->counter_fallback)
4778                 TAILQ_INSERT_TAIL(&pool->counters[pool->query_gen], cnt, next);
4779         else
4780                 TAILQ_INSERT_TAIL(&((MLX5_CNT_CONTAINER
4781                                   (priv->sh, 0, 0))->counters),
4782                                   cnt, next);
4783 }
4784
4785 /**
4786  * Verify the @p attributes will be correctly understood by the NIC and store
4787  * them in the @p flow if everything is correct.
4788  *
4789  * @param[in] dev
4790  *   Pointer to dev struct.
4791  * @param[in] attributes
4792  *   Pointer to flow attributes
4793  * @param[in] external
4794  *   This flow rule is created by a request external to the PMD.
4795  * @param[out] error
4796  *   Pointer to error structure.
4797  *
4798  * @return
4799  *   - 0 on success and non-root table.
4800  *   - 1 on success and root table.
4801  *   - a negative errno value otherwise and rte_errno is set.
4802  */
4803 static int
4804 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4805                             const struct rte_flow_attr *attributes,
4806                             bool external __rte_unused,
4807                             struct rte_flow_error *error)
4808 {
4809         struct mlx5_priv *priv = dev->data->dev_private;
4810         uint32_t priority_max = priv->config.flow_prio - 1;
4811         int ret = 0;
4812
4813 #ifndef HAVE_MLX5DV_DR
4814         if (attributes->group)
4815                 return rte_flow_error_set(error, ENOTSUP,
4816                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4817                                           NULL,
4818                                           "groups are not supported");
4819 #else
4820         uint32_t table = 0;
4821
4822         ret = mlx5_flow_group_to_table(attributes, external,
4823                                        attributes->group, !!priv->fdb_def_rule,
4824                                        &table, error);
4825         if (ret)
4826                 return ret;
4827         if (!table)
4828                 ret = MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
4829 #endif
4830         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4831             attributes->priority >= priority_max)
4832                 return rte_flow_error_set(error, ENOTSUP,
4833                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4834                                           NULL,
4835                                           "priority out of range");
4836         if (attributes->transfer) {
4837                 if (!priv->config.dv_esw_en)
4838                         return rte_flow_error_set
4839                                 (error, ENOTSUP,
4840                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4841                                  "E-Switch dr is not supported");
4842                 if (!(priv->representor || priv->master))
4843                         return rte_flow_error_set
4844                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4845                                  NULL, "E-Switch configuration can only be"
4846                                  " done by a master or a representor device");
4847                 if (attributes->egress)
4848                         return rte_flow_error_set
4849                                 (error, ENOTSUP,
4850                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4851                                  "egress is not supported");
4852         }
4853         if (!(attributes->egress ^ attributes->ingress))
4854                 return rte_flow_error_set(error, ENOTSUP,
4855                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4856                                           "must specify exactly one of "
4857                                           "ingress or egress");
4858         return ret;
4859 }
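
/*
 * Illustrative sketch: attributes accepted by the checks above in
 * E-Switch mode - exactly one direction bit, priority in range and no
 * egress together with transfer:
 *
 *     const struct rte_flow_attr attr = {
 *             .group = 1,
 *             .priority = 0,
 *             .ingress = 1,
 *             .transfer = 1,
 *     };
 */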
4860
4861 /**
4862  * Internal validation function. For validating both actions and items.
4863  *
4864  * @param[in] dev
4865  *   Pointer to the rte_eth_dev structure.
4866  * @param[in] attr
4867  *   Pointer to the flow attributes.
4868  * @param[in] items
4869  *   Pointer to the list of items.
4870  * @param[in] actions
4871  *   Pointer to the list of actions.
4872  * @param[in] external
4873  *   This flow rule is created by a request external to the PMD.
4874  * @param[in] hairpin
4875  *   Number of hairpin TX actions, 0 means classic flow.
4876  * @param[out] error
4877  *   Pointer to the error structure.
4878  *
4879  * @return
4880  *   0 on success, a negative errno value otherwise and rte_errno is set.
4881  */
4882 static int
4883 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4884                  const struct rte_flow_item items[],
4885                  const struct rte_flow_action actions[],
4886                  bool external, int hairpin, struct rte_flow_error *error)
4887 {
4888         int ret;
4889         uint64_t action_flags = 0;
4890         uint64_t item_flags = 0;
4891         uint64_t last_item = 0;
4892         uint8_t next_protocol = 0xff;
4893         uint16_t ether_type = 0;
4894         int actions_n = 0;
4895         uint8_t item_ipv6_proto = 0;
4896         const struct rte_flow_item *gre_item = NULL;
4897         const struct rte_flow_action_raw_decap *decap;
4898         const struct rte_flow_action_raw_encap *encap;
4899         const struct rte_flow_action_rss *rss;
4900         const struct rte_flow_item_tcp nic_tcp_mask = {
4901                 .hdr = {
4902                         .tcp_flags = 0xFF,
4903                         .src_port = RTE_BE16(UINT16_MAX),
4904                         .dst_port = RTE_BE16(UINT16_MAX),
4905                 }
4906         };
4907         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
4908                 .hdr = {
4909                         .src_addr = RTE_BE32(0xffffffff),
4910                         .dst_addr = RTE_BE32(0xffffffff),
4911                         .type_of_service = 0xff,
4912                         .next_proto_id = 0xff,
4913                         .time_to_live = 0xff,
4914                 },
4915         };
4916         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
4917                 .hdr = {
4918                         .src_addr =
4919                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4920                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4921                         .dst_addr =
4922                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4923                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4924                         .vtc_flow = RTE_BE32(0xffffffff),
4925                         .proto = 0xff,
4926                         .hop_limits = 0xff,
4927                 },
4928         };
4929         const struct rte_flow_item_ecpri nic_ecpri_mask = {
4930                 .hdr = {
4931                         .common = {
4932                                 .u32 =
4933                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
4934                                         .type = 0xFF,
4935                                         }).u32),
4936                         },
4937                         .dummy[0] = 0xffffffff,
4938                 },
4939         };
4940         struct mlx5_priv *priv = dev->data->dev_private;
4941         struct mlx5_dev_config *dev_conf = &priv->config;
4942         uint16_t queue_index = 0xFFFF;
4943         const struct rte_flow_item_vlan *vlan_m = NULL;
4944         int16_t rw_act_num = 0;
4945         uint64_t is_root;
4946
4947         if (items == NULL)
4948                 return -1;
4949         ret = flow_dv_validate_attributes(dev, attr, external, error);
4950         if (ret < 0)
4951                 return ret;
4952         is_root = (uint64_t)ret;
4953         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4954                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4955                 int type = items->type;
4956
4957                 if (!mlx5_flow_os_item_supported(type))
4958                         return rte_flow_error_set(error, ENOTSUP,
4959                                                   RTE_FLOW_ERROR_TYPE_ITEM,
4960                                                   NULL, "item not supported");
4961                 switch (type) {
4962                 case RTE_FLOW_ITEM_TYPE_VOID:
4963                         break;
4964                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4965                         ret = flow_dv_validate_item_port_id
4966                                         (dev, items, attr, item_flags, error);
4967                         if (ret < 0)
4968                                 return ret;
4969                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4970                         break;
4971                 case RTE_FLOW_ITEM_TYPE_ETH:
4972                         ret = mlx5_flow_validate_item_eth(items, item_flags,
4973                                                           error);
4974                         if (ret < 0)
4975                                 return ret;
4976                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4977                                              MLX5_FLOW_LAYER_OUTER_L2;
4978                         if (items->mask != NULL && items->spec != NULL) {
4979                                 ether_type =
4980                                         ((const struct rte_flow_item_eth *)
4981                                          items->spec)->type;
4982                                 ether_type &=
4983                                         ((const struct rte_flow_item_eth *)
4984                                          items->mask)->type;
4985                                 ether_type = rte_be_to_cpu_16(ether_type);
4986                         } else {
4987                                 ether_type = 0;
4988                         }
4989                         break;
4990                 case RTE_FLOW_ITEM_TYPE_VLAN:
4991                         ret = flow_dv_validate_item_vlan(items, item_flags,
4992                                                          dev, error);
4993                         if (ret < 0)
4994                                 return ret;
4995                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4996                                              MLX5_FLOW_LAYER_OUTER_VLAN;
4997                         if (items->mask != NULL && items->spec != NULL) {
4998                                 ether_type =
4999                                         ((const struct rte_flow_item_vlan *)
5000                                          items->spec)->inner_type;
5001                                 ether_type &=
5002                                         ((const struct rte_flow_item_vlan *)
5003                                          items->mask)->inner_type;
5004                                 ether_type = rte_be_to_cpu_16(ether_type);
5005                         } else {
5006                                 ether_type = 0;
5007                         }
5008                         /* Store outer VLAN mask for of_push_vlan action. */
5009                         if (!tunnel)
5010                                 vlan_m = items->mask;
5011                         break;
5012                 case RTE_FLOW_ITEM_TYPE_IPV4:
5013                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5014                                                   &item_flags, &tunnel);
5015                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
5016                                                            last_item,
5017                                                            ether_type,
5018                                                            &nic_ipv4_mask,
5019                                                            error);
5020                         if (ret < 0)
5021                                 return ret;
5022                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5023                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5024                         if (items->mask != NULL &&
5025                             ((const struct rte_flow_item_ipv4 *)
5026                              items->mask)->hdr.next_proto_id) {
5027                                 next_protocol =
5028                                         ((const struct rte_flow_item_ipv4 *)
5029                                          (items->spec))->hdr.next_proto_id;
5030                                 next_protocol &=
5031                                         ((const struct rte_flow_item_ipv4 *)
5032                                          (items->mask))->hdr.next_proto_id;
5033                         } else {
5034                                 /* Reset for inner layer. */
5035                                 next_protocol = 0xff;
5036                         }
5037                         break;
5038                 case RTE_FLOW_ITEM_TYPE_IPV6:
5039                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5040                                                   &item_flags, &tunnel);
5041                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
5042                                                            last_item,
5043                                                            ether_type,
5044                                                            &nic_ipv6_mask,
5045                                                            error);
5046                         if (ret < 0)
5047                                 return ret;
5048                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5049                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5050                         if (items->mask != NULL &&
5051                             ((const struct rte_flow_item_ipv6 *)
5052                              items->mask)->hdr.proto) {
5053                                 item_ipv6_proto =
5054                                         ((const struct rte_flow_item_ipv6 *)
5055                                          items->spec)->hdr.proto;
5056                                 next_protocol =
5057                                         ((const struct rte_flow_item_ipv6 *)
5058                                          items->spec)->hdr.proto;
5059                                 next_protocol &=
5060                                         ((const struct rte_flow_item_ipv6 *)
5061                                          items->mask)->hdr.proto;
5062                         } else {
5063                                 /* Reset for inner layer. */
5064                                 next_protocol = 0xff;
5065                         }
5066                         break;
5067                 case RTE_FLOW_ITEM_TYPE_TCP:
5068                         ret = mlx5_flow_validate_item_tcp
5069                                                 (items, item_flags,
5070                                                  next_protocol,
5071                                                  &nic_tcp_mask,
5072                                                  error);
5073                         if (ret < 0)
5074                                 return ret;
5075                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5076                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5077                         break;
5078                 case RTE_FLOW_ITEM_TYPE_UDP:
5079                         ret = mlx5_flow_validate_item_udp(items, item_flags,
5080                                                           next_protocol,
5081                                                           error);
5082                         if (ret < 0)
5083                                 return ret;
5084                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5085                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5086                         break;
5087                 case RTE_FLOW_ITEM_TYPE_GRE:
5088                         ret = mlx5_flow_validate_item_gre(items, item_flags,
5089                                                           next_protocol, error);
5090                         if (ret < 0)
5091                                 return ret;
5092                         gre_item = items;
5093                         last_item = MLX5_FLOW_LAYER_GRE;
5094                         break;
5095                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5096                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
5097                                                             next_protocol,
5098                                                             error);
5099                         if (ret < 0)
5100                                 return ret;
5101                         last_item = MLX5_FLOW_LAYER_NVGRE;
5102                         break;
5103                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5104                         ret = mlx5_flow_validate_item_gre_key
5105                                 (items, item_flags, gre_item, error);
5106                         if (ret < 0)
5107                                 return ret;
5108                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5109                         break;
5110                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5111                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
5112                                                             error);
5113                         if (ret < 0)
5114                                 return ret;
5115                         last_item = MLX5_FLOW_LAYER_VXLAN;
5116                         break;
5117                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5118                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
5119                                                                 item_flags, dev,
5120                                                                 error);
5121                         if (ret < 0)
5122                                 return ret;
5123                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5124                         break;
5125                 case RTE_FLOW_ITEM_TYPE_GENEVE:
5126                         ret = mlx5_flow_validate_item_geneve(items,
5127                                                              item_flags, dev,
5128                                                              error);
5129                         if (ret < 0)
5130                                 return ret;
5131                         last_item = MLX5_FLOW_LAYER_GENEVE;
5132                         break;
5133                 case RTE_FLOW_ITEM_TYPE_MPLS:
5134                         ret = mlx5_flow_validate_item_mpls(dev, items,
5135                                                            item_flags,
5136                                                            last_item, error);
5137                         if (ret < 0)
5138                                 return ret;
5139                         last_item = MLX5_FLOW_LAYER_MPLS;
5140                         break;
5141
5142                 case RTE_FLOW_ITEM_TYPE_MARK:
5143                         ret = flow_dv_validate_item_mark(dev, items, attr,
5144                                                          error);
5145                         if (ret < 0)
5146                                 return ret;
5147                         last_item = MLX5_FLOW_ITEM_MARK;
5148                         break;
5149                 case RTE_FLOW_ITEM_TYPE_META:
5150                         ret = flow_dv_validate_item_meta(dev, items, attr,
5151                                                          error);
5152                         if (ret < 0)
5153                                 return ret;
5154                         last_item = MLX5_FLOW_ITEM_METADATA;
5155                         break;
5156                 case RTE_FLOW_ITEM_TYPE_ICMP:
5157                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
5158                                                            next_protocol,
5159                                                            error);
5160                         if (ret < 0)
5161                                 return ret;
5162                         last_item = MLX5_FLOW_LAYER_ICMP;
5163                         break;
5164                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5165                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
5166                                                             next_protocol,
5167                                                             error);
5168                         if (ret < 0)
5169                                 return ret;
5170                         item_ipv6_proto = IPPROTO_ICMPV6;
5171                         last_item = MLX5_FLOW_LAYER_ICMP6;
5172                         break;
5173                 case RTE_FLOW_ITEM_TYPE_TAG:
5174                         ret = flow_dv_validate_item_tag(dev, items,
5175                                                         attr, error);
5176                         if (ret < 0)
5177                                 return ret;
5178                         last_item = MLX5_FLOW_ITEM_TAG;
5179                         break;
5180                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
5181                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
5182                         break;
5183                 case RTE_FLOW_ITEM_TYPE_GTP:
5184                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
5185                                                         error);
5186                         if (ret < 0)
5187                                 return ret;
5188                         last_item = MLX5_FLOW_LAYER_GTP;
5189                         break;
5190                 case RTE_FLOW_ITEM_TYPE_ECPRI:
5191                         /* Capacity will be checked in the translate stage. */
5192                         ret = mlx5_flow_validate_item_ecpri(items, item_flags,
5193                                                             last_item,
5194                                                             ether_type,
5195                                                             &nic_ecpri_mask,
5196                                                             error);
5197                         if (ret < 0)
5198                                 return ret;
5199                         last_item = MLX5_FLOW_LAYER_ECPRI;
5200                         break;
5201                 default:
5202                         return rte_flow_error_set(error, ENOTSUP,
5203                                                   RTE_FLOW_ERROR_TYPE_ITEM,
5204                                                   NULL, "item not supported");
5205                 }
5206                 item_flags |= last_item;
5207         }
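        /*
         * Validate the action list: each action is checked against the
         * accumulated action_flags, actions_n counts the device actions,
         * and rw_act_num counts the header modify sub-actions.
         */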
5208         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5209                 int type = actions->type;
5210
5211                 if (!mlx5_flow_os_action_supported(type))
5212                         return rte_flow_error_set(error, ENOTSUP,
5213                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5214                                                   actions,
5215                                                   "action not supported");
5216                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
5217                         return rte_flow_error_set(error, ENOTSUP,
5218                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5219                                                   actions, "too many actions");
5220                 switch (type) {
5221                 case RTE_FLOW_ACTION_TYPE_VOID:
5222                         break;
5223                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5224                         ret = flow_dv_validate_action_port_id(dev,
5225                                                               action_flags,
5226                                                               actions,
5227                                                               attr,
5228                                                               error);
5229                         if (ret)
5230                                 return ret;
5231                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5232                         ++actions_n;
5233                         break;
5234                 case RTE_FLOW_ACTION_TYPE_FLAG:
5235                         ret = flow_dv_validate_action_flag(dev, action_flags,
5236                                                            attr, error);
5237                         if (ret < 0)
5238                                 return ret;
5239                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5240                                 /* Count all modify-header actions as one. */
5241                                 if (!(action_flags &
5242                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5243                                         ++actions_n;
5244                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
5245                                                 MLX5_FLOW_ACTION_MARK_EXT;
5246                         } else {
5247                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
5248                                 ++actions_n;
5249                         }
5250                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5251                         break;
5252                 case RTE_FLOW_ACTION_TYPE_MARK:
5253                         ret = flow_dv_validate_action_mark(dev, actions,
5254                                                            action_flags,
5255                                                            attr, error);
5256                         if (ret < 0)
5257                                 return ret;
5258                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
5259                                 /* Count all modify-header actions as one. */
5260                                 if (!(action_flags &
5261                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
5262                                         ++actions_n;
5263                                 action_flags |= MLX5_FLOW_ACTION_MARK |
5264                                                 MLX5_FLOW_ACTION_MARK_EXT;
5265                         } else {
5266                                 action_flags |= MLX5_FLOW_ACTION_MARK;
5267                                 ++actions_n;
5268                         }
5269                         rw_act_num += MLX5_ACT_NUM_SET_MARK;
5270                         break;
5271                 case RTE_FLOW_ACTION_TYPE_SET_META:
5272                         ret = flow_dv_validate_action_set_meta(dev, actions,
5273                                                                action_flags,
5274                                                                attr, error);
5275                         if (ret < 0)
5276                                 return ret;
5277                         /* Count all modify-header actions as one action. */
5278                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5279                                 ++actions_n;
5280                         action_flags |= MLX5_FLOW_ACTION_SET_META;
5281                         rw_act_num += MLX5_ACT_NUM_SET_META;
5282                         break;
5283                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
5284                         ret = flow_dv_validate_action_set_tag(dev, actions,
5285                                                               action_flags,
5286                                                               attr, error);
5287                         if (ret < 0)
5288                                 return ret;
5289                         /* Count all modify-header actions as one action. */
5290                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5291                                 ++actions_n;
5292                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5293                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5294                         break;
5295                 case RTE_FLOW_ACTION_TYPE_DROP:
5296                         ret = mlx5_flow_validate_action_drop(action_flags,
5297                                                              attr, error);
5298                         if (ret < 0)
5299                                 return ret;
5300                         action_flags |= MLX5_FLOW_ACTION_DROP;
5301                         ++actions_n;
5302                         break;
5303                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5304                         ret = mlx5_flow_validate_action_queue(actions,
5305                                                               action_flags, dev,
5306                                                               attr, error);
5307                         if (ret < 0)
5308                                 return ret;
5309                         queue_index = ((const struct rte_flow_action_queue *)
5310                                                         (actions->conf))->index;
5311                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5312                         ++actions_n;
5313                         break;
5314                 case RTE_FLOW_ACTION_TYPE_RSS:
5315                         rss = actions->conf;
5316                         ret = mlx5_flow_validate_action_rss(actions,
5317                                                             action_flags, dev,
5318                                                             attr, item_flags,
5319                                                             error);
5320                         if (ret < 0)
5321                                 return ret;
5322                         if (rss != NULL && rss->queue_num)
5323                                 queue_index = rss->queue[0];
5324                         action_flags |= MLX5_FLOW_ACTION_RSS;
5325                         ++actions_n;
5326                         break;
5327                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
5328                         ret = mlx5_flow_validate_action_default_miss
5329                                                 (action_flags, attr,
5330                                                  error);
5331                         if (ret < 0)
5332                                 return ret;
5333                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
5334                         ++actions_n;
5335                         break;
5336                 case RTE_FLOW_ACTION_TYPE_COUNT:
5337                         ret = flow_dv_validate_action_count(dev, error);
5338                         if (ret < 0)
5339                                 return ret;
5340                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5341                         ++actions_n;
5342                         break;
5343                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5344                         if (flow_dv_validate_action_pop_vlan(dev,
5345                                                              action_flags,
5346                                                              actions,
5347                                                              item_flags, attr,
5348                                                              error))
5349                                 return -rte_errno;
5350                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5351                         ++actions_n;
5352                         break;
5353                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5354                         ret = flow_dv_validate_action_push_vlan(dev,
5355                                                                 action_flags,
5356                                                                 vlan_m,
5357                                                                 actions, attr,
5358                                                                 error);
5359                         if (ret < 0)
5360                                 return ret;
5361                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5362                         ++actions_n;
5363                         break;
5364                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5365                         ret = flow_dv_validate_action_set_vlan_pcp
5366                                                 (action_flags, actions, error);
5367                         if (ret < 0)
5368                                 return ret;
5369                         /* Count PCP together with the push_vlan action. */
5370                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
5371                         break;
5372                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5373                         ret = flow_dv_validate_action_set_vlan_vid
5374                                                 (item_flags, action_flags,
5375                                                  actions, error);
5376                         if (ret < 0)
5377                                 return ret;
5378                         /* Count VID together with the push_vlan action. */
5379                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5380                         rw_act_num += MLX5_ACT_NUM_MDF_VID;
5381                         break;
5382                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5383                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5384                         ret = flow_dv_validate_action_l2_encap(dev,
5385                                                                action_flags,
5386                                                                actions, attr,
5387                                                                error);
5388                         if (ret < 0)
5389                                 return ret;
5390                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
5391                         ++actions_n;
5392                         break;
5393                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5394                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5395                         ret = flow_dv_validate_action_decap(dev, action_flags,
5396                                                             attr, error);
5397                         if (ret < 0)
5398                                 return ret;
5399                         action_flags |= MLX5_FLOW_ACTION_DECAP;
5400                         ++actions_n;
5401                         break;
5402                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5403                         ret = flow_dv_validate_action_raw_encap_decap
5404                                 (dev, NULL, actions->conf, attr, &action_flags,
5405                                  &actions_n, error);
5406                         if (ret < 0)
5407                                 return ret;
5408                         break;
5409                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5410                         decap = actions->conf;
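                        /*
                         * Look ahead past any VOID actions: a RAW_DECAP
                         * immediately followed by RAW_ENCAP is validated
                         * as one combined decap/encap pair.
                         */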
5411                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
5412                                 ;
5413                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5414                                 encap = NULL;
5415                                 actions--;
5416                         } else {
5417                                 encap = actions->conf;
5418                         }
5419                         ret = flow_dv_validate_action_raw_encap_decap
5420                                            (dev,
5421                                             decap ? decap : &empty_decap, encap,
5422                                             attr, &action_flags, &actions_n,
5423                                             error);
5424                         if (ret < 0)
5425                                 return ret;
5426                         break;
5427                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5428                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5429                         ret = flow_dv_validate_action_modify_mac(action_flags,
5430                                                                  actions,
5431                                                                  item_flags,
5432                                                                  error);
5433                         if (ret < 0)
5434                                 return ret;
5435                         /* Count all modify-header actions as one action. */
5436                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5437                                 ++actions_n;
5438                         action_flags |= actions->type ==
5439                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5440                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5441                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5442                         /*
5443                          * Even if the source and destination MAC addresses
5444                          * overlap in the header with 4B alignment, the
5445                          * convert function handles them separately and 4 SW
5446                          * actions are created; 2 actions are added each
5447                          * time, no matter how many address bytes are set.
5448                          */
5449                         rw_act_num += MLX5_ACT_NUM_MDF_MAC;
5450                         break;
5451                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5452                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5453                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5454                                                                   actions,
5455                                                                   item_flags,
5456                                                                   error);
5457                         if (ret < 0)
5458                                 return ret;
5459                         /* Count all modify-header actions as one action. */
5460                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5461                                 ++actions_n;
5462                         action_flags |= actions->type ==
5463                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5464                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5465                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5466                         rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
5467                         break;
5468                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5469                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5470                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5471                                                                   actions,
5472                                                                   item_flags,
5473                                                                   error);
5474                         if (ret < 0)
5475                                 return ret;
5476                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5477                                 return rte_flow_error_set(error, ENOTSUP,
5478                                         RTE_FLOW_ERROR_TYPE_ACTION,
5479                                         actions,
5480                                         "Can't change header "
5481                                         "with ICMPv6 proto");
5482                         /* Count all modify-header actions as one action. */
5483                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5484                                 ++actions_n;
5485                         action_flags |= actions->type ==
5486                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5487                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5488                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5489                         rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
5490                         break;
5491                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5492                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5493                         ret = flow_dv_validate_action_modify_tp(action_flags,
5494                                                                 actions,
5495                                                                 item_flags,
5496                                                                 error);
5497                         if (ret < 0)
5498                                 return ret;
5499                         /* Count all modify-header actions as one action. */
5500                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5501                                 ++actions_n;
5502                         action_flags |= actions->type ==
5503                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5504                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5505                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5506                         rw_act_num += MLX5_ACT_NUM_MDF_PORT;
5507                         break;
5508                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5509                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5510                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5511                                                                  actions,
5512                                                                  item_flags,
5513                                                                  error);
5514                         if (ret < 0)
5515                                 return ret;
5516                         /* Count all modify-header actions as one action. */
5517                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5518                                 ++actions_n;
5519                         action_flags |= actions->type ==
5520                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5521                                                 MLX5_FLOW_ACTION_SET_TTL :
5522                                                 MLX5_FLOW_ACTION_DEC_TTL;
5523                         rw_act_num += MLX5_ACT_NUM_MDF_TTL;
5524                         break;
5525                 case RTE_FLOW_ACTION_TYPE_JUMP:
5526                         ret = flow_dv_validate_action_jump(actions,
5527                                                            action_flags,
5528                                                            attr, external,
5529                                                            error);
5530                         if (ret)
5531                                 return ret;
5532                         ++actions_n;
5533                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5534                         break;
5535                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5536                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5537                         ret = flow_dv_validate_action_modify_tcp_seq
5538                                                                 (action_flags,
5539                                                                  actions,
5540                                                                  item_flags,
5541                                                                  error);
5542                         if (ret < 0)
5543                                 return ret;
5544                         /* Count all modify-header actions as one action. */
5545                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5546                                 ++actions_n;
5547                         action_flags |= actions->type ==
5548                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5549                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5550                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5551                         rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
5552                         break;
5553                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5554                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5555                         ret = flow_dv_validate_action_modify_tcp_ack
5556                                                                 (action_flags,
5557                                                                  actions,
5558                                                                  item_flags,
5559                                                                  error);
5560                         if (ret < 0)
5561                                 return ret;
5562                         /* Count all modify-header actions as one action. */
5563                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5564                                 ++actions_n;
5565                         action_flags |= actions->type ==
5566                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5567                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5568                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5569                         rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
5570                         break;
5571                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5572                         break;
5573                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5574                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5575                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5576                         break;
5577                 case RTE_FLOW_ACTION_TYPE_METER:
5578                         ret = mlx5_flow_validate_action_meter(dev,
5579                                                               action_flags,
5580                                                               actions, attr,
5581                                                               error);
5582                         if (ret < 0)
5583                                 return ret;
5584                         action_flags |= MLX5_FLOW_ACTION_METER;
5585                         ++actions_n;
5586                         /* Meter action will add one more TAG action. */
5587                         rw_act_num += MLX5_ACT_NUM_SET_TAG;
5588                         break;
5589                 case RTE_FLOW_ACTION_TYPE_AGE:
5590                         ret = flow_dv_validate_action_age(action_flags,
5591                                                           actions, dev,
5592                                                           error);
5593                         if (ret < 0)
5594                                 return ret;
5595                         action_flags |= MLX5_FLOW_ACTION_AGE;
5596                         ++actions_n;
5597                         break;
5598                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5599                         ret = flow_dv_validate_action_modify_ipv4_dscp
5600                                                          (action_flags,
5601                                                           actions,
5602                                                           item_flags,
5603                                                           error);
5604                         if (ret < 0)
5605                                 return ret;
5606                         /* Count all modify-header actions as one action. */
5607                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5608                                 ++actions_n;
5609                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5610                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5611                         break;
5612                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5613                         ret = flow_dv_validate_action_modify_ipv6_dscp
5614                                                                 (action_flags,
5615                                                                  actions,
5616                                                                  item_flags,
5617                                                                  error);
5618                         if (ret < 0)
5619                                 return ret;
5620                         /* Count all modify-header actions as one action. */
5621                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5622                                 ++actions_n;
5623                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5624                         rw_act_num += MLX5_ACT_NUM_SET_DSCP;
5625                         break;
5626                 default:
5627                         return rte_flow_error_set(error, ENOTSUP,
5628                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5629                                                   actions,
5630                                                   "action not supported");
5631                 }
5632         }
5633         /*
5634          * Validate the drop action mutual exclusion with other actions.
5635          * Drop action is mutually-exclusive with any other action, except for
5636          * Count action.
5637          */
5638         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5639             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5640                 return rte_flow_error_set(error, EINVAL,
5641                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5642                                           "Drop action is mutually-exclusive "
5643                                           "with any other action, except for "
5644                                           "Count action");
5645         /* E-Switch has a few restrictions on using items and actions. */
5646         if (attr->transfer) {
5647                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5648                     action_flags & MLX5_FLOW_ACTION_FLAG)
5649                         return rte_flow_error_set(error, ENOTSUP,
5650                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5651                                                   NULL,
5652                                                   "unsupported action FLAG");
5653                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5654                     action_flags & MLX5_FLOW_ACTION_MARK)
5655                         return rte_flow_error_set(error, ENOTSUP,
5656                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5657                                                   NULL,
5658                                                   "unsupported action MARK");
5659                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5660                         return rte_flow_error_set(error, ENOTSUP,
5661                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5662                                                   NULL,
5663                                                   "unsupported action QUEUE");
5664                 if (action_flags & MLX5_FLOW_ACTION_RSS)
5665                         return rte_flow_error_set(error, ENOTSUP,
5666                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5667                                                   NULL,
5668                                                   "unsupported action RSS");
5669                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5670                         return rte_flow_error_set(error, EINVAL,
5671                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5672                                                   actions,
5673                                                   "no fate action is found");
5674         } else {
5675                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5676                         return rte_flow_error_set(error, EINVAL,
5677                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5678                                                   actions,
5679                                                   "no fate action is found");
5680         }
5681         /* Continue validation for Xcap actions. */
5682         if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
5683             mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5684                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5685                     MLX5_FLOW_XCAP_ACTIONS)
5686                         return rte_flow_error_set(error, ENOTSUP,
5687                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5688                                                   NULL, "encap and decap "
5689                                                   "combination is not supported");
5690                 if (!attr->transfer && attr->ingress && (action_flags &
5691                                                         MLX5_FLOW_ACTION_ENCAP))
5692                         return rte_flow_error_set(error, ENOTSUP,
5693                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5694                                                   NULL, "encap is not supported"
5695                                                   " for ingress traffic");
5696         }
5697         /* Hairpin flow will add one more TAG action. */
5698         if (hairpin > 0)
5699                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5700         /* Extra metadata enabled: one more TAG action will be added. */
5701         if (dev_conf->dv_flow_en &&
5702             dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
5703             mlx5_flow_ext_mreg_supported(dev))
5704                 rw_act_num += MLX5_ACT_NUM_SET_TAG;
5705         if ((uint32_t)rw_act_num >
5706                         flow_dv_modify_hdr_action_max(dev, is_root)) {
5707                 return rte_flow_error_set(error, ENOTSUP,
5708                                           RTE_FLOW_ERROR_TYPE_ACTION,
5709                                           NULL, "too many header modify"
5710                                           " actions to support");
5711         }
5712         return 0;
5713 }
5714
5715 /**
5716  * Internal preparation function. Allocates the DV flow structure;
5717  * its size is constant.
5718  *
5719  * @param[in] dev
5720  *   Pointer to the rte_eth_dev structure.
5721  * @param[in] attr
5722  *   Pointer to the flow attributes.
5723  * @param[in] items
5724  *   Pointer to the list of items.
5725  * @param[in] actions
5726  *   Pointer to the list of actions.
5727  * @param[out] error
5728  *   Pointer to the error structure.
5729  *
5730  * @return
5731  *   Pointer to mlx5_flow object on success,
5732  *   otherwise NULL and rte_errno is set.
5733  */
5734 static struct mlx5_flow *
5735 flow_dv_prepare(struct rte_eth_dev *dev,
5736                 const struct rte_flow_attr *attr __rte_unused,
5737                 const struct rte_flow_item items[] __rte_unused,
5738                 const struct rte_flow_action actions[] __rte_unused,
5739                 struct rte_flow_error *error)
5740 {
5741         uint32_t handle_idx = 0;
5742         struct mlx5_flow *dev_flow;
5743         struct mlx5_flow_handle *dev_handle;
5744         struct mlx5_priv *priv = dev->data->dev_private;
5745
5746         /* Sanity check to avoid corrupting the temporary flow array. */
5747         if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
5748                 rte_flow_error_set(error, ENOSPC,
5749                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5750                                    "no free temporary device flow");
5751                 return NULL;
5752         }
5753         dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
5754                                    &handle_idx);
5755         if (!dev_handle) {
5756                 rte_flow_error_set(error, ENOMEM,
5757                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5758                                    "not enough memory to create flow handle");
5759                 return NULL;
5760         }
5761         /* Multi-threaded flow insertion is not supported. */
5762         dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
5763         dev_flow->handle = dev_handle;
5764         dev_flow->handle_idx = handle_idx;
5765         /*
5766          * Some old rdma-core releases check the length of the matching
5767          * parameter before continuing, using the length without the misc4
5768          * param. If the flow provides misc4 support, the length is
5769          * adjusted accordingly later. Each param member is naturally
5770          * aligned to a 64B boundary.
5771          */
5772         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
5773                                   MLX5_ST_SZ_BYTES(fte_match_set_misc4);
5774         /*
5775          * The matching value must be cleared to 0 before use. In the
5776          * past it was cleared implicitly by the rte_*alloc API; the
5777          * time consumption remains almost the same as before.
5778          */
5779         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
5780         dev_flow->ingress = attr->ingress;
5781         dev_flow->dv.transfer = attr->transfer;
5782         return dev_flow;
5783 }
5784
5785 #ifdef RTE_LIBRTE_MLX5_DEBUG
5786 /**
5787  * Sanity check for match mask and value, similar to check_valid_spec() in the
5788  * kernel driver. If an unmasked bit is set in the value, failure is returned.
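 * For example, a value byte 0x13 under a mask byte 0x0f has bit 4 set but
 * unmasked, so the check fails.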
5789  *
5790  * @param match_mask
5791  *   pointer to match mask buffer.
5792  * @param match_value
5793  *   pointer to match value buffer.
5794  *
5795  * @return
5796  *   0 if valid, -EINVAL otherwise.
5797  */
5798 static int
5799 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5800 {
5801         uint8_t *m = match_mask;
5802         uint8_t *v = match_value;
5803         unsigned int i;
5804
5805         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5806                 if (v[i] & ~m[i]) {
5807                         DRV_LOG(ERR,
5808                                 "match_value differs from match_criteria"
5809                                 " %p[%u] != %p[%u]",
5810                                 match_value, i, match_mask, i);
5811                         return -EINVAL;
5812                 }
5813         }
5814         return 0;
5815 }
5816 #endif
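
/*
 * A minimal usage sketch (illustrative only, debug builds): verify a freshly
 * translated flow before applying it, e.g.:
 *
 *        MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
 *                                              dev_flow->dv.value.buf));
 */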
5817
5818 /**
5819  * Add match of ip_version.
5820  *
5821  * @param[in] group
5822  *   Flow group.
5823  * @param[in] headers_v
5824  *   Values header pointer.
5825  * @param[in] headers_m
5826  *   Masks header pointer.
5827  * @param[in] ip_version
5828  *   The IP version to set.
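 *
 * Matching on ip_version supersedes the ethertype match; the ethertype
 * is cleared in both mask and value.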
5829  */
5830 static inline void
5831 flow_dv_set_match_ip_version(uint32_t group,
5832                              void *headers_v,
5833                              void *headers_m,
5834                              uint8_t ip_version)
5835 {
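        /*
         * The root table (group 0) does not support a partial ip_version
         * mask, so the full nibble is matched there.
         */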
5836         if (group == 0)
5837                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5838         else
5839                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
5840                          ip_version);
5841         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
5842         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
5843         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
5844 }
5845
5846 /**
5847  * Add Ethernet item to matcher and to the value.
5848  *
5849  * @param[in, out] matcher
5850  *   Flow matcher.
5851  * @param[in, out] key
5852  *   Flow matcher value.
5853  * @param[in] item
5854  *   Flow pattern to translate.
5855  * @param[in] inner
5856  *   Item is inner pattern.
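 * @param[in] group
 *   The group to insert the rule.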
5857  */
5858 static void
5859 flow_dv_translate_item_eth(void *matcher, void *key,
5860                            const struct rte_flow_item *item, int inner,
5861                            uint32_t group)
5862 {
5863         const struct rte_flow_item_eth *eth_m = item->mask;
5864         const struct rte_flow_item_eth *eth_v = item->spec;
5865         const struct rte_flow_item_eth nic_mask = {
5866                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5867                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5868                 .type = RTE_BE16(0xffff),
5869         };
5870         void *headers_m;
5871         void *headers_v;
5872         char *l24_v;
5873         unsigned int i;
5874
5875         if (!eth_v)
5876                 return;
5877         if (!eth_m)
5878                 eth_m = &nic_mask;
5879         if (inner) {
5880                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5881                                          inner_headers);
5882                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5883         } else {
5884                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5885                                          outer_headers);
5886                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5887         }
5888         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5889                &eth_m->dst, sizeof(eth_m->dst));
5890         /* The value must be in the range of the mask. */
5891         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5892         for (i = 0; i < sizeof(eth_m->dst); ++i)
5893                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5894         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5895                &eth_m->src, sizeof(eth_m->src));
5896         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5897         /* The value must be in the range of the mask. */
5898         for (i = 0; i < sizeof(eth_m->src); ++i)
5899                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5900         if (eth_v->type) {
5901                 /* When ethertype is present, set mask for tagged VLAN. */
5902                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5903                 /* Set value for tagged VLAN if ethertype is 802.1Q. */
5904                 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
5905                     eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
5906                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
5907                                  1);
5908                         /* Return here to avoid setting match on ethertype. */
5909                         return;
5910                 }
5911         }
5912         /*
5913          * HW supports match on one Ethertype, the Ethertype following the last
5914          * VLAN tag of the packet (see PRM).
5915          * Set match on ethertype only if ETH header is not followed by VLAN.
5916          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
5917          * ethertype, and use ip_version field instead.
5918          * eCPRI over Ether layer will use type value 0xAEFE.
5919          */
5920         if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
5921             eth_m->type == 0xFFFF) {
5922                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
5923         } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
5924                    eth_m->type == 0xFFFF) {
5925                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
5926         } else {
5927                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5928                          rte_be_to_cpu_16(eth_m->type));
5929                 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5930                                      ethertype);
5931                 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5932         }
5933 }
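
/*
 * Illustrative note: an outer Ethernet item with spec type 0x0800 and a full
 * type mask does not program the ethertype field at all; ip_version 4 is
 * matched instead via flow_dv_set_match_ip_version() above.
 */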
5934
5935 /**
5936  * Add VLAN item to matcher and to the value.
5937  *
5938  * @param[in, out] dev_flow
5939  *   Flow descriptor.
5940  * @param[in, out] matcher
5941  *   Flow matcher.
5942  * @param[in, out] key
5943  *   Flow matcher value.
5944  * @param[in] item
5945  *   Flow pattern to translate.
5946  * @param[in] inner
5947  *   Item is inner pattern.
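 * @param[in] group
 *   The group to insert the rule.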
5948  */
5949 static void
5950 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5951                             void *matcher, void *key,
5952                             const struct rte_flow_item *item,
5953                             int inner, uint32_t group)
5954 {
5955         const struct rte_flow_item_vlan *vlan_m = item->mask;
5956         const struct rte_flow_item_vlan *vlan_v = item->spec;
5957         void *headers_m;
5958         void *headers_v;
5959         uint16_t tci_m;
5960         uint16_t tci_v;
5961
5962         if (inner) {
5963                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5964                                          inner_headers);
5965                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5966         } else {
5967                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5968                                          outer_headers);
5969                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5970                 /*
5971                  * This is a workaround: masks are not supported
5972                  * here and have been pre-validated.
5973                  */
5974                 if (vlan_v)
5975                         dev_flow->handle->vf_vlan.tag =
5976                                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5977         }
5978         /*
5979          * When VLAN item exists in flow, mark packet as tagged,
5980          * even if TCI is not specified.
5981          */
5982         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5983         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5984         if (!vlan_v)
5985                 return;
5986         if (!vlan_m)
5987                 vlan_m = &rte_flow_item_vlan_mask;
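        /* TCI layout: PCP in bits 15-13, DEI/CFI in bit 12, VID in bits 11-0. */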
5988         tci_m = rte_be_to_cpu_16(vlan_m->tci);
5989         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
5990         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5991         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5992         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5993         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5994         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5995         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
5996         /*
5997          * HW is optimized for IPv4/IPv6. In such cases, avoid setting
5998          * ethertype, and use ip_version field instead.
5999          */
6000         if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
6001             vlan_m->inner_type == 0xFFFF) {
6002                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6003         } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
6004                    vlan_m->inner_type == 0xFFFF) {
6005                 flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6006         } else {
6007                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
6008                          rte_be_to_cpu_16(vlan_m->inner_type));
6009                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
6010                          rte_be_to_cpu_16(vlan_m->inner_type &
6011                                           vlan_v->inner_type));
6012         }
6013 }
6014
6015 /**
6016  * Add IPV4 item to matcher and to the value.
6017  *
6018  * @param[in, out] matcher
6019  *   Flow matcher.
6020  * @param[in, out] key
6021  *   Flow matcher value.
6022  * @param[in] item
6023  *   Flow pattern to translate.
6024  * @param[in] item_flags
6025  *   Bit-fields that holds the items detected until now.
6026  * @param[in] inner
6027  *   Item is inner pattern.
6028  * @param[in] group
6029  *   The group to insert the rule.
6030  */
6031 static void
6032 flow_dv_translate_item_ipv4(void *matcher, void *key,
6033                             const struct rte_flow_item *item,
6034                             const uint64_t item_flags,
6035                             int inner, uint32_t group)
6036 {
6037         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
6038         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
6039         const struct rte_flow_item_ipv4 nic_mask = {
6040                 .hdr = {
6041                         .src_addr = RTE_BE32(0xffffffff),
6042                         .dst_addr = RTE_BE32(0xffffffff),
6043                         .type_of_service = 0xff,
6044                         .next_proto_id = 0xff,
6045                         .time_to_live = 0xff,
6046                 },
6047         };
6048         void *headers_m;
6049         void *headers_v;
6050         char *l24_m;
6051         char *l24_v;
6052         uint8_t tos;
6053
6054         if (inner) {
6055                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6056                                          inner_headers);
6057                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6058         } else {
6059                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6060                                          outer_headers);
6061                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6062         }
6063         flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
6064         /*
6065          * On outer header (which must contain L2), or inner header with L2,
6066          * set cvlan_tag mask bit to mark this packet as untagged.
6067          * This should be done even if item->spec is empty.
6068          */
6069         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6070                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6071         if (!ipv4_v)
6072                 return;
6073         if (!ipv4_m)
6074                 ipv4_m = &nic_mask;
6075         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6076                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6077         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6078                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
6079         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
6080         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
6081         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6082                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6083         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6084                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
6085         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
6086         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
6087         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
6088         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
6089                  ipv4_m->hdr.type_of_service);
6090         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
6091         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
6092                  ipv4_m->hdr.type_of_service >> 2);
6093         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
6094         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6095                  ipv4_m->hdr.next_proto_id);
6096         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6097                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
6098         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6099                  ipv4_m->hdr.time_to_live);
6100         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6101                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
6102 }
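
/*
 * Editor's note: an illustrative sketch, not part of the driver. The ToS
 * byte written above feeds two separate match fields: ip_ecn is a 2-bit
 * field, so MLX5_SET() keeps only the two least significant bits, while
 * ip_dscp receives the upper six bits after the shift. The example_*
 * helper is hypothetical.
 */
static inline void
example_split_tos(uint8_t tos, uint8_t *ecn, uint8_t *dscp)
{
        *ecn = tos & 0x3;       /* What MLX5_SET(..., ip_ecn, tos) keeps. */
        *dscp = tos >> 2;       /* DSCP: upper 6 bits of the ToS byte. */
}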
6103
6104 /**
6105  * Add IPV6 item to matcher and to the value.
6106  *
6107  * @param[in, out] matcher
6108  *   Flow matcher.
6109  * @param[in, out] key
6110  *   Flow matcher value.
6111  * @param[in] item
6112  *   Flow pattern to translate.
6113  * @param[in] item_flags
6114  *   Bit-fields that hold the items detected until now.
6115  * @param[in] inner
6116  *   Item is inner pattern.
6117  * @param[in] group
6118  *   The group to insert the rule.
6119  */
6120 static void
6121 flow_dv_translate_item_ipv6(void *matcher, void *key,
6122                             const struct rte_flow_item *item,
6123                             const uint64_t item_flags,
6124                             int inner, uint32_t group)
6125 {
6126         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
6127         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
6128         const struct rte_flow_item_ipv6 nic_mask = {
6129                 .hdr = {
6130                         .src_addr =
6131                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6132                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6133                         .dst_addr =
6134                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
6135                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
6136                         .vtc_flow = RTE_BE32(0xffffffff),
6137                         .proto = 0xff,
6138                         .hop_limits = 0xff,
6139                 },
6140         };
6141         void *headers_m;
6142         void *headers_v;
6143         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6144         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6145         char *l24_m;
6146         char *l24_v;
6147         uint32_t vtc_m;
6148         uint32_t vtc_v;
6149         int i;
6150         int size;
6151
6152         if (inner) {
6153                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6154                                          inner_headers);
6155                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6156         } else {
6157                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6158                                          outer_headers);
6159                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6160         }
6161         flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
6162         /*
6163          * On outer header (which must contain L2), or inner header with L2,
6164          * set cvlan_tag mask bit to mark this packet as untagged.
6165          * This should be done even if item->spec is empty.
6166          */
6167         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
6168                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
6169         if (!ipv6_v)
6170                 return;
6171         if (!ipv6_m)
6172                 ipv6_m = &nic_mask;
6173         size = sizeof(ipv6_m->hdr.dst_addr);
6174         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6175                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6176         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6177                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
6178         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
6179         for (i = 0; i < size; ++i)
6180                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
6181         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
6182                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6183         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
6184                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
6185         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
6186         for (i = 0; i < size; ++i)
6187                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
6188         /* TOS. */
6189         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
6190         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
6191         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
6192         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
6193         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
6194         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
6195         /* Label. */
6196         if (inner) {
6197                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
6198                          vtc_m);
6199                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
6200                          vtc_v);
6201         } else {
6202                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
6203                          vtc_m);
6204                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
6205                          vtc_v);
6206         }
6207         /* Protocol. */
6208         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
6209                  ipv6_m->hdr.proto);
6210         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6211                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
6212         /* Hop limit. */
6213         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
6214                  ipv6_m->hdr.hop_limits);
6215         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
6216                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
6217 }
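
/*
 * Editor's note: an illustrative sketch, not part of the driver. After
 * rte_be_to_cpu_32(), vtc_flow packs version(31:28) | traffic class(27:20)
 * | flow label(19:0), hence the 20- and 22-bit shifts above for ECN and
 * DSCP; the flow label lands in the low 20 bits and MLX5_SET() truncates
 * it to the field width. The example_* helper is hypothetical.
 */
static inline void
example_split_vtc_flow(uint32_t vtc, uint8_t *ecn, uint8_t *dscp,
                       uint32_t *label)
{
        *ecn = (vtc >> 20) & 0x3;       /* ECN: bits 21:20. */
        *dscp = (vtc >> 22) & 0x3f;     /* DSCP: bits 27:22. */
        *label = vtc & 0xfffff;         /* Flow label: bits 19:0. */
}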
6218
6219 /**
6220  * Add TCP item to matcher and to the value.
6221  *
6222  * @param[in, out] matcher
6223  *   Flow matcher.
6224  * @param[in, out] key
6225  *   Flow matcher value.
6226  * @param[in] item
6227  *   Flow pattern to translate.
6228  * @param[in] inner
6229  *   Item is inner pattern.
6230  */
6231 static void
6232 flow_dv_translate_item_tcp(void *matcher, void *key,
6233                            const struct rte_flow_item *item,
6234                            int inner)
6235 {
6236         const struct rte_flow_item_tcp *tcp_m = item->mask;
6237         const struct rte_flow_item_tcp *tcp_v = item->spec;
6238         void *headers_m;
6239         void *headers_v;
6240
6241         if (inner) {
6242                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6243                                          inner_headers);
6244                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6245         } else {
6246                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6247                                          outer_headers);
6248                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6249         }
6250         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6251         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
6252         if (!tcp_v)
6253                 return;
6254         if (!tcp_m)
6255                 tcp_m = &rte_flow_item_tcp_mask;
6256         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
6257                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
6258         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
6259                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
6260         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
6261                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
6262         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
6263                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
6264         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
6265                  tcp_m->hdr.tcp_flags);
6266         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
6267                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
6268 }
6269
6270 /**
6271  * Add UDP item to matcher and to the value.
6272  *
6273  * @param[in, out] matcher
6274  *   Flow matcher.
6275  * @param[in, out] key
6276  *   Flow matcher value.
6277  * @param[in] item
6278  *   Flow pattern to translate.
6279  * @param[in] inner
6280  *   Item is inner pattern.
6281  */
6282 static void
6283 flow_dv_translate_item_udp(void *matcher, void *key,
6284                            const struct rte_flow_item *item,
6285                            int inner)
6286 {
6287         const struct rte_flow_item_udp *udp_m = item->mask;
6288         const struct rte_flow_item_udp *udp_v = item->spec;
6289         void *headers_m;
6290         void *headers_v;
6291
6292         if (inner) {
6293                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6294                                          inner_headers);
6295                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6296         } else {
6297                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6298                                          outer_headers);
6299                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6300         }
6301         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6302         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
6303         if (!udp_v)
6304                 return;
6305         if (!udp_m)
6306                 udp_m = &rte_flow_item_udp_mask;
6307         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
6308                  rte_be_to_cpu_16(udp_m->hdr.src_port));
6309         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
6310                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
6311         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
6312                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
6313         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6314                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
6315 }
6316
6317 /**
6318  * Add GRE optional Key item to matcher and to the value.
6319  *
6320  * @param[in, out] matcher
6321  *   Flow matcher.
6322  * @param[in, out] key
6323  *   Flow matcher value.
6324  * @param[in] item
6325  *   Flow pattern to translate.
6328  */
6329 static void
6330 flow_dv_translate_item_gre_key(void *matcher, void *key,
6331                                    const struct rte_flow_item *item)
6332 {
6333         const rte_be32_t *key_m = item->mask;
6334         const rte_be32_t *key_v = item->spec;
6335         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6336         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6337         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6338
6339         /* GRE K bit must be on and should already be validated */
6340         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
6341         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
6342         if (!key_v)
6343                 return;
6344         if (!key_m)
6345                 key_m = &gre_key_default_mask;
6346         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
6347                  rte_be_to_cpu_32(*key_m) >> 8);
6348         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
6349                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
6350         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
6351                  rte_be_to_cpu_32(*key_m) & 0xFF);
6352         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
6353                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
6354 }
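
/*
 * Editor's note: an illustrative sketch, not part of the driver. The
 * 32-bit GRE key is split across two match fields: gre_key_h holds the
 * upper 24 bits and gre_key_l the lower 8, matching the >> 8 and & 0xFF
 * above on the CPU-order value. The example_* helper is hypothetical.
 */
static inline void
example_split_gre_key(uint32_t key, uint32_t *key_h, uint8_t *key_l)
{
        *key_h = key >> 8;      /* Upper 24 bits of the key. */
        *key_l = key & 0xFF;    /* Lower 8 bits of the key. */
}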
6355
6356 /**
6357  * Add GRE item to matcher and to the value.
6358  *
6359  * @param[in, out] matcher
6360  *   Flow matcher.
6361  * @param[in, out] key
6362  *   Flow matcher value.
6363  * @param[in] item
6364  *   Flow pattern to translate.
6365  * @param[in] inner
6366  *   Item is inner pattern.
6367  */
6368 static void
6369 flow_dv_translate_item_gre(void *matcher, void *key,
6370                            const struct rte_flow_item *item,
6371                            int inner)
6372 {
6373         const struct rte_flow_item_gre *gre_m = item->mask;
6374         const struct rte_flow_item_gre *gre_v = item->spec;
6375         void *headers_m;
6376         void *headers_v;
6377         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6378         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6379         struct {
6380                 union {
6381                         __extension__
6382                         struct {
6383                                 uint16_t version:3;
6384                                 uint16_t rsvd0:9;
6385                                 uint16_t s_present:1;
6386                                 uint16_t k_present:1;
6387                                 uint16_t rsvd_bit1:1;
6388                                 uint16_t c_present:1;
6389                         };
6390                         uint16_t value;
6391                 };
6392         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
6393
6394         if (inner) {
6395                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6396                                          inner_headers);
6397                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6398         } else {
6399                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6400                                          outer_headers);
6401                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6402         }
6403         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6404         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
6405         if (!gre_v)
6406                 return;
6407         if (!gre_m)
6408                 gre_m = &rte_flow_item_gre_mask;
6409         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
6410                  rte_be_to_cpu_16(gre_m->protocol));
6411         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6412                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
6413         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
6414         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
6415         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
6416                  gre_crks_rsvd0_ver_m.c_present);
6417         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
6418                  gre_crks_rsvd0_ver_v.c_present &
6419                  gre_crks_rsvd0_ver_m.c_present);
6420         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
6421                  gre_crks_rsvd0_ver_m.k_present);
6422         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
6423                  gre_crks_rsvd0_ver_v.k_present &
6424                  gre_crks_rsvd0_ver_m.k_present);
6425         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
6426                  gre_crks_rsvd0_ver_m.s_present);
6427         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
6428                  gre_crks_rsvd0_ver_v.s_present &
6429                  gre_crks_rsvd0_ver_m.s_present);
6430 }
6431
6432 /**
6433  * Add NVGRE item to matcher and to the value.
6434  *
6435  * @param[in, out] matcher
6436  *   Flow matcher.
6437  * @param[in, out] key
6438  *   Flow matcher value.
6439  * @param[in] item
6440  *   Flow pattern to translate.
6441  * @param[in] inner
6442  *   Item is inner pattern.
6443  */
6444 static void
6445 flow_dv_translate_item_nvgre(void *matcher, void *key,
6446                              const struct rte_flow_item *item,
6447                              int inner)
6448 {
6449         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
6450         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
6451         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6452         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6453         const char *tni_flow_id_m;
6454         const char *tni_flow_id_v;
6455         char *gre_key_m;
6456         char *gre_key_v;
6457         int size;
6458         int i;
6459
6460         /* For NVGRE, GRE header fields must be set with defined values. */
6461         const struct rte_flow_item_gre gre_spec = {
6462                 .c_rsvd0_ver = RTE_BE16(0x2000),
6463                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
6464         };
6465         const struct rte_flow_item_gre gre_mask = {
6466                 .c_rsvd0_ver = RTE_BE16(0xB000),
6467                 .protocol = RTE_BE16(UINT16_MAX),
6468         };
6469         const struct rte_flow_item gre_item = {
6470                 .spec = &gre_spec,
6471                 .mask = &gre_mask,
6472                 .last = NULL,
6473         };
6474         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
6475         if (!nvgre_v)
6476                 return;
6477         if (!nvgre_m)
6478                 nvgre_m = &rte_flow_item_nvgre_mask;
             /* Fetch TNI and flow ID only after the NULL checks above. */
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
6479         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
6480         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
6481         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
6482         memcpy(gre_key_m, tni_flow_id_m, size);
6483         for (i = 0; i < size; ++i)
6484                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
6485 }
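
/*
 * Editor's note: an illustrative sketch, not part of the driver. NVGRE
 * overlays the 24-bit TNI and the 8-bit flow ID on the 32-bit GRE key,
 * which is why the byte-wise copy above starts at gre_key_h. Read as a
 * big-endian number, the combined key looks like this; the example_*
 * helper is hypothetical.
 */
static inline uint32_t
example_nvgre_key(const uint8_t tni[3], uint8_t flow_id)
{
        /* TNI in the upper 24 bits, flow ID in the lower 8. */
        return ((uint32_t)tni[0] << 24) | ((uint32_t)tni[1] << 16) |
               ((uint32_t)tni[2] << 8) | flow_id;
}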
6486
6487 /**
6488  * Add VXLAN item to matcher and to the value.
6489  *
6490  * @param[in, out] matcher
6491  *   Flow matcher.
6492  * @param[in, out] key
6493  *   Flow matcher value.
6494  * @param[in] item
6495  *   Flow pattern to translate.
6496  * @param[in] inner
6497  *   Item is inner pattern.
6498  */
6499 static void
6500 flow_dv_translate_item_vxlan(void *matcher, void *key,
6501                              const struct rte_flow_item *item,
6502                              int inner)
6503 {
6504         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
6505         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
6506         void *headers_m;
6507         void *headers_v;
6508         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6509         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6510         char *vni_m;
6511         char *vni_v;
6512         uint16_t dport;
6513         int size;
6514         int i;
6515
6516         if (inner) {
6517                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6518                                          inner_headers);
6519                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6520         } else {
6521                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6522                                          outer_headers);
6523                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6524         }
6525         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6526                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6527         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6528                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6529                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6530         }
6531         if (!vxlan_v)
6532                 return;
6533         if (!vxlan_m)
6534                 vxlan_m = &rte_flow_item_vxlan_mask;
6535         size = sizeof(vxlan_m->vni);
6536         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
6537         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
6538         memcpy(vni_m, vxlan_m->vni, size);
6539         for (i = 0; i < size; ++i)
6540                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6541 }
6542
6543 /**
6544  * Add VXLAN-GPE item to matcher and to the value.
6545  *
6546  * @param[in, out] matcher
6547  *   Flow matcher.
6548  * @param[in, out] key
6549  *   Flow matcher value.
6550  * @param[in] item
6551  *   Flow pattern to translate.
6552  * @param[in] inner
6553  *   Item is inner pattern.
6554  */
6556 static void
6557 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
6558                                  const struct rte_flow_item *item, int inner)
6559 {
6560         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
6561         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
6562         void *headers_m;
6563         void *headers_v;
6564         void *misc_m =
6565                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
6566         void *misc_v =
6567                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6568         char *vni_m;
6569         char *vni_v;
6570         uint16_t dport;
6571         int size;
6572         int i;
6573         uint8_t flags_m = 0xff;
6574         uint8_t flags_v = 0xc;
6575
6576         if (inner) {
6577                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6578                                          inner_headers);
6579                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6580         } else {
6581                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6582                                          outer_headers);
6583                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6584         }
6585         dport = MLX5_UDP_PORT_VXLAN_GPE; /* This translator is GPE-only. */
6587         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6588                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6589                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6590         }
6591         if (!vxlan_v)
6592                 return;
6593         if (!vxlan_m)
6594                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
6595         size = sizeof(vxlan_m->vni);
6596         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
6597         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
6598         memcpy(vni_m, vxlan_m->vni, size);
6599         for (i = 0; i < size; ++i)
6600                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6601         if (vxlan_m->flags) {
6602                 flags_m = vxlan_m->flags;
6603                 flags_v = vxlan_v->flags;
6604         }
6605         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
6606         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
6607         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
6608                  vxlan_m->protocol);
6609         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
6610                  vxlan_v->protocol);
6611 }
6612
6613 /**
6614  * Add Geneve item to matcher and to the value.
6615  *
6616  * @param[in, out] matcher
6617  *   Flow matcher.
6618  * @param[in, out] key
6619  *   Flow matcher value.
6620  * @param[in] item
6621  *   Flow pattern to translate.
6622  * @param[in] inner
6623  *   Item is inner pattern.
6624  */
6626 static void
6627 flow_dv_translate_item_geneve(void *matcher, void *key,
6628                               const struct rte_flow_item *item, int inner)
6629 {
6630         const struct rte_flow_item_geneve *geneve_m = item->mask;
6631         const struct rte_flow_item_geneve *geneve_v = item->spec;
6632         void *headers_m;
6633         void *headers_v;
6634         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6635         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6636         uint16_t dport;
6637         uint16_t gbhdr_m;
6638         uint16_t gbhdr_v;
6639         char *vni_m;
6640         char *vni_v;
6641         size_t size, i;
6642
6643         if (inner) {
6644                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6645                                          inner_headers);
6646                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6647         } else {
6648                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6649                                          outer_headers);
6650                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6651         }
6652         dport = MLX5_UDP_PORT_GENEVE;
6653         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6654                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6655                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6656         }
6657         if (!geneve_v)
6658                 return;
6659         if (!geneve_m)
6660                 geneve_m = &rte_flow_item_geneve_mask;
6661         size = sizeof(geneve_m->vni);
6662         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6663         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6664         memcpy(vni_m, geneve_m->vni, size);
6665         for (i = 0; i < size; ++i)
6666                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
6667         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6668                  rte_be_to_cpu_16(geneve_m->protocol));
6669         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6670                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6671         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6672         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
6673         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6674                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6675         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6676                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6677         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6678                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6679         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6680                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6681                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6682 }
6683
6684 /**
6685  * Add MPLS item to matcher and to the value.
6686  *
6687  * @param[in, out] matcher
6688  *   Flow matcher.
6689  * @param[in, out] key
6690  *   Flow matcher value.
6691  * @param[in] item
6692  *   Flow pattern to translate.
6693  * @param[in] prev_layer
6694  *   The protocol layer indicated in the previous item.
6695  * @param[in] inner
6696  *   Item is inner pattern.
6697  */
6698 static void
6699 flow_dv_translate_item_mpls(void *matcher, void *key,
6700                             const struct rte_flow_item *item,
6701                             uint64_t prev_layer,
6702                             int inner)
6703 {
6704         const uint32_t *in_mpls_m = item->mask;
6705         const uint32_t *in_mpls_v = item->spec;
6706         uint32_t *out_mpls_m = NULL;
6707         uint32_t *out_mpls_v = NULL;
6708         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6709         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6710         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6711                                      misc_parameters_2);
6712         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6713         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6714         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6715
6716         switch (prev_layer) {
6717         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6718                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6719                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6720                          MLX5_UDP_PORT_MPLS);
6721                 break;
6722         case MLX5_FLOW_LAYER_GRE:
6723                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6724                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6725                          RTE_ETHER_TYPE_MPLS);
6726                 break;
6727         default:
6728                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6729                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6730                          IPPROTO_MPLS);
6731                 break;
6732         }
6733         if (!in_mpls_v)
6734                 return;
6735         if (!in_mpls_m)
6736                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6737         switch (prev_layer) {
6738         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6739                 out_mpls_m =
6740                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6741                                                  outer_first_mpls_over_udp);
6742                 out_mpls_v =
6743                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6744                                                  outer_first_mpls_over_udp);
6745                 break;
6746         case MLX5_FLOW_LAYER_GRE:
6747                 out_mpls_m =
6748                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6749                                                  outer_first_mpls_over_gre);
6750                 out_mpls_v =
6751                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6752                                                  outer_first_mpls_over_gre);
6753                 break;
6754         default:
6755                 /* Inner MPLS not over GRE is not supported. */
6756                 if (!inner) {
6757                         out_mpls_m =
6758                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6759                                                          misc2_m,
6760                                                          outer_first_mpls);
6761                         out_mpls_v =
6762                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6763                                                          misc2_v,
6764                                                          outer_first_mpls);
6765                 }
6766                 break;
6767         }
6768         if (out_mpls_m && out_mpls_v) {
6769                 *out_mpls_m = *in_mpls_m;
6770                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
6771         }
6772 }
6773
6774 /**
6775  * Add metadata register item to matcher
6776  *
6777  * @param[in, out] matcher
6778  *   Flow matcher.
6779  * @param[in, out] key
6780  *   Flow matcher value.
6781  * @param[in] reg_type
6782  *   Type of device metadata register.
6783  * @param[in] data
6784  *   Register data to match.
6785  * @param[in] mask
6786  *   Register mask.
6787  */
6788 static void
6789 flow_dv_match_meta_reg(void *matcher, void *key,
6790                        enum modify_reg reg_type,
6791                        uint32_t data, uint32_t mask)
6792 {
6793         void *misc2_m =
6794                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6795         void *misc2_v =
6796                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6797         uint32_t temp;
6798
6799         data &= mask;
6800         switch (reg_type) {
6801         case REG_A:
6802                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
6803                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
6804                 break;
6805         case REG_B:
6806                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
6807                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6808                 break;
6809         case REG_C_0:
6810                 /*
6811                  * The metadata register C0 field might be divided into
6812                  * source vport index and META item value, we should set
6813                  * this field according to specified mask, not as whole one.
6814                  */
6815                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6816                 temp |= mask;
6817                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6818                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6819                 temp &= ~mask;
6820                 temp |= data;
6821                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6822                 break;
6823         case REG_C_1:
6824                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6825                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6826                 break;
6827         case REG_C_2:
6828                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6829                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6830                 break;
6831         case REG_C_3:
6832                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6833                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6834                 break;
6835         case REG_C_4:
6836                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6837                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6838                 break;
6839         case REG_C_5:
6840                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6841                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6842                 break;
6843         case REG_C_6:
6844                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6845                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6846                 break;
6847         case REG_C_7:
6848                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6849                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6850                 break;
6851         default:
6852                 MLX5_ASSERT(false);
6853                 break;
6854         }
6855 }
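
/*
 * Editor's note: an illustrative sketch, not part of the driver. The
 * REG_C_0 branch above is a read-modify-write under a mask: only the
 * masked bits are replaced, so other users of the register (e.g. the
 * source vport tag in the remaining bits) are preserved. The example_*
 * helper is hypothetical.
 */
static inline uint32_t
example_masked_update(uint32_t old, uint32_t data, uint32_t mask)
{
        return (old & ~mask) | (data & mask);   /* Touch masked bits only. */
}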
6856
6857 /**
6858  * Add MARK item to matcher
6859  *
6860  * @param[in] dev
6861  *   The device to configure through.
6862  * @param[in, out] matcher
6863  *   Flow matcher.
6864  * @param[in, out] key
6865  *   Flow matcher value.
6866  * @param[in] item
6867  *   Flow pattern to translate.
6868  */
6869 static void
6870 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6871                             void *matcher, void *key,
6872                             const struct rte_flow_item *item)
6873 {
6874         struct mlx5_priv *priv = dev->data->dev_private;
6875         const struct rte_flow_item_mark *mark;
6876         uint32_t value;
6877         uint32_t mask;
6878
6879         mark = item->mask ? (const void *)item->mask :
6880                             &rte_flow_item_mark_mask;
6881         mask = mark->id & priv->sh->dv_mark_mask;
6882         mark = (const void *)item->spec;
6883         MLX5_ASSERT(mark);
6884         value = mark->id & priv->sh->dv_mark_mask & mask;
6885         if (mask) {
6886                 enum modify_reg reg;
6887
6888                 /* Get the metadata register index for the mark. */
6889                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6890                 MLX5_ASSERT(reg > 0);
6891                 if (reg == REG_C_0) {
6893                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6894                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6895
6896                         mask &= msk_c0;
6897                         mask <<= shl_c0;
6898                         value <<= shl_c0;
6899                 }
6900                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6901         }
6902 }
6903
6904 /**
6905  * Add META item to matcher
6906  *
6907  * @param[in] dev
6908  *   The device to configure through.
6909  * @param[in, out] matcher
6910  *   Flow matcher.
6911  * @param[in, out] key
6912  *   Flow matcher value.
6913  * @param[in] attr
6914  *   Attributes of flow that includes this item.
6915  * @param[in] item
6916  *   Flow pattern to translate.
6917  */
6918 static void
6919 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6920                             void *matcher, void *key,
6921                             const struct rte_flow_attr *attr,
6922                             const struct rte_flow_item *item)
6923 {
6924         const struct rte_flow_item_meta *meta_m;
6925         const struct rte_flow_item_meta *meta_v;
6926
6927         meta_m = (const void *)item->mask;
6928         if (!meta_m)
6929                 meta_m = &rte_flow_item_meta_mask;
6930         meta_v = (const void *)item->spec;
6931         if (meta_v) {
6932                 int reg;
6933                 uint32_t value = meta_v->data;
6934                 uint32_t mask = meta_m->data;
6935
6936                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6937                 if (reg < 0)
6938                         return;
6939                 /*
6940                  * There are no endianness conversions in the datapath
6941                  * code for performance reasons; all pattern conversions
6942                  * are done in rte_flow.
6943                  */
6944                 value = rte_cpu_to_be_32(value);
6945                 mask = rte_cpu_to_be_32(mask);
6946                 if (reg == REG_C_0) {
6947                         struct mlx5_priv *priv = dev->data->dev_private;
6948                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6949                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6950 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6951                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
6952
6953                         value >>= shr_c0;
6954                         mask >>= shr_c0;
6955 #endif
6956                         value <<= shl_c0;
6957                         mask <<= shl_c0;
6958                         MLX5_ASSERT(msk_c0);
6959                         MLX5_ASSERT(!(~msk_c0 & mask));
6960                 }
6961                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6962         }
6963 }
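
/*
 * Editor's note: an illustrative sketch, not part of the driver. When
 * META lands in REG_C_0 the value must be aligned to the bits the driver
 * owns: rte_bsf32() yields the index of the lowest set bit of the mask,
 * and shifting left by it moves the value into that sub-field, as done
 * above. The example_* helper is hypothetical.
 */
static inline uint32_t
example_align_to_regc0(uint32_t value, uint32_t msk_c0)
{
        return (value << rte_bsf32(msk_c0)) & msk_c0;
}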
6964
6965 /**
6966  * Add vport metadata Reg C0 item to matcher
6967  *
6968  * @param[in, out] matcher
6969  *   Flow matcher.
6970  * @param[in, out] key
6971  *   Flow matcher value.
6972  * @param[in] value
6973  *   Vport metadata register value to match.
      * @param[in] mask
      *   Vport metadata register mask.
6974  */
6975 static void
6976 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6977                                   uint32_t value, uint32_t mask)
6978 {
6979         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6980 }
6981
6982 /**
6983  * Add tag item to matcher
6984  *
6985  * @param[in] dev
6986  *   The device to configure through.
6987  * @param[in, out] matcher
6988  *   Flow matcher.
6989  * @param[in, out] key
6990  *   Flow matcher value.
6991  * @param[in] item
6992  *   Flow pattern to translate.
6993  */
6994 static void
6995 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6996                                 void *matcher, void *key,
6997                                 const struct rte_flow_item *item)
6998 {
6999         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
7000         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
7001         uint32_t mask, value;
7002
7003         MLX5_ASSERT(tag_v);
7004         value = tag_v->data;
7005         mask = tag_m ? tag_m->data : UINT32_MAX;
7006         if (tag_v->id == REG_C_0) {
7007                 struct mlx5_priv *priv = dev->data->dev_private;
7008                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
7009                 uint32_t shl_c0 = rte_bsf32(msk_c0);
7010
7011                 mask &= msk_c0;
7012                 mask <<= shl_c0;
7013                 value <<= shl_c0;
7014         }
7015         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
7016 }
7017
7018 /**
7019  * Add TAG item to matcher
7020  *
7021  * @param[in] dev
7022  *   The device to configure through.
7023  * @param[in, out] matcher
7024  *   Flow matcher.
7025  * @param[in, out] key
7026  *   Flow matcher value.
7027  * @param[in] item
7028  *   Flow pattern to translate.
7029  */
7030 static void
7031 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
7032                            void *matcher, void *key,
7033                            const struct rte_flow_item *item)
7034 {
7035         const struct rte_flow_item_tag *tag_v = item->spec;
7036         const struct rte_flow_item_tag *tag_m = item->mask;
7037         enum modify_reg reg;
7038
7039         MLX5_ASSERT(tag_v);
7040         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
7041         /* Get the metadata register index for the tag. */
7042         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
7043         MLX5_ASSERT(reg > 0);
7044         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
7045 }
7046
7047 /**
7048  * Add source vport match to the specified matcher.
7049  *
7050  * @param[in, out] matcher
7051  *   Flow matcher.
7052  * @param[in, out] key
7053  *   Flow matcher value.
7054  * @param[in] port
7055  *   Source vport value to match.
7056  * @param[in] mask
7057  *   Mask to apply.
7058  */
7059 static void
7060 flow_dv_translate_item_source_vport(void *matcher, void *key,
7061                                     int16_t port, uint16_t mask)
7062 {
7063         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7064         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7065
7066         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
7067         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
7068 }
7069
7070 /**
7071  * Translate port-id item to eswitch match on port-id.
7072  *
7073  * @param[in] dev
7074  *   The device to configure through.
7075  * @param[in, out] matcher
7076  *   Flow matcher.
7077  * @param[in, out] key
7078  *   Flow matcher value.
7079  * @param[in] item
7080  *   Flow pattern to translate.
7081  *
7082  * @return
7083  *   0 on success, a negative errno value otherwise.
7084  */
7085 static int
7086 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
7087                                void *key, const struct rte_flow_item *item)
7088 {
7089         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
7090         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
7091         struct mlx5_priv *priv;
7092         uint16_t mask, id;
7093
7094         mask = pid_m ? pid_m->id : 0xffff;
7095         id = pid_v ? pid_v->id : dev->data->port_id;
7096         priv = mlx5_port_to_eswitch_info(id, item == NULL);
7097         if (!priv)
7098                 return -rte_errno;
7099         /* Translate to vport field or to metadata, depending on mode. */
7100         if (priv->vport_meta_mask)
7101                 flow_dv_translate_item_meta_vport(matcher, key,
7102                                                   priv->vport_meta_tag,
7103                                                   priv->vport_meta_mask);
7104         else
7105                 flow_dv_translate_item_source_vport(matcher, key,
7106                                                     priv->vport_id, mask);
7107         return 0;
7108 }
7109
7110 /**
7111  * Add ICMP6 item to matcher and to the value.
7112  *
7113  * @param[in, out] matcher
7114  *   Flow matcher.
7115  * @param[in, out] key
7116  *   Flow matcher value.
7117  * @param[in] item
7118  *   Flow pattern to translate.
7119  * @param[in] inner
7120  *   Item is inner pattern.
7121  */
7122 static void
7123 flow_dv_translate_item_icmp6(void *matcher, void *key,
7124                               const struct rte_flow_item *item,
7125                               int inner)
7126 {
7127         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
7128         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
7129         void *headers_m;
7130         void *headers_v;
7131         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7132                                      misc_parameters_3);
7133         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);

7134         if (inner) {
7135                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7136                                          inner_headers);
7137                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7138         } else {
7139                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7140                                          outer_headers);
7141                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7142         }
7143         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7144         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
7145         if (!icmp6_v)
7146                 return;
7147         if (!icmp6_m)
7148                 icmp6_m = &rte_flow_item_icmp6_mask;
7149         /*
7150          * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
7151          * If only the protocol is specified, no need to match the frag.
7152          */
7153         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7154         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7155         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
7156         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
7157                  icmp6_v->type & icmp6_m->type);
7158         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
7159         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
7160                  icmp6_v->code & icmp6_m->code);
7161 }
7162
7163 /**
7164  * Add ICMP item to matcher and to the value.
7165  *
7166  * @param[in, out] matcher
7167  *   Flow matcher.
7168  * @param[in, out] key
7169  *   Flow matcher value.
7170  * @param[in] item
7171  *   Flow pattern to translate.
7172  * @param[in] inner
7173  *   Item is inner pattern.
7174  */
7175 static void
7176 flow_dv_translate_item_icmp(void *matcher, void *key,
7177                             const struct rte_flow_item *item,
7178                             int inner)
7179 {
7180         const struct rte_flow_item_icmp *icmp_m = item->mask;
7181         const struct rte_flow_item_icmp *icmp_v = item->spec;
7182         void *headers_m;
7183         void *headers_v;
7184         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7185                                      misc_parameters_3);
7186         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);

7187         if (inner) {
7188                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7189                                          inner_headers);
7190                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7191         } else {
7192                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7193                                          outer_headers);
7194                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7195         }
7196         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
7197         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
7198         if (!icmp_v)
7199                 return;
7200         if (!icmp_m)
7201                 icmp_m = &rte_flow_item_icmp_mask;
7202         /*
7203          * Force the flow to match only non-fragmented IPv4 ICMP packets.
7204          * If only the protocol is specified, no need to match the frag.
7205          */
7206         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
7207         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
7208         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
7209                  icmp_m->hdr.icmp_type);
7210         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
7211                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
7212         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
7213                  icmp_m->hdr.icmp_code);
7214         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
7215                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
7216 }
7217
7218 /**
7219  * Add GTP item to matcher and to the value.
7220  *
7221  * @param[in, out] matcher
7222  *   Flow matcher.
7223  * @param[in, out] key
7224  *   Flow matcher value.
7225  * @param[in] item
7226  *   Flow pattern to translate.
7227  * @param[in] inner
7228  *   Item is inner pattern.
7229  */
7230 static void
7231 flow_dv_translate_item_gtp(void *matcher, void *key,
7232                            const struct rte_flow_item *item, int inner)
7233 {
7234         const struct rte_flow_item_gtp *gtp_m = item->mask;
7235         const struct rte_flow_item_gtp *gtp_v = item->spec;
7236         void *headers_m;
7237         void *headers_v;
7238         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
7239                                      misc_parameters_3);
7240         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
7241         uint16_t dport = RTE_GTPU_UDP_PORT;
7242
7243         if (inner) {
7244                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7245                                          inner_headers);
7246                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
7247         } else {
7248                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
7249                                          outer_headers);
7250                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
7251         }
7252         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
7253                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
7254                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
7255         }
7256         if (!gtp_v)
7257                 return;
7258         if (!gtp_m)
7259                 gtp_m = &rte_flow_item_gtp_mask;
7260         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
7261                  gtp_m->v_pt_rsv_flags);
7262         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
7263                  gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
7264         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
7265         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
7266                  gtp_v->msg_type & gtp_m->msg_type);
7267         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
7268                  rte_be_to_cpu_32(gtp_m->teid));
7269         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
7270                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
7271 }
7272
7273 /**
7274  * Add eCPRI item to matcher and to the value.
7275  *
7276  * @param[in] dev
7277  *   The device to configure through.
7278  * @param[in, out] matcher
7279  *   Flow matcher.
7280  * @param[in, out] key
7281  *   Flow matcher value.
7282  * @param[in] item
7283  *   Flow pattern to translate.
7286  */
7287 static void
7288 flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
7289                              void *key, const struct rte_flow_item *item)
7290 {
7291         struct mlx5_priv *priv = dev->data->dev_private;
7292         const struct rte_flow_item_ecpri *ecpri_m = item->mask;
7293         const struct rte_flow_item_ecpri *ecpri_v = item->spec;
7294         void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
7295                                      misc_parameters_4);
7296         void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
7297         uint32_t *samples;
7298         void *dw_m;
7299         void *dw_v;
7300
7301         if (!ecpri_v)
7302                 return;
7303         if (!ecpri_m)
7304                 ecpri_m = &rte_flow_item_ecpri_mask;
7305         /*
7306          * At most four DW samples are supported in a single matching now.
7307          * Two of them are used for eCPRI matching:
7308          * 1. Type: one byte, mask should be 0x00ff0000 in network order.
7309          * 2. Message ID: one or two bytes, mask 0xffff0000 or 0xff000000,
7310          *    if present.
7311          */
7312         if (!ecpri_m->hdr.common.u32)
7313                 return;
7314         samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
7315         /* Need to take the whole DW as the mask to fill the entry. */
7316         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7317                             prog_sample_field_value_0);
7318         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7319                             prog_sample_field_value_0);
7320         /* Already big endian (network order) in the header. */
7321         *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
7322         *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
7323         /* Sample#0, used for matching type, offset 0. */
7324         MLX5_SET(fte_match_set_misc4, misc4_m,
7325                  prog_sample_field_id_0, samples[0]);
7326         /* It makes no sense to set the sample ID in the mask field. */
7327         MLX5_SET(fte_match_set_misc4, misc4_v,
7328                  prog_sample_field_id_0, samples[0]);
7329         /*
7330          * Check whether the message body part needs to be matched.
7331          * Wildcard rules matching only the type field should also be supported.
7332          */
7333         if (ecpri_m->hdr.dummy[0]) {
7334                 switch (ecpri_v->hdr.common.type) {
7335                 case RTE_ECPRI_MSG_TYPE_IQ_DATA:
7336                 case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
7337                 case RTE_ECPRI_MSG_TYPE_DLY_MSR:
7338                         dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
7339                                             prog_sample_field_value_1);
7340                         dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
7341                                             prog_sample_field_value_1);
7342                         *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
7343                         *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0] &
7344                                             ecpri_m->hdr.dummy[0];
7344                         /* Sample#1, to match message body, offset 4. */
7345                         MLX5_SET(fte_match_set_misc4, misc4_m,
7346                                  prog_sample_field_id_1, samples[1]);
7347                         MLX5_SET(fte_match_set_misc4, misc4_v,
7348                                  prog_sample_field_id_1, samples[1]);
7349                         break;
7350                 default:
7351                         /* Others, do not match any sample ID. */
7352                         break;
7353                 }
7354         }
7355 }
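
/*
 * A minimal application-side sketch (illustrative only, not part of the
 * driver): an eCPRI pattern matching real-time control messages on the
 * type byte, following the mask layout described in the function above.
 * All names and values here are hypothetical.
 */
static const struct rte_flow_item_ecpri ecpri_sketch_spec = {
	.hdr.common.u32 = RTE_BE32((uint32_t)RTE_ECPRI_MSG_TYPE_RTC_CTRL << 16),
};
static const struct rte_flow_item_ecpri ecpri_sketch_mask = {
	.hdr.common.u32 = RTE_BE32(0x00ff0000), /* Type byte, network order. */
};
static const struct rte_flow_item ecpri_sketch_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{
		.type = RTE_FLOW_ITEM_TYPE_ECPRI,
		.spec = &ecpri_sketch_spec,
		.mask = &ecpri_sketch_mask,
	},
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};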
7356
7357 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
7358
7359 #define HEADER_IS_ZERO(match_criteria, headers)                              \
7360         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
7361                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
7362
7363 /**
7364  * Calculate flow matcher enable bitmap.
7365  *
7366  * @param match_criteria
7367  *   Pointer to flow matcher criteria.
7368  *
7369  * @return
7370  *   Bitmap of enabled fields.
7371  */
7372 static uint8_t
7373 flow_dv_matcher_enable(uint32_t *match_criteria)
7374 {
7375         uint8_t match_criteria_enable;
7376
7377         match_criteria_enable =
7378                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
7379                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
7380         match_criteria_enable |=
7381                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
7382                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
7383         match_criteria_enable |=
7384                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
7385                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
7386         match_criteria_enable |=
7387                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
7388                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
7389         match_criteria_enable |=
7390                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
7391                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
7392         match_criteria_enable |=
7393                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
7394                 MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
7395         return match_criteria_enable;
7396 }
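
/*
 * Note (illustrative): the bitmap computed above is what ends up in
 * mlx5dv_flow_matcher_attr.match_criteria_enable when a matcher is
 * registered, see flow_dv_matcher_register() below:
 *
 *     dv_attr.match_criteria_enable =
 *             flow_dv_matcher_enable(cache_matcher->mask.buf);
 *
 * Only the parameter blocks flagged in the bitmap are considered by the
 * hardware matcher.
 */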
7397
7399 /**
7400  * Get a flow table.
7401  *
7402  * @param[in, out] dev
7403  *   Pointer to rte_eth_dev structure.
7404  * @param[in] table_id
7405  *   Table id to use.
7406  * @param[in] egress
7407  *   Direction of the table.
7408  * @param[in] transfer
7409  *   E-Switch or NIC flow.
7410  * @param[out] error
7411  *   Pointer to error structure.
7412  *
7413  * @return
7414  *   The table resource on success, NULL in case of failure.
7415  */
7416 static struct mlx5_flow_tbl_resource *
7417 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
7418                          uint32_t table_id, uint8_t egress,
7419                          uint8_t transfer,
7420                          struct rte_flow_error *error)
7421 {
7422         struct mlx5_priv *priv = dev->data->dev_private;
7423         struct mlx5_dev_ctx_shared *sh = priv->sh;
7424         struct mlx5_flow_tbl_resource *tbl;
7425         union mlx5_flow_tbl_key table_key = {
7426                 {
7427                         .table_id = table_id,
7428                         .reserved = 0,
7429                         .domain = !!transfer,
7430                         .direction = !!egress,
7431                 }
7432         };
7433         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
7434                                                          table_key.v64);
7435         struct mlx5_flow_tbl_data_entry *tbl_data;
7436         uint32_t idx = 0;
7437         int ret;
7438         void *domain;
7439
7440         if (pos) {
7441                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
7442                                         entry);
7443                 tbl = &tbl_data->tbl;
7444                 rte_atomic32_inc(&tbl->refcnt);
7445                 return tbl;
7446         }
7447         tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
7448         if (!tbl_data) {
7449                 rte_flow_error_set(error, ENOMEM,
7450                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7451                                    NULL,
7452                                    "cannot allocate flow table data entry");
7453                 return NULL;
7454         }
7455         tbl_data->idx = idx;
7456         tbl = &tbl_data->tbl;
7457         pos = &tbl_data->entry;
7458         if (transfer)
7459                 domain = sh->fdb_domain;
7460         else if (egress)
7461                 domain = sh->tx_domain;
7462         else
7463                 domain = sh->rx_domain;
7464         ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
7465         if (ret) {
7466                 rte_flow_error_set(error, ENOMEM,
7467                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7468                                    NULL, "cannot create flow table object");
7469                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
7470                 return NULL;
7471         }
7472         /*
7473          * No multi-threads now, but still better to initialize the reference
7474          * count before insert it into the hash list.
7475          */
7476         rte_atomic32_init(&tbl->refcnt);
7477         /* Jump action reference count is initialized here. */
7478         rte_atomic32_init(&tbl_data->jump.refcnt);
7479         pos->key = table_key.v64;
7480         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
7481         if (ret < 0) {
7482                 rte_flow_error_set(error, -ret,
7483                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7484                                    "cannot insert flow table data entry");
7485                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7486                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
                /* Do not return a pointer into the freed entry. */
                return NULL;
7487         }
7488         rte_atomic32_inc(&tbl->refcnt);
7489         return tbl;
7490 }
7491
7492 /**
7493  * Release a flow table.
7494  *
7495  * @param[in] dev
7496  *   Pointer to rte_eth_dev structure.
7497  * @param[in] tbl
7498  *   Table resource to be released.
7499  *
7500  * @return
7501  *   0 if the table was released, 1 while a reference on it still exists.
7502  */
7503 static int
7504 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
7505                              struct mlx5_flow_tbl_resource *tbl)
7506 {
7507         struct mlx5_priv *priv = dev->data->dev_private;
7508         struct mlx5_dev_ctx_shared *sh = priv->sh;
7509         struct mlx5_flow_tbl_data_entry *tbl_data;
7510
7511         if (!tbl)
7512                 return 0;
7513         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7514         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
7515                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
7516
7517                 mlx5_flow_os_destroy_flow_tbl(tbl->obj);
7518                 tbl->obj = NULL;
7519                 /* remove the entry from the hash list and free memory. */
7520                 mlx5_hlist_remove(sh->flow_tbls, pos);
7521                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
7522                                 tbl_data->idx);
7523                 return 0;
7524         }
7525         return 1;
7526 }
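
/*
 * A minimal caller sketch (illustrative only): tables are reference
 * counted, so every successful flow_dv_tbl_resource_get() must be paired
 * with flow_dv_tbl_resource_release() once the object holding the table
 * (matcher, jump action, ...) is gone.  The table id is hypothetical.
 */
static int __rte_unused
flow_dv_tbl_usage_sketch(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_flow_tbl_resource *tbl;

	tbl = flow_dv_tbl_resource_get(dev, 1 /* table id */, 0 /* ingress */,
				       0 /* NIC domain */, error);
	if (!tbl)
		return -rte_errno;
	/* ... create matchers or jump actions on tbl->obj ... */
	(void)flow_dv_tbl_resource_release(dev, tbl);
	return 0;
}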
7527
7528 /**
7529  * Register the flow matcher.
7530  *
7531  * @param[in, out] dev
7532  *   Pointer to rte_eth_dev structure.
7533  * @param[in, out] matcher
7534  *   Pointer to flow matcher.
7535  * @param[in, out] key
7536  *   Pointer to flow table key.
7537  * @param[in, out] dev_flow
7538  *   Pointer to the dev_flow.
7539  * @param[out] error
7540  *   Pointer to error structure.
7541  *
7542  * @return
7543  *   0 on success, a negative errno value otherwise and rte_errno is set.
7544  */
7545 static int
7546 flow_dv_matcher_register(struct rte_eth_dev *dev,
7547                          struct mlx5_flow_dv_matcher *matcher,
7548                          union mlx5_flow_tbl_key *key,
7549                          struct mlx5_flow *dev_flow,
7550                          struct rte_flow_error *error)
7551 {
7552         struct mlx5_priv *priv = dev->data->dev_private;
7553         struct mlx5_dev_ctx_shared *sh = priv->sh;
7554         struct mlx5_flow_dv_matcher *cache_matcher;
7555         struct mlx5dv_flow_matcher_attr dv_attr = {
7556                 .type = IBV_FLOW_ATTR_NORMAL,
7557                 .match_mask = (void *)&matcher->mask,
7558         };
7559         struct mlx5_flow_tbl_resource *tbl;
7560         struct mlx5_flow_tbl_data_entry *tbl_data;
7561         int ret;
7562
7563         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
7564                                        key->domain, error);
7565         if (!tbl)
7566                 return -rte_errno;      /* No need to refill the error info */
7567         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
7568         /* Lookup from cache. */
7569         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
7570                 if (matcher->crc == cache_matcher->crc &&
7571                     matcher->priority == cache_matcher->priority &&
7572                     !memcmp((const void *)matcher->mask.buf,
7573                             (const void *)cache_matcher->mask.buf,
7574                             cache_matcher->mask.size)) {
7575                         DRV_LOG(DEBUG,
7576                                 "%s group %u priority %hd use %s "
7577                                 "matcher %p: refcnt %d++",
7578                                 key->domain ? "FDB" : "NIC", key->table_id,
7579                                 cache_matcher->priority,
7580                                 key->direction ? "tx" : "rx",
7581                                 (void *)cache_matcher,
7582                                 rte_atomic32_read(&cache_matcher->refcnt));
7583                         rte_atomic32_inc(&cache_matcher->refcnt);
7584                         dev_flow->handle->dvh.matcher = cache_matcher;
7585                         /* old matcher should not make the table ref++. */
7586                         flow_dv_tbl_resource_release(dev, tbl);
7587                         return 0;
7588                 }
7589         }
7590         /* Register new matcher. */
7591         cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
7592                                     SOCKET_ID_ANY);
7593         if (!cache_matcher) {
7594                 flow_dv_tbl_resource_release(dev, tbl);
7595                 return rte_flow_error_set(error, ENOMEM,
7596                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7597                                           "cannot allocate matcher memory");
7598         }
7599         *cache_matcher = *matcher;
7600         dv_attr.match_criteria_enable =
7601                 flow_dv_matcher_enable(cache_matcher->mask.buf);
7602         dv_attr.priority = matcher->priority;
7603         if (key->direction)
7604                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
7605         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
7606                                                &cache_matcher->matcher_object);
7607         if (ret) {
7608                 mlx5_free(cache_matcher);
7609 #ifdef HAVE_MLX5DV_DR
7610                 flow_dv_tbl_resource_release(dev, tbl);
7611 #endif
7612                 return rte_flow_error_set(error, ENOMEM,
7613                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7614                                           NULL, "cannot create matcher");
7615         }
7616         /* Save the table information */
7617         cache_matcher->tbl = tbl;
7618         rte_atomic32_init(&cache_matcher->refcnt);
7619         /* only matcher ref++, table ref++ already done above in get API. */
7620         rte_atomic32_inc(&cache_matcher->refcnt);
7621         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
7622         dev_flow->handle->dvh.matcher = cache_matcher;
7623         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
7624                 key->domain ? "FDB" : "NIC", key->table_id,
7625                 cache_matcher->priority,
7626                 key->direction ? "tx" : "rx", (void *)cache_matcher,
7627                 rte_atomic32_read(&cache_matcher->refcnt));
7628         return 0;
7629 }
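
/*
 * A registration sketch (illustrative only, values hypothetical): the
 * caller is expected to fill the mask buffer and its size, compute the
 * CRC used for the cache lookup and pack the table key, exactly as
 * __flow_dv_translate() does further below.
 */
static int __rte_unused
flow_dv_matcher_usage_sketch(struct rte_eth_dev *dev,
			     struct mlx5_flow_dv_matcher *matcher,
			     struct mlx5_flow *dev_flow,
			     struct rte_flow_error *error)
{
	union mlx5_flow_tbl_key tbl_key = {
		{
			.table_id = 0,
			.reserved = 0,
			.domain = 0,	/* NIC domain, not FDB. */
			.direction = 0,	/* RX. */
		}
	};

	matcher->priority = 0;
	matcher->crc = rte_raw_cksum((const void *)matcher->mask.buf,
				     matcher->mask.size);
	return flow_dv_matcher_register(dev, matcher, &tbl_key, dev_flow,
					error);
}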
7630
7631 /**
7632  * Find existing tag resource or create and register a new one.
7633  *
7634  * @param[in, out] dev
7635  *   Pointer to rte_eth_dev structure.
7636  * @param[in] tag_be24
7637  *   Tag value in big endian, right-shifted by 8 bits.
7638  * @param[in, out] dev_flow
7639  *   Pointer to the dev_flow.
7640  * @param[out] error
7641  *   Pointer to error structure.
7642  *
7643  * @return
7644  *   0 on success, a negative errno value otherwise and rte_errno is set.
7645  */
7646 static int
7647 flow_dv_tag_resource_register
7648                         (struct rte_eth_dev *dev,
7649                          uint32_t tag_be24,
7650                          struct mlx5_flow *dev_flow,
7651                          struct rte_flow_error *error)
7652 {
7653         struct mlx5_priv *priv = dev->data->dev_private;
7654         struct mlx5_dev_ctx_shared *sh = priv->sh;
7655         struct mlx5_flow_dv_tag_resource *cache_resource;
7656         struct mlx5_hlist_entry *entry;
7657         int ret;
7658
7659         /* Lookup a matching resource from cache. */
7660         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
7661         if (entry) {
7662                 cache_resource = container_of
7663                         (entry, struct mlx5_flow_dv_tag_resource, entry);
7664                 rte_atomic32_inc(&cache_resource->refcnt);
7665                 dev_flow->handle->dvh.rix_tag = cache_resource->idx;
7666                 dev_flow->dv.tag_resource = cache_resource;
7667                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
7668                         (void *)cache_resource,
7669                         rte_atomic32_read(&cache_resource->refcnt));
7670                 return 0;
7671         }
7672         /* Register new resource. */
7673         cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
7674                                        &dev_flow->handle->dvh.rix_tag);
7675         if (!cache_resource)
7676                 return rte_flow_error_set(error, ENOMEM,
7677                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7678                                           "cannot allocate resource memory");
7679         cache_resource->entry.key = (uint64_t)tag_be24;
7680         ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
7681                                                   &cache_resource->action);
7682         if (ret) {
7683                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                     dev_flow->handle->dvh.rix_tag);
7684                 return rte_flow_error_set(error, ENOMEM,
7685                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7686                                           NULL, "cannot create action");
7687         }
7688         rte_atomic32_init(&cache_resource->refcnt);
7689         rte_atomic32_inc(&cache_resource->refcnt);
7690         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
7691                 mlx5_flow_os_destroy_flow_action(cache_resource->action);
7692                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
                                     dev_flow->handle->dvh.rix_tag);
7693                 return rte_flow_error_set(error, EEXIST,
7694                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7695                                           NULL, "cannot insert tag");
7696         }
7697         dev_flow->dv.tag_resource = cache_resource;
7698         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
7699                 (void *)cache_resource,
7700                 rte_atomic32_read(&cache_resource->refcnt));
7701         return 0;
7702 }
7703
7704 /**
7705  * Release the tag.
7706  *
7707  * @param dev
7708  *   Pointer to Ethernet device.
7709  * @param tag_idx
7710  *   Tag index.
7711  *
7712  * @return
7713  *   1 while a reference on it exists, 0 when freed.
7714  */
7715 static int
7716 flow_dv_tag_release(struct rte_eth_dev *dev,
7717                     uint32_t tag_idx)
7718 {
7719         struct mlx5_priv *priv = dev->data->dev_private;
7720         struct mlx5_dev_ctx_shared *sh = priv->sh;
7721         struct mlx5_flow_dv_tag_resource *tag;
7722
7723         tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7724         if (!tag)
7725                 return 0;
7726         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
7727                 dev->data->port_id, (void *)tag,
7728                 rte_atomic32_read(&tag->refcnt));
7729         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7730                 claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
7731                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
7732                 DRV_LOG(DEBUG, "port %u tag %p: removed",
7733                         dev->data->port_id, (void *)tag);
7734                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
7735                 return 0;
7736         }
7737         return 1;
7738 }
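
/*
 * Lifecycle sketch (illustrative only): essentially what the FLAG/MARK
 * action translation below does.  The tag index stored in the flow
 * handle by the register call is what flow_dv_tag_release() expects at
 * flow destruction time.
 */
static int __rte_unused
flow_dv_tag_usage_sketch(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	uint32_t tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);

	if (flow_dv_tag_resource_register(dev, tag_be, dev_flow, error))
		return -rte_errno;
	/* ... on flow destruction: returns 1 while references remain. */
	return flow_dv_tag_release(dev, dev_flow->handle->dvh.rix_tag);
}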
7739
7740 /**
7741  * Translate port ID action to vport.
7742  *
7743  * @param[in] dev
7744  *   Pointer to rte_eth_dev structure.
7745  * @param[in] action
7746  *   Pointer to the port ID action.
7747  * @param[out] dst_port_id
7748  *   The target port ID.
7749  * @param[out] error
7750  *   Pointer to the error structure.
7751  *
7752  * @return
7753  *   0 on success, a negative errno value otherwise and rte_errno is set.
7754  */
7755 static int
7756 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7757                                  const struct rte_flow_action *action,
7758                                  uint32_t *dst_port_id,
7759                                  struct rte_flow_error *error)
7760 {
7761         uint32_t port;
7762         struct mlx5_priv *priv;
7763         const struct rte_flow_action_port_id *conf =
7764                         (const struct rte_flow_action_port_id *)action->conf;
7765
7766         port = conf->original ? dev->data->port_id : conf->id;
7767         priv = mlx5_port_to_eswitch_info(port, false);
7768         if (!priv)
7769                 return rte_flow_error_set(error, rte_errno,
7770                                           RTE_FLOW_ERROR_TYPE_ACTION,
7771                                           NULL,
7772                                           "No eswitch info was found for port");
7773 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7774         /*
7775          * This parameter is transferred to
7776          * mlx5dv_dr_action_create_dest_ib_port().
7777          */
7778         *dst_port_id = priv->dev_port;
7779 #else
7780         /*
7781          * Legacy mode, no LAG configuration is supported.
7782          * This parameter is transferred to
7783          * mlx5dv_dr_action_create_dest_vport().
7784          */
7785         *dst_port_id = priv->vport_id;
7786 #endif
7787         return 0;
7788 }
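
/*
 * Application-side sketch (illustrative only, the peer port id 1 is
 * hypothetical): the PORT_ID action configuration consumed by the
 * translation above.
 */
static const struct rte_flow_action_port_id port_id_sketch_conf = {
	.original = 0,
	.id = 1, /* Peer ethdev port to redirect packets to. */
};
static const struct rte_flow_action port_id_sketch_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
		.conf = &port_id_sketch_conf,
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};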
7789
7790 /**
7791  * Create a counter with aging configuration.
7792  *
7793  * @param[in] dev
7794  *   Pointer to rte_eth_dev structure.
7795  * @param[in, out] dev_flow
7796  *   Pointer to the sub flow.
7797  * @param[in] count
7798  *   Pointer to the counter action configuration.
7797  * @param[in] age
7798  *   Pointer to the aging action configuration.
7799  *
7800  * @return
7801  *   Index to flow counter on success, 0 otherwise.
7802  */
7803 static uint32_t
7804 flow_dv_translate_create_counter(struct rte_eth_dev *dev,
7805                                 struct mlx5_flow *dev_flow,
7806                                 const struct rte_flow_action_count *count,
7807                                 const struct rte_flow_action_age *age)
7808 {
7809         uint32_t counter;
7810         struct mlx5_age_param *age_param;
7811
7812         counter = flow_dv_counter_alloc(dev,
7813                                 count ? count->shared : 0,
7814                                 count ? count->id : 0,
7815                                 dev_flow->dv.group, !!age);
7816         if (!counter || age == NULL)
7817                 return counter;
7818         age_param = flow_dv_counter_idx_get_age(dev, counter);
7823         age_param->context = age->context ? age->context :
7824                 (void *)(uintptr_t)(dev_flow->flow_idx);
7825         /*
7826          * The counter age accuracy may have a bit of delay. Apply a 3/4
7827          * second bias to the timeout in order to let it age in time.
7828          */
7829         age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
7830         /* Set expire time in unit of 0.1 sec. */
7831         age_param->port_id = dev->data->port_id;
7832         age_param->expire = age_param->timeout +
7833                         rte_rdtsc() / (rte_get_tsc_hz() / 10);
7834         rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
7835         return counter;
7836 }
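
/*
 * Application-side sketch (illustrative only, values hypothetical):
 * COUNT and AGE action configurations as consumed above.  The timeout
 * is given in seconds and converted to 0.1 s units internally.
 */
static const struct rte_flow_action_count count_sketch_conf = {
	.shared = 0, /* Private counter for this flow. */
	.id = 0,
};
static const struct rte_flow_action_age age_sketch_conf = {
	.timeout = 30,	 /* Seconds. */
	.context = NULL, /* NULL defaults to the flow index, see above. */
};
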
7837 /**
7838  * Add Tx queue matcher.
7839  *
7840  * @param[in] dev
7841  *   Pointer to the dev struct.
7842  * @param[in, out] matcher
7843  *   Flow matcher.
7844  * @param[in, out] key
7845  *   Flow matcher value.
7846  * @param[in] item
7847  *   Flow pattern to translate.
7850  */
7851 static void
7852 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
7853                                 void *matcher, void *key,
7854                                 const struct rte_flow_item *item)
7855 {
7856         const struct mlx5_rte_flow_item_tx_queue *queue_m;
7857         const struct mlx5_rte_flow_item_tx_queue *queue_v;
7858         void *misc_m =
7859                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7860         void *misc_v =
7861                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7862         struct mlx5_txq_ctrl *txq;
7863         uint32_t queue;
7864
7866         queue_m = (const void *)item->mask;
7867         if (!queue_m)
7868                 return;
7869         queue_v = (const void *)item->spec;
7870         if (!queue_v)
7871                 return;
7872         txq = mlx5_txq_get(dev, queue_v->queue);
7873         if (!txq)
7874                 return;
7875         queue = txq->obj->sq->id;
7876         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
7877         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
7878                  queue & queue_m->queue);
7879         mlx5_txq_release(dev, queue_v->queue);
7880 }
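
/*
 * Illustrative only: the internal TX queue item carries the ethdev
 * queue index, which the translation above resolves to the SQ number
 * reported by the hardware in source_sqn.  Queue 3 is hypothetical.
 */
static const struct mlx5_rte_flow_item_tx_queue tx_queue_sketch_spec = {
	.queue = 3,
};
static const struct mlx5_rte_flow_item_tx_queue tx_queue_sketch_mask = {
	.queue = UINT32_MAX,
};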
7881
7882 /**
7883  * Set the hash fields according to the @p flow information.
7884  *
7885  * @param[in] dev_flow
7886  *   Pointer to the mlx5_flow.
7887  * @param[in] rss_desc
7888  *   Pointer to the mlx5_flow_rss_desc.
7889  */
7890 static void
7891 flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
7892                        struct mlx5_flow_rss_desc *rss_desc)
7893 {
7894         uint64_t items = dev_flow->handle->layers;
7895         int rss_inner = 0;
7896         uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
7897
7898         dev_flow->hash_fields = 0;
7899 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
7900         if (rss_desc->level >= 2) {
7901                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
7902                 rss_inner = 1;
7903         }
7904 #endif
7905         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
7906             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
7907                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
7908                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7909                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
7910                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7911                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
7912                         else
7913                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
7914                 }
7915         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
7916                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
7917                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
7918                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7919                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
7920                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7921                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
7922                         else
7923                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
7924                 }
7925         }
7926         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
7927             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
7928                 if (rss_types & ETH_RSS_UDP) {
7929                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7930                                 dev_flow->hash_fields |=
7931                                                 IBV_RX_HASH_SRC_PORT_UDP;
7932                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7933                                 dev_flow->hash_fields |=
7934                                                 IBV_RX_HASH_DST_PORT_UDP;
7935                         else
7936                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
7937                 }
7938         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
7939                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
7940                 if (rss_types & ETH_RSS_TCP) {
7941                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7942                                 dev_flow->hash_fields |=
7943                                                 IBV_RX_HASH_SRC_PORT_TCP;
7944                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7945                                 dev_flow->hash_fields |=
7946                                                 IBV_RX_HASH_DST_PORT_TCP;
7947                         else
7948                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
7949                 }
7950         }
7951 }
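
/*
 * Application-side sketch (illustrative only, queue list hypothetical):
 * an RSS action hashing on the inner IPv4/IPv6 source address only,
 * which drives the IBV_RX_HASH_INNER | IBV_RX_HASH_SRC_IPV4/6 selection
 * above.
 */
static const uint16_t rss_sketch_queues[] = { 0, 1, 2, 3 };
static const struct rte_flow_action_rss rss_sketch_conf = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 2, /* Hash on the innermost headers. */
	.types = ETH_RSS_IP | ETH_RSS_L3_SRC_ONLY,
	.queue_num = RTE_DIM(rss_sketch_queues),
	.queue = rss_sketch_queues,
};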
7952
7953 /**
7954  * Fill the flow with DV spec, lock free
7955  * (the required mutex must be acquired by the caller).
7956  *
7957  * @param[in] dev
7958  *   Pointer to rte_eth_dev structure.
7959  * @param[in, out] dev_flow
7960  *   Pointer to the sub flow.
7961  * @param[in] attr
7962  *   Pointer to the flow attributes.
7963  * @param[in] items
7964  *   Pointer to the list of items.
7965  * @param[in] actions
7966  *   Pointer to the list of actions.
7967  * @param[out] error
7968  *   Pointer to the error structure.
7969  *
7970  * @return
7971  *   0 on success, a negative errno value otherwise and rte_errno is set.
7972  */
7973 static int
7974 __flow_dv_translate(struct rte_eth_dev *dev,
7975                     struct mlx5_flow *dev_flow,
7976                     const struct rte_flow_attr *attr,
7977                     const struct rte_flow_item items[],
7978                     const struct rte_flow_action actions[],
7979                     struct rte_flow_error *error)
7980 {
7981         struct mlx5_priv *priv = dev->data->dev_private;
7982         struct mlx5_dev_config *dev_conf = &priv->config;
7983         struct rte_flow *flow = dev_flow->flow;
7984         struct mlx5_flow_handle *handle = dev_flow->handle;
7985         struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
7986                                               priv->rss_desc)
7987                                               [!!priv->flow_nested_idx];
7988         uint64_t item_flags = 0;
7989         uint64_t last_item = 0;
7990         uint64_t action_flags = 0;
7991         uint64_t priority = attr->priority;
7992         struct mlx5_flow_dv_matcher matcher = {
7993                 .mask = {
7994                         .size = sizeof(matcher.mask.buf) -
7995                                 MLX5_ST_SZ_BYTES(fte_match_set_misc4),
7996                 },
7997         };
7998         int actions_n = 0;
7999         bool actions_end = false;
8000         union {
8001                 struct mlx5_flow_dv_modify_hdr_resource res;
8002                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
8003                             sizeof(struct mlx5_modification_cmd) *
8004                             (MLX5_MAX_MODIFY_NUM + 1)];
8005         } mhdr_dummy;
8006         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
8007         const struct rte_flow_action_count *count = NULL;
8008         const struct rte_flow_action_age *age = NULL;
8009         union flow_dv_attr flow_attr = { .attr = 0 };
8010         uint32_t tag_be;
8011         union mlx5_flow_tbl_key tbl_key;
8012         uint32_t modify_action_position = UINT32_MAX;
8013         void *match_mask = matcher.mask.buf;
8014         void *match_value = dev_flow->dv.value.buf;
8015         uint8_t next_protocol = 0xff;
8016         struct rte_vlan_hdr vlan = { 0 };
8017         uint32_t table;
8018         int ret = 0;
8019
8020         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
8021                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
8022         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
8023                                        !!priv->fdb_def_rule, &table, error);
8024         if (ret)
8025                 return ret;
8026         dev_flow->dv.group = table;
8027         if (attr->transfer)
8028                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
8029         if (priority == MLX5_FLOW_PRIO_RSVD)
8030                 priority = dev_conf->flow_prio - 1;
8031         /* The number of actions must be reset to 0 in case of a dirty stack. */
8032         mhdr_res->actions_num = 0;
8033         for (; !actions_end ; actions++) {
8034                 const struct rte_flow_action_queue *queue;
8035                 const struct rte_flow_action_rss *rss;
8036                 const struct rte_flow_action *action = actions;
8037                 const uint8_t *rss_key;
8038                 const struct rte_flow_action_jump *jump_data;
8039                 const struct rte_flow_action_meter *mtr;
8040                 struct mlx5_flow_tbl_resource *tbl;
8041                 uint32_t port_id = 0;
8042                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
8043                 int action_type = actions->type;
8044                 const struct rte_flow_action *found_action = NULL;
8045                 struct mlx5_flow_meter *fm = NULL;
8046
8047                 if (!mlx5_flow_os_action_supported(action_type))
8048                         return rte_flow_error_set(error, ENOTSUP,
8049                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8050                                                   actions,
8051                                                   "action not supported");
8052                 switch (action_type) {
8053                 case RTE_FLOW_ACTION_TYPE_VOID:
8054                         break;
8055                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
8056                         if (flow_dv_translate_action_port_id(dev, action,
8057                                                              &port_id, error))
8058                                 return -rte_errno;
8059                         port_id_resource.port_id = port_id;
8060                         MLX5_ASSERT(!handle->rix_port_id_action);
8061                         if (flow_dv_port_id_action_resource_register
8062                             (dev, &port_id_resource, dev_flow, error))
8063                                 return -rte_errno;
8064                         dev_flow->dv.actions[actions_n++] =
8065                                         dev_flow->dv.port_id_action->action;
8066                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
8067                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
8068                         break;
8069                 case RTE_FLOW_ACTION_TYPE_FLAG:
8070                         action_flags |= MLX5_FLOW_ACTION_FLAG;
8071                         dev_flow->handle->mark = 1;
8072                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8073                                 struct rte_flow_action_mark mark = {
8074                                         .id = MLX5_FLOW_MARK_DEFAULT,
8075                                 };
8076
8077                                 if (flow_dv_convert_action_mark(dev, &mark,
8078                                                                 mhdr_res,
8079                                                                 error))
8080                                         return -rte_errno;
8081                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8082                                 break;
8083                         }
8084                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
8085                         /*
8086                          * Only one FLAG or MARK is supported per device flow
8087                          * right now. So the pointer to the tag resource must be
8088                          * zero before the register process.
8089                          */
8090                         MLX5_ASSERT(!handle->dvh.rix_tag);
8091                         if (flow_dv_tag_resource_register(dev, tag_be,
8092                                                           dev_flow, error))
8093                                 return -rte_errno;
8094                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8095                         dev_flow->dv.actions[actions_n++] =
8096                                         dev_flow->dv.tag_resource->action;
8097                         break;
8098                 case RTE_FLOW_ACTION_TYPE_MARK:
8099                         action_flags |= MLX5_FLOW_ACTION_MARK;
8100                         dev_flow->handle->mark = 1;
8101                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
8102                                 const struct rte_flow_action_mark *mark =
8103                                         (const struct rte_flow_action_mark *)
8104                                                 actions->conf;
8105
8106                                 if (flow_dv_convert_action_mark(dev, mark,
8107                                                                 mhdr_res,
8108                                                                 error))
8109                                         return -rte_errno;
8110                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
8111                                 break;
8112                         }
8113                         /* Fall-through */
8114                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
8115                         /* Legacy (non-extensive) MARK action. */
8116                         tag_be = mlx5_flow_mark_set
8117                               (((const struct rte_flow_action_mark *)
8118                                (actions->conf))->id);
8119                         MLX5_ASSERT(!handle->dvh.rix_tag);
8120                         if (flow_dv_tag_resource_register(dev, tag_be,
8121                                                           dev_flow, error))
8122                                 return -rte_errno;
8123                         MLX5_ASSERT(dev_flow->dv.tag_resource);
8124                         dev_flow->dv.actions[actions_n++] =
8125                                         dev_flow->dv.tag_resource->action;
8126                         break;
8127                 case RTE_FLOW_ACTION_TYPE_SET_META:
8128                         if (flow_dv_convert_action_set_meta
8129                                 (dev, mhdr_res, attr,
8130                                  (const struct rte_flow_action_set_meta *)
8131                                   actions->conf, error))
8132                                 return -rte_errno;
8133                         action_flags |= MLX5_FLOW_ACTION_SET_META;
8134                         break;
8135                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
8136                         if (flow_dv_convert_action_set_tag
8137                                 (dev, mhdr_res,
8138                                  (const struct rte_flow_action_set_tag *)
8139                                   actions->conf, error))
8140                                 return -rte_errno;
8141                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8142                         break;
8143                 case RTE_FLOW_ACTION_TYPE_DROP:
8144                         action_flags |= MLX5_FLOW_ACTION_DROP;
8145                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
8146                         break;
8147                 case RTE_FLOW_ACTION_TYPE_QUEUE:
8148                         queue = actions->conf;
8149                         rss_desc->queue_num = 1;
8150                         rss_desc->queue[0] = queue->index;
8151                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
8152                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8153                         break;
8154                 case RTE_FLOW_ACTION_TYPE_RSS:
8155                         rss = actions->conf;
8156                         memcpy(rss_desc->queue, rss->queue,
8157                                rss->queue_num * sizeof(uint16_t));
8158                         rss_desc->queue_num = rss->queue_num;
8159                         /* NULL RSS key indicates default RSS key. */
8160                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
8161                         memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
8162                         /*
8163                          * rss->level and rss->types should be set in advance
8164                          * when expanding items for RSS.
8165                          */
8166                         action_flags |= MLX5_FLOW_ACTION_RSS;
8167                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
8168                         break;
8169                 case RTE_FLOW_ACTION_TYPE_AGE:
8170                 case RTE_FLOW_ACTION_TYPE_COUNT:
8171                         if (!dev_conf->devx) {
8172                                 return rte_flow_error_set
8173                                               (error, ENOTSUP,
8174                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8175                                                NULL,
8176                                                "count action not supported");
8177                         }
8178                         /* Save information first, will apply later. */
8179                         if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
8180                                 count = action->conf;
8181                         else
8182                                 age = action->conf;
8183                         action_flags |= MLX5_FLOW_ACTION_COUNT;
8184                         break;
8185                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
8186                         dev_flow->dv.actions[actions_n++] =
8187                                                 priv->sh->pop_vlan_action;
8188                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
8189                         break;
8190                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
8191                         if (!(action_flags &
8192                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
8193                                 flow_dev_get_vlan_info_from_items(items, &vlan);
8194                         vlan.eth_proto = rte_be_to_cpu_16
8195                              ((((const struct rte_flow_action_of_push_vlan *)
8196                                                    actions->conf)->ethertype));
8197                         found_action = mlx5_flow_find_action
8198                                         (actions + 1,
8199                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
8200                         if (found_action)
8201                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8202                         found_action = mlx5_flow_find_action
8203                                         (actions + 1,
8204                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
8205                         if (found_action)
8206                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
8207                         if (flow_dv_create_action_push_vlan
8208                                             (dev, attr, &vlan, dev_flow, error))
8209                                 return -rte_errno;
8210                         dev_flow->dv.actions[actions_n++] =
8211                                         dev_flow->dv.push_vlan_res->action;
8212                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
8213                         break;
8214                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
8215                         /* The OF_PUSH_VLAN action handled this one. */
8216                         MLX5_ASSERT(action_flags &
8217                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
8218                         break;
8219                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
8220                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
8221                                 break;
8222                         flow_dev_get_vlan_info_from_items(items, &vlan);
8223                         mlx5_update_vlan_vid_pcp(actions, &vlan);
8224                         /* If no VLAN push - this is a modify header action */
8225                         if (flow_dv_convert_action_modify_vlan_vid
8226                                                 (mhdr_res, actions, error))
8227                                 return -rte_errno;
8228                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
8229                         break;
8230                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
8231                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
8232                         if (flow_dv_create_action_l2_encap(dev, actions,
8233                                                            dev_flow,
8234                                                            attr->transfer,
8235                                                            error))
8236                                 return -rte_errno;
8237                         dev_flow->dv.actions[actions_n++] =
8238                                         dev_flow->dv.encap_decap->action;
8239                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8240                         break;
8241                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
8242                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
8243                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
8244                                                            attr->transfer,
8245                                                            error))
8246                                 return -rte_errno;
8247                         dev_flow->dv.actions[actions_n++] =
8248                                         dev_flow->dv.encap_decap->action;
8249                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8250                         break;
8251                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
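                        /*
                         * Illustrative note: a typical tunnel rewrite is a
                         * RAW_DECAP immediately followed by this RAW_ENCAP;
                         * such a pair is translated into one combined raw
                         * reformat action below.
                         */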
8252                         /* Handle encap with preceding decap. */
8253                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
8254                                 if (flow_dv_create_action_raw_encap
8255                                         (dev, actions, dev_flow, attr, error))
8256                                         return -rte_errno;
8257                                 dev_flow->dv.actions[actions_n++] =
8258                                         dev_flow->dv.encap_decap->action;
8259                         } else {
8260                                 /* Handle encap without preceding decap. */
8261                                 if (flow_dv_create_action_l2_encap
8262                                     (dev, actions, dev_flow, attr->transfer,
8263                                      error))
8264                                         return -rte_errno;
8265                                 dev_flow->dv.actions[actions_n++] =
8266                                         dev_flow->dv.encap_decap->action;
8267                         }
8268                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
8269                         break;
8270                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
8271                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
8272                                 ;
8273                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
8274                                 if (flow_dv_create_action_l2_decap
8275                                     (dev, dev_flow, attr->transfer, error))
8276                                         return -rte_errno;
8277                                 dev_flow->dv.actions[actions_n++] =
8278                                         dev_flow->dv.encap_decap->action;
8279                         }
8280                         /* If decap is followed by encap, handle it at encap. */
8281                         action_flags |= MLX5_FLOW_ACTION_DECAP;
8282                         break;
8283                 case RTE_FLOW_ACTION_TYPE_JUMP:
8284                         jump_data = action->conf;
8285                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
8286                                                        jump_data->group,
8287                                                        !!priv->fdb_def_rule,
8288                                                        &table, error);
8289                         if (ret)
8290                                 return ret;
8291                         tbl = flow_dv_tbl_resource_get(dev, table,
8292                                                        attr->egress,
8293                                                        attr->transfer, error);
8294                         if (!tbl)
8295                                 return rte_flow_error_set
8296                                                 (error, errno,
8297                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8298                                                  NULL,
8299                                                  "cannot create jump action.");
8300                         if (flow_dv_jump_tbl_resource_register
8301                             (dev, tbl, dev_flow, error)) {
8302                                 flow_dv_tbl_resource_release(dev, tbl);
8303                                 return rte_flow_error_set
8304                                                 (error, errno,
8305                                                  RTE_FLOW_ERROR_TYPE_ACTION,
8306                                                  NULL,
8307                                                  "cannot create jump action.");
8308                         }
8309                         dev_flow->dv.actions[actions_n++] =
8310                                         dev_flow->dv.jump->action;
8311                         action_flags |= MLX5_FLOW_ACTION_JUMP;
8312                         dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
8313                         break;
8314                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
8315                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
8316                         if (flow_dv_convert_action_modify_mac
8317                                         (mhdr_res, actions, error))
8318                                 return -rte_errno;
8319                         action_flags |= actions->type ==
8320                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
8321                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
8322                                         MLX5_FLOW_ACTION_SET_MAC_DST;
8323                         break;
8324                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
8325                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
8326                         if (flow_dv_convert_action_modify_ipv4
8327                                         (mhdr_res, actions, error))
8328                                 return -rte_errno;
8329                         action_flags |= actions->type ==
8330                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
8331                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
8332                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
8333                         break;
8334                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
8335                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
8336                         if (flow_dv_convert_action_modify_ipv6
8337                                         (mhdr_res, actions, error))
8338                                 return -rte_errno;
8339                         action_flags |= actions->type ==
8340                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
8341                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
8342                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
8343                         break;
8344                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
8345                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
8346                         if (flow_dv_convert_action_modify_tp
8347                                         (mhdr_res, actions, items,
8348                                          &flow_attr, dev_flow, !!(action_flags &
8349                                          MLX5_FLOW_ACTION_DECAP), error))
8350                                 return -rte_errno;
8351                         action_flags |= actions->type ==
8352                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
8353                                         MLX5_FLOW_ACTION_SET_TP_SRC :
8354                                         MLX5_FLOW_ACTION_SET_TP_DST;
8355                         break;
8356                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
8357                         if (flow_dv_convert_action_modify_dec_ttl
8358                                         (mhdr_res, items, &flow_attr, dev_flow,
8359                                          !!(action_flags &
8360                                          MLX5_FLOW_ACTION_DECAP), error))
8361                                 return -rte_errno;
8362                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
8363                         break;
8364                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
8365                         if (flow_dv_convert_action_modify_ttl
8366                                         (mhdr_res, actions, items, &flow_attr,
8367                                          dev_flow, !!(action_flags &
8368                                          MLX5_FLOW_ACTION_DECAP), error))
8369                                 return -rte_errno;
8370                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
8371                         break;
8372                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
8373                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
8374                         if (flow_dv_convert_action_modify_tcp_seq
8375                                         (mhdr_res, actions, error))
8376                                 return -rte_errno;
8377                         action_flags |= actions->type ==
8378                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
8379                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
8380                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
8381                         break;
8382
8383                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
8384                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
8385                         if (flow_dv_convert_action_modify_tcp_ack
8386                                         (mhdr_res, actions, error))
8387                                 return -rte_errno;
8388                         action_flags |= actions->type ==
8389                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
8390                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
8391                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
8392                         break;
8393                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
8394                         if (flow_dv_convert_action_set_reg
8395                                         (mhdr_res, actions, error))
8396                                 return -rte_errno;
8397                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8398                         break;
8399                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
8400                         if (flow_dv_convert_action_copy_mreg
8401                                         (dev, mhdr_res, actions, error))
8402                                 return -rte_errno;
8403                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
8404                         break;
8405                 case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
8406                         action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
8407                         dev_flow->handle->fate_action =
8408                                         MLX5_FLOW_FATE_DEFAULT_MISS;
8409                         break;
8410                 case RTE_FLOW_ACTION_TYPE_METER:
8411                         mtr = actions->conf;
8412                         if (!flow->meter) {
8413                                 fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
8414                                                             attr, error);
8415                                 if (!fm)
8416                                         return rte_flow_error_set(error,
8417                                                 rte_errno,
8418                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8419                                                 NULL,
8420                                                 "meter not found "
8421                                                 "or invalid parameters");
8422                                 flow->meter = fm->idx;
8423                         }
8424                         /* Set the meter action. */
8425                         if (!fm) {
8426                                 fm = mlx5_ipool_get(priv->sh->ipool
8427                                                 [MLX5_IPOOL_MTR], flow->meter);
8428                                 if (!fm)
8429                                         return rte_flow_error_set(error,
8430                                                 rte_errno,
8431                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8432                                                 NULL,
8433                                                 "meter not found "
8434                                                 "or invalid parameters");
8435                         }
8436                         dev_flow->dv.actions[actions_n++] =
8437                                 fm->mfts->meter_action;
8438                         action_flags |= MLX5_FLOW_ACTION_METER;
8439                         break;
8440                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
8441                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
8442                                                               actions, error))
8443                                 return -rte_errno;
8444                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
8445                         break;
8446                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
8447                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
8448                                                               actions, error))
8449                                 return -rte_errno;
8450                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
8451                         break;
8452                 case RTE_FLOW_ACTION_TYPE_END:
8453                         actions_end = true;
8454                         if (mhdr_res->actions_num) {
8455                                 /* create modify action if needed. */
8456                                 if (flow_dv_modify_hdr_resource_register
8457                                         (dev, mhdr_res, dev_flow, error))
8458                                         return -rte_errno;
8459                                 dev_flow->dv.actions[modify_action_position] =
8460                                         handle->dvh.modify_hdr->action;
8461                         }
8462                         if (action_flags & MLX5_FLOW_ACTION_COUNT) {
8463                                 flow->counter =
8464                                         flow_dv_translate_create_counter(dev,
8465                                                 dev_flow, count, age);
8466
8467                                 if (!flow->counter)
8468                                         return rte_flow_error_set
8469                                                 (error, rte_errno,
8470                                                 RTE_FLOW_ERROR_TYPE_ACTION,
8471                                                 NULL,
8472                                                 "cannot create counter"
8473                                                 " object.");
8474                                 dev_flow->dv.actions[actions_n++] =
8475                                           (flow_dv_counter_get_by_idx(dev,
8476                                           flow->counter, NULL))->action;
8477                         }
8478                         break;
8479                 default:
8480                         break;
8481                 }
8482                 if (mhdr_res->actions_num &&
8483                     modify_action_position == UINT32_MAX)
8484                         modify_action_position = actions_n++;
8485         }
8486         dev_flow->dv.actions_n = actions_n;
8487         dev_flow->act_flags = action_flags;
8488         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
8489                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
8490                 int item_type = items->type;
8491
8492                 if (!mlx5_flow_os_item_supported(item_type))
8493                         return rte_flow_error_set(error, ENOTSUP,
8494                                                   RTE_FLOW_ERROR_TYPE_ITEM,
8495                                                   NULL, "item not supported");
8496                 switch (item_type) {
8497                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
8498                         flow_dv_translate_item_port_id(dev, match_mask,
8499                                                        match_value, items);
8500                         last_item = MLX5_FLOW_ITEM_PORT_ID;
8501                         break;
8502                 case RTE_FLOW_ITEM_TYPE_ETH:
8503                         flow_dv_translate_item_eth(match_mask, match_value,
8504                                                    items, tunnel,
8505                                                    dev_flow->dv.group);
8506                         matcher.priority = (action_flags &
8507                                         MLX5_FLOW_ACTION_DEFAULT_MISS) &&
8508                                         !dev_flow->external ?
8509                                         MLX5_PRIORITY_MAP_L3 :
8510                                         MLX5_PRIORITY_MAP_L2;
8511                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
8512                                              MLX5_FLOW_LAYER_OUTER_L2;
8513                         break;
8514                 case RTE_FLOW_ITEM_TYPE_VLAN:
8515                         flow_dv_translate_item_vlan(dev_flow,
8516                                                     match_mask, match_value,
8517                                                     items, tunnel,
8518                                                     dev_flow->dv.group);
8519                         matcher.priority = MLX5_PRIORITY_MAP_L2;
8520                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
8521                                               MLX5_FLOW_LAYER_INNER_VLAN) :
8522                                              (MLX5_FLOW_LAYER_OUTER_L2 |
8523                                               MLX5_FLOW_LAYER_OUTER_VLAN);
8524                         break;
8525                 case RTE_FLOW_ITEM_TYPE_IPV4:
8526                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8527                                                   &item_flags, &tunnel);
8528                         flow_dv_translate_item_ipv4(match_mask, match_value,
8529                                                     items, item_flags, tunnel,
8530                                                     dev_flow->dv.group);
8531                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8532                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
8533                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
8534                         if (items->mask != NULL &&
8535                             ((const struct rte_flow_item_ipv4 *)
8536                              items->mask)->hdr.next_proto_id) {
8537                                 next_protocol =
8538                                         ((const struct rte_flow_item_ipv4 *)
8539                                          (items->spec))->hdr.next_proto_id;
8540                                 next_protocol &=
8541                                         ((const struct rte_flow_item_ipv4 *)
8542                                          (items->mask))->hdr.next_proto_id;
8543                         } else {
8544                                 /* Reset for inner layer. */
8545                                 next_protocol = 0xff;
8546                         }
8547                         break;
8548                 case RTE_FLOW_ITEM_TYPE_IPV6:
8549                         mlx5_flow_tunnel_ip_check(items, next_protocol,
8550                                                   &item_flags, &tunnel);
8551                         flow_dv_translate_item_ipv6(match_mask, match_value,
8552                                                     items, item_flags, tunnel,
8553                                                     dev_flow->dv.group);
8554                         matcher.priority = MLX5_PRIORITY_MAP_L3;
8555                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
8556                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
8557                         if (items->mask != NULL &&
8558                             ((const struct rte_flow_item_ipv6 *)
8559                              items->mask)->hdr.proto) {
8560                                 next_protocol =
8561                                         ((const struct rte_flow_item_ipv6 *)
8562                                          items->spec)->hdr.proto;
8563                                 next_protocol &=
8564                                         ((const struct rte_flow_item_ipv6 *)
8565                                          items->mask)->hdr.proto;
8566                         } else {
8567                                 /* Reset for inner layer. */
8568                                 next_protocol = 0xff;
8569                         }
8570                         break;
8571                 case RTE_FLOW_ITEM_TYPE_TCP:
8572                         flow_dv_translate_item_tcp(match_mask, match_value,
8573                                                    items, tunnel);
8574                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8575                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
8576                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
8577                         break;
8578                 case RTE_FLOW_ITEM_TYPE_UDP:
8579                         flow_dv_translate_item_udp(match_mask, match_value,
8580                                                    items, tunnel);
8581                         matcher.priority = MLX5_PRIORITY_MAP_L4;
8582                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
8583                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
8584                         break;
8585                 case RTE_FLOW_ITEM_TYPE_GRE:
8586                         flow_dv_translate_item_gre(match_mask, match_value,
8587                                                    items, tunnel);
8588                         matcher.priority = rss_desc->level >= 2 ?
8589                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8590                         last_item = MLX5_FLOW_LAYER_GRE;
8591                         break;
8592                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
8593                         flow_dv_translate_item_gre_key(match_mask,
8594                                                        match_value, items);
8595                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
8596                         break;
8597                 case RTE_FLOW_ITEM_TYPE_NVGRE:
8598                         flow_dv_translate_item_nvgre(match_mask, match_value,
8599                                                      items, tunnel);
8600                         matcher.priority = rss_desc->level >= 2 ?
8601                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8602                         last_item = MLX5_FLOW_LAYER_GRE;
8603                         break;
8604                 case RTE_FLOW_ITEM_TYPE_VXLAN:
8605                         flow_dv_translate_item_vxlan(match_mask, match_value,
8606                                                      items, tunnel);
8607                         matcher.priority = rss_desc->level >= 2 ?
8608                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8609                         last_item = MLX5_FLOW_LAYER_VXLAN;
8610                         break;
8611                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
8612                         flow_dv_translate_item_vxlan_gpe(match_mask,
8613                                                          match_value, items,
8614                                                          tunnel);
8615                         matcher.priority = rss_desc->level >= 2 ?
8616                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8617                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
8618                         break;
8619                 case RTE_FLOW_ITEM_TYPE_GENEVE:
8620                         flow_dv_translate_item_geneve(match_mask, match_value,
8621                                                       items, tunnel);
8622                         matcher.priority = rss_desc->level >= 2 ?
8623                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8624                         last_item = MLX5_FLOW_LAYER_GENEVE;
8625                         break;
8626                 case RTE_FLOW_ITEM_TYPE_MPLS:
8627                         flow_dv_translate_item_mpls(match_mask, match_value,
8628                                                     items, last_item, tunnel);
8629                         matcher.priority = rss_desc->level >= 2 ?
8630                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8631                         last_item = MLX5_FLOW_LAYER_MPLS;
8632                         break;
8633                 case RTE_FLOW_ITEM_TYPE_MARK:
8634                         flow_dv_translate_item_mark(dev, match_mask,
8635                                                     match_value, items);
8636                         last_item = MLX5_FLOW_ITEM_MARK;
8637                         break;
8638                 case RTE_FLOW_ITEM_TYPE_META:
8639                         flow_dv_translate_item_meta(dev, match_mask,
8640                                                     match_value, attr, items);
8641                         last_item = MLX5_FLOW_ITEM_METADATA;
8642                         break;
8643                 case RTE_FLOW_ITEM_TYPE_ICMP:
8644                         flow_dv_translate_item_icmp(match_mask, match_value,
8645                                                     items, tunnel);
8646                         last_item = MLX5_FLOW_LAYER_ICMP;
8647                         break;
8648                 case RTE_FLOW_ITEM_TYPE_ICMP6:
8649                         flow_dv_translate_item_icmp6(match_mask, match_value,
8650                                                       items, tunnel);
8651                         last_item = MLX5_FLOW_LAYER_ICMP6;
8652                         break;
8653                 case RTE_FLOW_ITEM_TYPE_TAG:
8654                         flow_dv_translate_item_tag(dev, match_mask,
8655                                                    match_value, items);
8656                         last_item = MLX5_FLOW_ITEM_TAG;
8657                         break;
8658                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
8659                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
8660                                                         match_value, items);
8661                         last_item = MLX5_FLOW_ITEM_TAG;
8662                         break;
8663                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
8664                         flow_dv_translate_item_tx_queue(dev, match_mask,
8665                                                         match_value,
8666                                                         items);
8667                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
8668                         break;
8669                 case RTE_FLOW_ITEM_TYPE_GTP:
8670                         flow_dv_translate_item_gtp(match_mask, match_value,
8671                                                    items, tunnel);
8672                         matcher.priority = rss_desc->level >= 2 ?
8673                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
8674                         last_item = MLX5_FLOW_LAYER_GTP;
8675                         break;
8676                 case RTE_FLOW_ITEM_TYPE_ECPRI:
8677                         if (!mlx5_flex_parser_ecpri_exist(dev)) {
8678                                 /* Create it only the first time it is used. */
8679                                 ret = mlx5_flex_parser_ecpri_alloc(dev);
8680                                 if (ret)
8681                                         return rte_flow_error_set
8682                                                 (error, -ret,
8683                                                 RTE_FLOW_ERROR_TYPE_ITEM,
8684                                                 NULL,
8685                                                 "cannot create eCPRI parser");
8686                         }
8687                         /* Adjust the matcher mask size and device flow value size. */
8688                         matcher.mask.size = MLX5_ST_SZ_BYTES(fte_match_param);
8689                         dev_flow->dv.value.size =
8690                                         MLX5_ST_SZ_BYTES(fte_match_param);
8691                         flow_dv_translate_item_ecpri(dev, match_mask,
8692                                                      match_value, items);
8693                         /* No other protocol should follow eCPRI layer. */
8694                         last_item = MLX5_FLOW_LAYER_ECPRI;
8695                         break;
8696                 default:
8697                         break;
8698                 }
8699                 item_flags |= last_item;
8700         }
8701         /*
8702          * When E-Switch mode is enabled, there are two cases where the
8703          * source port must be set manually.
8704          * The first one is a NIC steering rule, and the second is an
8705          * E-Switch rule where no port_id item was found. In both cases
8706          * the source port is set according to the port currently in use.
8707          */
8708         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
8709             (priv->representor || priv->master)) {
8710                 if (flow_dv_translate_item_port_id(dev, match_mask,
8711                                                    match_value, NULL))
8712                         return -rte_errno;
8713         }
8714 #ifdef RTE_LIBRTE_MLX5_DEBUG
8715         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
8716                                               dev_flow->dv.value.buf));
8717 #endif
8718         /*
8719          * Layers may be already initialized from prefix flow if this dev_flow
8720          * is the suffix flow.
8721          */
8722         handle->layers |= item_flags;
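        /* Select the Rx hash fields only when an RSS fate action is present. */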
8723         if (action_flags & MLX5_FLOW_ACTION_RSS)
8724                 flow_dv_hashfields_set(dev_flow, rss_desc);
8725         /* Register matcher. */
8726         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
8727                                     matcher.mask.size);
8728         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
8729                                                      matcher.priority);
8730         /* The reserved field does not need to be set to 0 here. */
8731         tbl_key.domain = attr->transfer;
8732         tbl_key.direction = attr->egress;
8733         tbl_key.table_id = dev_flow->dv.group;
8734         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
8735                 return -rte_errno;
8736         return 0;
8737 }
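/*
 * Illustrative note (informational sketch, not driver API): the IPv4 and
 * IPv6 cases above derive the next protocol by masking the spec with the
 * mask, e.g.
 *
 *	next_protocol = spec->hdr.next_proto_id & mask->hdr.next_proto_id;
 *
 * so tunnel detection only trusts protocol bits the application actually
 * matched on; 0xff is used as the "unknown" value for inner layers.
 */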
8738
8739 /**
8740  * Apply the flow to the NIC. Lock free; the mutex must be acquired
8741  * by the caller.
8742  *
8743  * @param[in] dev
8744  *   Pointer to the Ethernet device structure.
8745  * @param[in, out] flow
8746  *   Pointer to flow structure.
8747  * @param[out] error
8748  *   Pointer to error structure.
8749  *
8750  * @return
8751  *   0 on success, a negative errno value otherwise and rte_errno is set.
8752  */
8753 static int
8754 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
8755                 struct rte_flow_error *error)
8756 {
8757         struct mlx5_flow_dv_workspace *dv;
8758         struct mlx5_flow_handle *dh;
8759         struct mlx5_flow_handle_dv *dv_h;
8760         struct mlx5_flow *dev_flow;
8761         struct mlx5_priv *priv = dev->data->dev_private;
8762         uint32_t handle_idx;
8763         int n;
8764         int err;
8765         int idx;
8766
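        /* Walk the device sub-flows of this rte_flow, newest first. */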
8767         for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
8768                 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
8769                 dv = &dev_flow->dv;
8770                 dh = dev_flow->handle;
8771                 dv_h = &dh->dvh;
8772                 n = dv->actions_n;
8773                 if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8774                         if (dv->transfer) {
8775                                 dv->actions[n++] = priv->sh->esw_drop_action;
8776                         } else {
8777                                 struct mlx5_hrxq *drop_hrxq;
8778                                 drop_hrxq = mlx5_hrxq_drop_new(dev);
8779                                 if (!drop_hrxq) {
8780                                         rte_flow_error_set
8781                                                 (error, errno,
8782                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8783                                                  NULL,
8784                                                  "cannot get drop hash queue");
8785                                         goto error;
8786                                 }
8787                                 /*
8788                                  * Drop queues are released by the dedicated
8789                                  * mlx5_hrxq_drop_release() function. Assign
8790                                  * the special index to hrxq to mark that the
8791                                  * queue has been allocated.
8792                                  */
8793                                 dh->rix_hrxq = UINT32_MAX;
8794                                 dv->actions[n++] = drop_hrxq->action;
8795                         }
8796                 } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8797                         struct mlx5_hrxq *hrxq;
8798                         uint32_t hrxq_idx;
8799                         struct mlx5_flow_rss_desc *rss_desc =
8800                                 &((struct mlx5_flow_rss_desc *)priv->rss_desc)
8801                                 [!!priv->flow_nested_idx];
8802
8803                         MLX5_ASSERT(rss_desc->queue_num);
8804                         hrxq_idx = mlx5_hrxq_get(dev, rss_desc->key,
8805                                                  MLX5_RSS_HASH_KEY_LEN,
8806                                                  dev_flow->hash_fields,
8807                                                  rss_desc->queue,
8808                                                  rss_desc->queue_num);
8809                         if (!hrxq_idx) {
8810                                 hrxq_idx = mlx5_hrxq_new
8811                                                 (dev, rss_desc->key,
8812                                                 MLX5_RSS_HASH_KEY_LEN,
8813                                                 dev_flow->hash_fields,
8814                                                 rss_desc->queue,
8815                                                 rss_desc->queue_num,
8816                                                 !!(dh->layers &
8817                                                 MLX5_FLOW_LAYER_TUNNEL));
8818                         }
8819                         hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
8820                                               hrxq_idx);
8821                         if (!hrxq) {
8822                                 rte_flow_error_set
8823                                         (error, rte_errno,
8824                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8825                                          "cannot get hash queue");
8826                                 goto error;
8827                         }
8828                         dh->rix_hrxq = hrxq_idx;
8829                         dv->actions[n++] = hrxq->action;
8830                 } else if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS) {
8831                         if (flow_dv_default_miss_resource_register
8832                                         (dev, error)) {
8833                                 rte_flow_error_set
8834                                         (error, rte_errno,
8835                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8836                                          "cannot create default miss resource");
8837                                 goto error_default_miss;
8838                         }
8839                         dh->rix_default_fate = MLX5_FLOW_FATE_DEFAULT_MISS;
8840                         dv->actions[n++] = priv->sh->default_miss.action;
8841                 }
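                /* All fate actions resolved; create the hardware rule. */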
8842                 err = mlx5_flow_os_create_flow(dv_h->matcher->matcher_object,
8843                                                (void *)&dv->value, n,
8844                                                dv->actions, &dh->drv_flow);
8845                 if (err) {
8846                         rte_flow_error_set(error, errno,
8847                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8848                                            NULL,
8849                                            "hardware refuses to create flow");
8850                         goto error;
8851                 }
8852                 if (priv->vmwa_context &&
8853                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
8854                         /*
8855                          * The rule contains the VLAN pattern.
8856                          * For a VF we create a VLAN interface so
8857                          * that the hypervisor sets the correct
8858                          * E-Switch vport context.
8859                          */
8860                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
8861                 }
8862         }
8863         return 0;
8864 error:
8865         if (dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
8866                 flow_dv_default_miss_resource_release(dev);
8867 error_default_miss:
8868         err = rte_errno; /* Save rte_errno before cleanup. */
8869         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
8870                        handle_idx, dh, next) {
8871                 /* hrxq is in a union; only release it for drop/queue fate actions. */
8872                 if (dh->rix_hrxq) {
8873                         if (dh->fate_action == MLX5_FLOW_FATE_DROP) {
8874                                 mlx5_hrxq_drop_release(dev);
8875                                 dh->rix_hrxq = 0;
8876                         } else if (dh->fate_action == MLX5_FLOW_FATE_QUEUE) {
8877                                 mlx5_hrxq_release(dev, dh->rix_hrxq);
8878                                 dh->rix_hrxq = 0;
8879                         }
8880                 }
8881                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
8882                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
8883         }
8884         rte_errno = err; /* Restore rte_errno. */
8885         return -rte_errno;
8886 }
8887
8888 /**
8889  * Release the flow matcher.
8890  *
8891  * @param dev
8892  *   Pointer to Ethernet device.
8893  * @param handle
8894  *   Pointer to mlx5_flow_handle.
8895  *
8896  * @return
8897  *   1 while a reference on it exists, 0 when freed.
8898  */
8899 static int
8900 flow_dv_matcher_release(struct rte_eth_dev *dev,
8901                         struct mlx5_flow_handle *handle)
8902 {
8903         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
8904
8905         MLX5_ASSERT(matcher->matcher_object);
8906         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
8907                 dev->data->port_id, (void *)matcher,
8908                 rte_atomic32_read(&matcher->refcnt));
8909         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
8910                 claim_zero(mlx5_flow_os_destroy_flow_matcher
8911                            (matcher->matcher_object));
8912                 LIST_REMOVE(matcher, next);
8913                 /* The table reference is decremented in the release interface. */
8914                 flow_dv_tbl_resource_release(dev, matcher->tbl);
8915                 mlx5_free(matcher);
8916                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
8917                         dev->data->port_id, (void *)matcher);
8918                 return 0;
8919         }
8920         return 1;
8921 }
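/*
 * A minimal sketch of the reference-counting pattern shared by the
 * release helpers in this file (names are illustrative, not driver API):
 *
 *	if (rte_atomic32_dec_and_test(&res->refcnt)) {
 *		destroy_hw_object(res);    - last user, drop the HW object
 *		free_resource(res);        - and the host memory
 *		return 0;                  - freed
 *	}
 *	return 1;                          - still referenced elsewhere
 */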
8922
8923 /**
8924  * Release an encap/decap resource.
8925  *
8926  * @param dev
8927  *   Pointer to Ethernet device.
8928  * @param handle
8929  *   Pointer to mlx5_flow_handle.
8930  *
8931  * @return
8932  *   1 while a reference on it exists, 0 when freed.
8933  */
8934 static int
8935 flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
8936                                      struct mlx5_flow_handle *handle)
8937 {
8938         struct mlx5_priv *priv = dev->data->dev_private;
8939         uint32_t idx = handle->dvh.rix_encap_decap;
8940         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
8941
8942         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8943                          idx);
8944         if (!cache_resource)
8945                 return 0;
8946         MLX5_ASSERT(cache_resource->action);
8947         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
8948                 (void *)cache_resource,
8949                 rte_atomic32_read(&cache_resource->refcnt));
8950         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8951                 claim_zero(mlx5_flow_os_destroy_flow_action
8952                                                 (cache_resource->action));
8953                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP],
8954                              &priv->sh->encaps_decaps, idx,
8955                              cache_resource, next);
8956                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_DECAP_ENCAP], idx);
8957                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
8958                         (void *)cache_resource);
8959                 return 0;
8960         }
8961         return 1;
8962 }
8963
8964 /**
8965  * Release a jump-to-table action resource.
8966  *
8967  * @param dev
8968  *   Pointer to Ethernet device.
8969  * @param handle
8970  *   Pointer to mlx5_flow_handle.
8971  *
8972  * @return
8973  *   1 while a reference on it exists, 0 when freed.
8974  */
8975 static int
8976 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
8977                                   struct mlx5_flow_handle *handle)
8978 {
8979         struct mlx5_priv *priv = dev->data->dev_private;
8980         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
8981         struct mlx5_flow_tbl_data_entry *tbl_data;
8982
8983         tbl_data = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_JUMP],
8984                              handle->rix_jump);
8985         if (!tbl_data)
8986                 return 0;
8987         cache_resource = &tbl_data->jump;
8988         MLX5_ASSERT(cache_resource->action);
8989         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
8990                 (void *)cache_resource,
8991                 rte_atomic32_read(&cache_resource->refcnt));
8992         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8993                 claim_zero(mlx5_flow_os_destroy_flow_action
8994                                                 (cache_resource->action));
8995                 /* The jump action memory is freed within the table release. */
8996                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
8997                 DRV_LOG(DEBUG, "jump table resource %p: removed",
8998                         (void *)cache_resource);
8999                 return 0;
9000         }
9001         return 1;
9002 }
9003
9004 /**
9005  * Release a default miss resource.
9006  *
9007  * @param dev
9008  *   Pointer to Ethernet device.
9009  * @return
9010  *   1 while a reference on it exists, 0 when freed.
9011  */
9012 static int
9013 flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
9014 {
9015         struct mlx5_priv *priv = dev->data->dev_private;
9016         struct mlx5_dev_ctx_shared *sh = priv->sh;
9017         struct mlx5_flow_default_miss_resource *cache_resource =
9018                         &sh->default_miss;
9019
9020         MLX5_ASSERT(cache_resource->action);
9021         DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
9022                         (void *)cache_resource->action,
9023                         rte_atomic32_read(&cache_resource->refcnt));
9024         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9025                 claim_zero(mlx5_flow_os_destroy_flow_action
9026                                 (cache_resource->action));
9027                 DRV_LOG(DEBUG, "default miss resource %p: removed",
9028                                 (void *)cache_resource->action);
9029                 return 0;
9030         }
9031         return 1;
9032 }
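/*
 * Note: unlike the per-flow ipool resources above, the default miss
 * resource is a singleton on the shared device context; the last release
 * destroys the action object but keeps the embedded structure.
 */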
9033
9034 /**
9035  * Release a modify-header resource.
9036  *
9037  * @param handle
9038  *   Pointer to mlx5_flow_handle.
9039  *
9040  * @return
9041  *   1 while a reference on it exists, 0 when freed.
9042  */
9043 static int
9044 flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
9045 {
9046         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
9047                                                         handle->dvh.modify_hdr;
9048
9049         MLX5_ASSERT(cache_resource->action);
9050         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
9051                 (void *)cache_resource,
9052                 rte_atomic32_read(&cache_resource->refcnt));
9053         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9054                 claim_zero(mlx5_flow_os_destroy_flow_action
9055                                                 (cache_resource->action));
9056                 LIST_REMOVE(cache_resource, next);
9057                 mlx5_free(cache_resource);
9058                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
9059                         (void *)cache_resource);
9060                 return 0;
9061         }
9062         return 1;
9063 }
9064
9065 /**
9066  * Release a port ID action resource.
9067  *
9068  * @param dev
9069  *   Pointer to Ethernet device.
9070  * @param handle
9071  *   Pointer to mlx5_flow_handle.
9072  *
9073  * @return
9074  *   1 while a reference on it exists, 0 when freed.
9075  */
9076 static int
9077 flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
9078                                         struct mlx5_flow_handle *handle)
9079 {
9080         struct mlx5_priv *priv = dev->data->dev_private;
9081         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
9082         uint32_t idx = handle->rix_port_id_action;
9083
9084         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9085                                         idx);
9086         if (!cache_resource)
9087                 return 0;
9088         MLX5_ASSERT(cache_resource->action);
9089         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
9090                 (void *)cache_resource,
9091                 rte_atomic32_read(&cache_resource->refcnt));
9092         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9093                 claim_zero(mlx5_flow_os_destroy_flow_action
9094                                                 (cache_resource->action));
9095                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
9096                              &priv->sh->port_id_action_list, idx,
9097                              cache_resource, next);
9098                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PORT_ID], idx);
9099                 DRV_LOG(DEBUG, "port id action resource %p: removed",
9100                         (void *)cache_resource);
9101                 return 0;
9102         }
9103         return 1;
9104 }
9105
9106 /**
9107  * Release a push VLAN action resource.
9108  *
9109  * @param dev
9110  *   Pointer to Ethernet device.
9111  * @param handle
9112  *   Pointer to mlx5_flow_handle.
9113  *
9114  * @return
9115  *   1 while a reference on it exists, 0 when freed.
9116  */
9117 static int
9118 flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
9119                                           struct mlx5_flow_handle *handle)
9120 {
9121         struct mlx5_priv *priv = dev->data->dev_private;
9122         uint32_t idx = handle->dvh.rix_push_vlan;
9123         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
9124
9125         cache_resource = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9126                                         idx);
9127         if (!cache_resource)
9128                 return 0;
9129         MLX5_ASSERT(cache_resource->action);
9130         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
9131                 (void *)cache_resource,
9132                 rte_atomic32_read(&cache_resource->refcnt));
9133         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
9134                 claim_zero(mlx5_flow_os_destroy_flow_action
9135                                                 (cache_resource->action));
9136                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
9137                              &priv->sh->push_vlan_action_list, idx,
9138                              cache_resource, next);
9139                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN], idx);
9140                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
9141                         (void *)cache_resource);
9142                 return 0;
9143         }
9144         return 1;
9145 }
9146
9147 /**
9148  * Release the fate resource.
9149  *
9150  * @param dev
9151  *   Pointer to Ethernet device.
9152  * @param handle
9153  *   Pointer to mlx5_flow_handle.
9154  */
9155 static void
9156 flow_dv_fate_resource_release(struct rte_eth_dev *dev,
9157                                struct mlx5_flow_handle *handle)
9158 {
9159         if (!handle->rix_fate)
9160                 return;
9161         switch (handle->fate_action) {
9162         case MLX5_FLOW_FATE_DROP:
9163                 mlx5_hrxq_drop_release(dev);
9164                 break;
9165         case MLX5_FLOW_FATE_QUEUE:
9166                 mlx5_hrxq_release(dev, handle->rix_hrxq);
9167                 break;
9168         case MLX5_FLOW_FATE_JUMP:
9169                 flow_dv_jump_tbl_resource_release(dev, handle);
9170                 break;
9171         case MLX5_FLOW_FATE_PORT_ID:
9172                 flow_dv_port_id_action_resource_release(dev, handle);
9173                 break;
9174         case MLX5_FLOW_FATE_DEFAULT_MISS:
9175                 flow_dv_default_miss_resource_release(dev);
9176                 break;
9177         default:
9178                 DRV_LOG(DEBUG, "Incorrect fate action: %d", handle->fate_action);
9179                 break;
9180         }
9181         handle->rix_fate = 0;
9182 }
9183
9184 /**
9185  * Remove the flow from the NIC but keep it in memory.
9186  * Lock free; the mutex must be acquired by the caller.
9187  *
9188  * @param[in] dev
9189  *   Pointer to Ethernet device.
9190  * @param[in, out] flow
9191  *   Pointer to flow structure.
9192  */
9193 static void
9194 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
9195 {
9196         struct mlx5_flow_handle *dh;
9197         uint32_t handle_idx;
9198         struct mlx5_priv *priv = dev->data->dev_private;
9199
9200         if (!flow)
9201                 return;
9202         handle_idx = flow->dev_handles;
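        /* Walk the indexed-pool linked list of device handles. */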
9203         while (handle_idx) {
9204                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9205                                     handle_idx);
9206                 if (!dh)
9207                         return;
9208                 if (dh->drv_flow) {
9209                         claim_zero(mlx5_flow_os_destroy_flow(dh->drv_flow));
9210                         dh->drv_flow = NULL;
9211                 }
9212                 if (dh->fate_action == MLX5_FLOW_FATE_DROP ||
9213                     dh->fate_action == MLX5_FLOW_FATE_QUEUE ||
9214                     dh->fate_action == MLX5_FLOW_FATE_DEFAULT_MISS)
9215                         flow_dv_fate_resource_release(dev, dh);
9216                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
9217                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
9218                 handle_idx = dh->next.next;
9219         }
9220 }
9221
9222 /**
9223  * Remove the flow from the NIC and the memory.
9224  * Lock free; the mutex must be acquired by the caller.
9225  *
9226  * @param[in] dev
9227  *   Pointer to the Ethernet device structure.
9228  * @param[in, out] flow
9229  *   Pointer to flow structure.
9230  */
9231 static void
9232 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9233 {
9234         struct mlx5_flow_handle *dev_handle;
9235         struct mlx5_priv *priv = dev->data->dev_private;
9236
9237         if (!flow)
9238                 return;
9239         __flow_dv_remove(dev, flow);
9240         if (flow->counter) {
9241                 flow_dv_counter_release(dev, flow->counter);
9242                 flow->counter = 0;
9243         }
9244         if (flow->meter) {
9245                 struct mlx5_flow_meter *fm;
9246
9247                 fm = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MTR],
9248                                     flow->meter);
9249                 if (fm)
9250                         mlx5_flow_meter_detach(fm);
9251                 flow->meter = 0;
9252         }
9253         while (flow->dev_handles) {
9254                 uint32_t tmp_idx = flow->dev_handles;
9255
9256                 dev_handle = mlx5_ipool_get(priv->sh->ipool
9257                                             [MLX5_IPOOL_MLX5_FLOW], tmp_idx);
9258                 if (!dev_handle)
9259                         return;
9260                 flow->dev_handles = dev_handle->next.next;
9261                 if (dev_handle->dvh.matcher)
9262                         flow_dv_matcher_release(dev, dev_handle);
9263                 if (dev_handle->dvh.rix_encap_decap)
9264                         flow_dv_encap_decap_resource_release(dev, dev_handle);
9265                 if (dev_handle->dvh.modify_hdr)
9266                         flow_dv_modify_hdr_resource_release(dev_handle);
9267                 if (dev_handle->dvh.rix_push_vlan)
9268                         flow_dv_push_vlan_action_resource_release(dev,
9269                                                                   dev_handle);
9270                 if (dev_handle->dvh.rix_tag)
9271                         flow_dv_tag_release(dev,
9272                                             dev_handle->dvh.rix_tag);
9273                 flow_dv_fate_resource_release(dev, dev_handle);
9274                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
9275                            tmp_idx);
9276         }
9277 }
9278
9279 /**
9280  * Query a DV flow rule for its statistics via DevX.
9281  *
9282  * @param[in] dev
9283  *   Pointer to Ethernet device.
9284  * @param[in] flow
9285  *   Pointer to the sub flow.
9286  * @param[out] data
9287  *   Data retrieved by the query.
9288  * @param[out] error
9289  *   Perform verbose error reporting if not NULL.
9290  *
9291  * @return
9292  *   0 on success, a negative errno value otherwise and rte_errno is set.
9293  */
9294 static int
9295 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
9296                     void *data, struct rte_flow_error *error)
9297 {
9298         struct mlx5_priv *priv = dev->data->dev_private;
9299         struct rte_flow_query_count *qc = data;
9300
9301         if (!priv->config.devx)
9302                 return rte_flow_error_set(error, ENOTSUP,
9303                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9304                                           NULL,
9305                                           "counters are not supported");
9306         if (flow->counter) {
9307                 uint64_t pkts, bytes;
9308                 struct mlx5_flow_counter *cnt;
9309                 int err;
9310
9311                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
9312                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
9313                                            &bytes);
9314
9315                 if (err)
9316                         return rte_flow_error_set(error, -err,
9317                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9318                                         NULL, "cannot read counters");
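                /* Report deltas relative to the snapshot saved at the last reset. */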
9319                 qc->hits_set = 1;
9320                 qc->bytes_set = 1;
9321                 qc->hits = pkts - cnt->hits;
9322                 qc->bytes = bytes - cnt->bytes;
9323                 if (qc->reset) {
9324                         cnt->hits = pkts;
9325                         cnt->bytes = bytes;
9326                 }
9327                 return 0;
9328         }
9329         return rte_flow_error_set(error, EINVAL,
9330                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
9331                                   NULL,
9332                                   "counters are not available");
9333 }
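/*
 * Hedged usage sketch (application side, assuming a flow created with a
 * COUNT action; port_id and flow are illustrative placeholders):
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action act[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, act, &qc, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 */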
9334
9335 /**
9336  * Query a flow.
9337  *
9338  * @see rte_flow_query()
9339  * @see rte_flow_ops
9340  */
9341 static int
9342 flow_dv_query(struct rte_eth_dev *dev,
9343               struct rte_flow *flow,
9344               const struct rte_flow_action *actions,
9345               void *data,
9346               struct rte_flow_error *error)
9347 {
9348         int ret = -EINVAL;
9349
9350         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
9351                 switch (actions->type) {
9352                 case RTE_FLOW_ACTION_TYPE_VOID:
9353                         break;
9354                 case RTE_FLOW_ACTION_TYPE_COUNT:
9355                         ret = flow_dv_query_count(dev, flow, data, error);
9356                         break;
9357                 default:
9358                         return rte_flow_error_set(error, ENOTSUP,
9359                                                   RTE_FLOW_ERROR_TYPE_ACTION,
9360                                                   actions,
9361                                                   "action not supported");
9362                 }
9363         }
9364         return ret;
9365 }
9366
9367 /**
9368  * Destroy the meter table set.
9369  * Lock free; the mutex must be acquired by the caller.
9370  *
9371  * @param[in] dev
9372  *   Pointer to Ethernet device.
9373  * @param[in] tbl
9374  *   Pointer to the meter table set.
9375  *
9376  * @return
9377  *   Always 0.
9378  */
9379 static int
9380 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
9381                         struct mlx5_meter_domains_infos *tbl)
9382 {
9383         struct mlx5_priv *priv = dev->data->dev_private;
9384         struct mlx5_meter_domains_infos *mtd =
9385                                 (struct mlx5_meter_domains_infos *)tbl;
9386
9387         if (!mtd || !priv->config.dv_flow_en)
9388                 return 0;
9389         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
9390                 claim_zero(mlx5_flow_os_destroy_flow
9391                            (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
9392         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
9393                 claim_zero(mlx5_flow_os_destroy_flow
9394                            (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
9395         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
9396                 claim_zero(mlx5_flow_os_destroy_flow
9397                            (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
9398         if (mtd->egress.color_matcher)
9399                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9400                            (mtd->egress.color_matcher));
9401         if (mtd->egress.any_matcher)
9402                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9403                            (mtd->egress.any_matcher));
9404         if (mtd->egress.tbl)
9405                 flow_dv_tbl_resource_release(dev, mtd->egress.tbl);
9406         if (mtd->egress.sfx_tbl)
9407                 flow_dv_tbl_resource_release(dev, mtd->egress.sfx_tbl);
9408         if (mtd->ingress.color_matcher)
9409                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9410                            (mtd->ingress.color_matcher));
9411         if (mtd->ingress.any_matcher)
9412                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9413                            (mtd->ingress.any_matcher));
9414         if (mtd->ingress.tbl)
9415                 flow_dv_tbl_resource_release(dev, mtd->ingress.tbl);
9416         if (mtd->ingress.sfx_tbl)
9417                 flow_dv_tbl_resource_release(dev, mtd->ingress.sfx_tbl);
9418         if (mtd->transfer.color_matcher)
9419                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9420                            (mtd->transfer.color_matcher));
9421         if (mtd->transfer.any_matcher)
9422                 claim_zero(mlx5_flow_os_destroy_flow_matcher
9423                            (mtd->transfer.any_matcher));
9424         if (mtd->transfer.tbl)
9425                 flow_dv_tbl_resource_release(dev, mtd->transfer.tbl);
9426         if (mtd->transfer.sfx_tbl)
9427                 flow_dv_tbl_resource_release(dev, mtd->transfer.sfx_tbl);
9428         if (mtd->drop_actn)
9429                 claim_zero(mlx5_flow_os_destroy_flow_action(mtd->drop_actn));
9430         mlx5_free(mtd);
9431         return 0;
9432 }
9433
9434 /* Number of meter flow actions: count and jump, or count and drop. */
9435 #define METER_ACTIONS 2
9436
9437 /**
9438  * Create the meter and suffix tables for the specified domain.
9439  *
9440  * @param[in] dev
9441  *   Pointer to Ethernet device.
9442  * @param[in,out] mtb
9443  *   Pointer to DV meter table set.
9444  * @param[in] egress
9445  *   Table attribute (egress direction).
9446  * @param[in] transfer
9447  *   Table attribute (transfer/FDB domain).
9448  * @param[in] color_reg_c_idx
9449  *   Reg C index for color match.
9450  *
9451  * @return
9452  *   0 on success, -1 otherwise and rte_errno is set.
9453  */
9454 static int
9455 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
9456                            struct mlx5_meter_domains_infos *mtb,
9457                            uint8_t egress, uint8_t transfer,
9458                            uint32_t color_reg_c_idx)
9459 {
9460         struct mlx5_priv *priv = dev->data->dev_private;
9461         struct mlx5_dev_ctx_shared *sh = priv->sh;
9462         struct mlx5_flow_dv_match_params mask = {
9463                 .size = sizeof(mask.buf),
9464         };
9465         struct mlx5_flow_dv_match_params value = {
9466                 .size = sizeof(value.buf),
9467         };
9468         struct mlx5dv_flow_matcher_attr dv_attr = {
9469                 .type = IBV_FLOW_ATTR_NORMAL,
9470                 .priority = 0,
9471                 .match_criteria_enable = 0,
9472                 .match_mask = (void *)&mask,
9473         };
9474         void *actions[METER_ACTIONS];
9475         struct mlx5_meter_domain_info *dtb;
9476         struct rte_flow_error error;
9477         int i = 0;
9478         int ret;
9479
9480         if (transfer)
9481                 dtb = &mtb->transfer;
9482         else if (egress)
9483                 dtb = &mtb->egress;
9484         else
9485                 dtb = &mtb->ingress;
9486         /* Create the meter table with METER level. */
9487         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
9488                                             egress, transfer, &error);
9489         if (!dtb->tbl) {
9490                 DRV_LOG(ERR, "Failed to create meter policer table.");
9491                 return -1;
9492         }
9493         /* Create the meter suffix table with SUFFIX level. */
9494         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
9495                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
9496                                             egress, transfer, &error);
9497         if (!dtb->sfx_tbl) {
9498                 DRV_LOG(ERR, "Failed to create meter suffix table.");
9499                 return -1;
9500         }
9501         /* Create matchers, Any and Color. */
9502         dv_attr.priority = 3;
9503         dv_attr.match_criteria_enable = 0;
9504         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9505                                                &dtb->any_matcher);
9506         if (ret) {
9507                 DRV_LOG(ERR, "Failed to create meter"
9508                              " policer default matcher.");
9509                 goto error_exit;
9510         }
9511         dv_attr.priority = 0;
9512         dv_attr.match_criteria_enable =
9513                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
9514         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
9515                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
9516         ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, dtb->tbl->obj,
9517                                                &dtb->color_matcher);
9518         if (ret) {
9519                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
9520                 goto error_exit;
9521         }
9522         if (mtb->count_actns[RTE_MTR_DROPPED])
9523                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
9524         actions[i++] = mtb->drop_actn;
9525         /* Default rule: lowest priority, match any, actions: drop. */
9526         ret = mlx5_flow_os_create_flow(dtb->any_matcher, (void *)&value, i,
9527                                        actions,
9528                                        &dtb->policer_rules[RTE_MTR_DROPPED]);
9529         if (ret) {
9530                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
9531                 goto error_exit;
9532         }
9533         return 0;
9534 error_exit:
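        /* Partially created objects are released by the caller via flow_dv_destroy_mtr_tbl(). */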
9535         return -1;
9536 }
9537
9538 /**
9539  * Create the needed meter and suffix tables.
9540  * Lock free; the mutex must be acquired by the caller.
9541  *
9542  * @param[in] dev
9543  *   Pointer to Ethernet device.
9544  * @param[in] fm
9545  *   Pointer to the flow meter.
9546  *
9547  * @return
9548  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
9549  */
9550 static struct mlx5_meter_domains_infos *
9551 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
9552                        const struct mlx5_flow_meter *fm)
9553 {
9554         struct mlx5_priv *priv = dev->data->dev_private;
9555         struct mlx5_meter_domains_infos *mtb;
9556         int ret;
9557         int i;
9558
9559         if (!priv->mtr_en) {
9560                 rte_errno = ENOTSUP;
9561                 return NULL;
9562         }
9563         mtb = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mtb), 0, SOCKET_ID_ANY);
9564         if (!mtb) {
9565                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
9566                 return NULL;
9567         }
9568         /* Create the meter count actions. */
9569         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
9570                 struct mlx5_flow_counter *cnt;
9571                 if (!fm->policer_stats.cnt[i])
9572                         continue;
9573                 cnt = flow_dv_counter_get_by_idx(dev,
9574                       fm->policer_stats.cnt[i], NULL);
9575                 mtb->count_actns[i] = cnt->action;
9576         }
9577         /* Create drop action. */
9578         ret = mlx5_flow_os_create_flow_action_drop(&mtb->drop_actn);
9579         if (ret) {
9580                 DRV_LOG(ERR, "Failed to create drop action.");
9581                 goto error_exit;
9582         }
9583         /* Egress meter table. */
9584         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
9585         if (ret) {
9586                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
9587                 goto error_exit;
9588         }
9589         /* Ingress meter table. */
9590         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
9591         if (ret) {
9592                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
9593                 goto error_exit;
9594         }
9595         /* FDB meter table. */
9596         if (priv->config.dv_esw_en) {
9597                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
9598                                                  priv->mtr_color_reg);
9599                 if (ret) {
9600                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
9601                         goto error_exit;
9602                 }
9603         }
9604         return mtb;
9605 error_exit:
9606         flow_dv_destroy_mtr_tbl(dev, mtb);
9607         return NULL;
9608 }
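/*
 * Informational sketch of the resulting layout: each enabled domain
 * (ingress, egress, FDB) gets a METER-level policer table plus a
 * SUFFIX-level table. Colored packets jump from the policer table to
 * the suffix table; everything else hits the lowest-priority drop rule.
 */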
9609
9610 /**
9611  * Destroy the policer rules of a domain.
9612  *
9613  * @param[in] dt
9614  *   Pointer to domain table.
9615  */
9616 static void
9617 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
9618 {
9619         int i;
9620
9621         for (i = 0; i < RTE_MTR_DROPPED; i++) {
9622                 if (dt->policer_rules[i]) {
9623                         claim_zero(mlx5_flow_os_destroy_flow
9624                                    (dt->policer_rules[i]));
9625                         dt->policer_rules[i] = NULL;
9626                 }
9627         }
9628         if (dt->jump_actn) {
9629                 claim_zero(mlx5_flow_os_destroy_flow_action(dt->jump_actn));
9630                 dt->jump_actn = NULL;
9631         }
9632 }
9633
9634 /**
9635  * Destroy policer rules.
9636  *
9637  * @param[in] dev
9638  *   Pointer to Ethernet device.
9639  * @param[in] fm
9640  *   Pointer to flow meter structure.
9641  * @param[in] attr
9642  *   Pointer to flow attributes.
9643  *
9644  * @return
9645  *   Always 0.
9646  */
9647 static int
9648 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
9649                               const struct mlx5_flow_meter *fm,
9650                               const struct rte_flow_attr *attr)
9651 {
9652         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
9653
9654         if (!mtb)
9655                 return 0;
9656         if (attr->egress)
9657                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
9658         if (attr->ingress)
9659                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
9660         if (attr->transfer)
9661                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
9662         return 0;
9663 }
9664
/**
 * Create the meter policer rules for a specific domain.
 *
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] dtb
 *   Pointer to the meter domain table.
 * @param[in] mtr_reg_c
 *   Color match REG_C.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
				    struct mlx5_meter_domain_info *dtb,
				    uint8_t mtr_reg_c)
{
	struct mlx5_flow_dv_match_params matcher = {
		.size = sizeof(matcher.buf),
	};
	struct mlx5_flow_dv_match_params value = {
		.size = sizeof(value.buf),
	};
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	void *actions[METER_ACTIONS];
	int i;
	int ret = 0;

	/* Create jump action. */
	if (!dtb->jump_actn)
		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(dtb->sfx_tbl->obj, &dtb->jump_actn);
	if (ret) {
		DRV_LOG(ERR, "Failed to create policer jump action.");
		goto error;
	}
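	/*
	 * Create one rule per color: match the packet color in the meter
	 * color register, count it when a counter is configured, then
	 * either drop the packet or jump to the suffix table.
	 */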
	for (i = 0; i < RTE_MTR_DROPPED; i++) {
		int j = 0;

		flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
				       rte_col_2_mlx5_col(i), UINT8_MAX);
		if (mtb->count_actns[i])
			actions[j++] = mtb->count_actns[i];
		if (fm->action[i] == MTR_POLICER_ACTION_DROP)
			actions[j++] = mtb->drop_actn;
		else
			actions[j++] = dtb->jump_actn;
		ret = mlx5_flow_os_create_flow(dtb->color_matcher,
					       (void *)&value, j, actions,
					       &dtb->policer_rules[i]);
		if (ret) {
			DRV_LOG(ERR, "Failed to create policer rule.");
			goto error;
		}
	}
	return 0;
error:
	rte_errno = errno;
	return -1;
}

/**
 * Create policer rules.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
 *   Pointer to flow meter structure.
 * @param[in] attr
 *   Pointer to flow attributes.
 *
 * @return
 *   0 on success, -1 otherwise.
 */
static int
flow_dv_create_policer_rules(struct rte_eth_dev *dev,
			     struct mlx5_flow_meter *fm,
			     const struct rte_flow_attr *attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_meter_domains_infos *mtb = fm->mfts;
	int ret;

	if (attr->egress) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
						priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create egress policer.");
			goto error;
		}
	}
	if (attr->ingress) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
						priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create ingress policer.");
			goto error;
		}
	}
	if (attr->transfer) {
		ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
						priv->mtr_color_reg);
		if (ret) {
			DRV_LOG(ERR, "Failed to create transfer policer.");
			goto error;
		}
	}
	return 0;
error:
	flow_dv_destroy_policer_rules(dev, fm, attr);
	return -1;
}

/**
 * Query a DevX counter.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Index to the flow counter.
 * @param[in] clear
 *   Set to clear the counter statistics.
 * @param[out] pkts
 *   The statistics value of packets.
 * @param[out] bytes
 *   The statistics value of bytes.
 *
 * @return
 *   0 on success, otherwise return -1.
 */
static int
flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
		      uint64_t *pkts, uint64_t *bytes)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_counter *cnt;
	uint64_t inn_pkts, inn_bytes;
	int ret;

	if (!priv->config.devx)
		return -1;

	ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
	if (ret)
		return -1;
	cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
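	/*
	 * The hardware counter is free-running; report the delta against
	 * the values saved at the last clear.
	 */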
	*pkts = inn_pkts - cnt->hits;
	*bytes = inn_bytes - cnt->bytes;
	if (clear) {
		cnt->hits = inn_pkts;
		cnt->bytes = inn_bytes;
	}
	return 0;
}

/**
 * Get aged-out flows.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] context
 *   The address of an array of pointers to the aged-out flows contexts.
 * @param[in] nb_contexts
 *   The length of the context array.
 * @param[out] error
 *   Perform verbose error reporting if not NULL. Initialized in case of
 *   error only.
 *
 * @return
 *   The number of aged-out flows on success, otherwise a negative errno
 *   value. If nb_contexts is 0, return the total amount of aged contexts.
 *   If nb_contexts is not 0, return the amount of aged flows reported
 *   in the context array.
 */
static int
flow_get_aged_flows(struct rte_eth_dev *dev,
		    void **context,
		    uint32_t nb_contexts,
		    struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_age_info *age_info;
	struct mlx5_age_param *age_param;
	struct mlx5_flow_counter *counter;
	int nb_flows = 0;

	if (nb_contexts && !context)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "context cannot be NULL when"
					  " nb_contexts is not zero");
	age_info = GET_PORT_AGE_INFO(priv);
	rte_spinlock_lock(&age_info->aged_sl);
	TAILQ_FOREACH(counter, &age_info->aged_counters, next) {
		nb_flows++;
		if (nb_contexts) {
			age_param = MLX5_CNT_TO_AGE(counter);
			context[nb_flows - 1] = age_param->context;
			if (!(--nb_contexts))
				break;
		}
	}
	rte_spinlock_unlock(&age_info->aged_sl);
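	/* Re-arm the trigger so new aged flows raise the next aging event. */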
	MLX5_AGE_SET(age_info, MLX5_AGE_TRIGGER);
	return nb_flows;
}

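/*
 * The thunks below wrap the lock-free implementations with the per-device
 * mutex, serializing access to resources that are shared when several
 * ports use the same device context.
 */
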
/*
 * Mutex-protected thunk to lock-free __flow_dv_translate().
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
		  struct mlx5_flow *dev_flow,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;

	flow_dv_shared_lock(dev);
	ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
	flow_dv_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_apply().
 */
static int
flow_dv_apply(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	int ret;

	flow_dv_shared_lock(dev);
	ret = __flow_dv_apply(dev, flow, error);
	flow_dv_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_remove().
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_dv_shared_lock(dev);
	__flow_dv_remove(dev, flow);
	flow_dv_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to lock-free __flow_dv_destroy().
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_dv_shared_lock(dev);
	__flow_dv_destroy(dev, flow);
	flow_dv_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
 */
static uint32_t
flow_dv_counter_allocate(struct rte_eth_dev *dev)
{
	uint32_t cnt;

	flow_dv_shared_lock(dev);
	cnt = flow_dv_counter_alloc(dev, 0, 0, 1, 0);
	flow_dv_shared_unlock(dev);
	return cnt;
}

/*
 * Mutex-protected thunk to lock-free flow_dv_counter_release().
 */
static void
flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
{
	flow_dv_shared_lock(dev);
	flow_dv_counter_release(dev, cnt);
	flow_dv_shared_unlock(dev);
}

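/* DV flow engine callbacks exposed to the generic mlx5 flow layer. */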
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
	.create_mtr_tbls = flow_dv_create_mtr_tbl,
	.destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
	.create_policer_rules = flow_dv_create_policer_rules,
	.destroy_policer_rules = flow_dv_destroy_policer_rules,
	.counter_alloc = flow_dv_counter_allocate,
	.counter_free = flow_dv_counter_free,
	.counter_query = flow_dv_counter_query,
	.get_aged_flows = flow_get_aged_flows,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */