net/mlx5: fix push VLAN action to use item info
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>
#include <rte_gtp.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

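/* Flow attributes deduced from the pattern items; see flow_dv_attr_init(). */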
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

static int
flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
                             struct mlx5_flow_tbl_resource *tbl);

/**
 * Initialize the flow attributes structure according to the flow items' types.
 *
 * flow_dv_validate() avoids flows with multiple L3/L4 layers except in
 * tunnel mode. For tunnel mode, the items to be modified are the outermost
 * ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
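 *
 * For example (illustrative), for the pattern eth / ipv4 / udp this routine
 * ends up with attr->ipv4 = 1, attr->udp = 1 and attr->valid = 1.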
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
                  struct mlx5_flow *dev_flow, bool tunnel_decap)
{
        uint64_t layers = dev_flow->handle->layers;

        /*
         * If layers is already initialized, this dev_flow is the suffix
         * flow and the layer flags were set by the prefix flow. Use the
         * layer flags from the prefix flow, as the suffix flow may not
         * contain the user-defined items after the flow is split.
         */
        if (layers) {
                if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
                        attr->ipv4 = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
                        attr->ipv6 = 1;
                if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
                        attr->tcp = 1;
                else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
                        attr->udp = 1;
                attr->valid = 1;
                return;
        }
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                uint8_t next_protocol = 0xff;

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        if (tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                            item->mask)->hdr.next_proto_id)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->spec))->hdr.next_proto_id &
                                    ((const struct rte_flow_item_ipv4 *)
                                      (item->mask))->hdr.next_proto_id;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        if (item->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                            item->mask)->hdr.proto)
                                next_protocol =
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->spec))->hdr.proto &
                                    ((const struct rte_flow_item_ipv6 *)
                                      (item->mask))->hdr.proto;
                        if ((next_protocol == IPPROTO_IPIP ||
                            next_protocol == IPPROTO_IPV6) && tunnel_decap)
                                attr->attr = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

/**
 * Convert rte_mtr_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_mtr_color.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

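/* Flag IP-in-IP and IPv6 encapsulation tunnel layers based on next protocol. */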
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        MLX5_ASSERT(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
                    item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                MLX5_ASSERT(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
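 *
 * For example (illustrative), OF_SET_VLAN_PCP with vlan_pcp = 5 sets the
 * three PCP bits of vlan->vlan_tci to 5 << 13 (0xa000) and leaves the VID
 * bits untouched.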
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch a 1, 2, 3 or 4 byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
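 *
 *   For example (illustrative), for the big-endian bytes {0x12, 0x34, 0x56}
 *   a call with size 3 returns 0x123456 on hosts of either endianness.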
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                MLX5_ASSERT(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * The data length of each action is determined by the provided field
 * description and the item mask. The data bit offset and width of each
 * action are determined by the item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
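 *
 * For example (illustrative), a 4-byte field with mask 0x00ffff00 yields
 * an action with bit offset 8 and bit length 16.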
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present, it defines the actual field width.
         */
        MLX5_ASSERT(item->mask);
        MLX5_ASSERT(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MAX_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                MLX5_ASSERT(size_b);
                /* A zero length encodes a full 32-bit field. */
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i] = (struct mlx5_modification_cmd) {
                        .action_type = type,
                        .field = field->id,
                        .offset = off_b,
                        .length = size_b,
                };
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        MLX5_ASSERT(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        MLX5_ASSERT(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        if (resource->actions_num == i)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        resource->actions_num = i;
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = field->id,
                .length = field->size,
                .offset = field->offset,
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        } else {
                MLX5_ASSERT(attr->tcp);
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[in] dev_flow
 *   Pointer to the sub flow.
 * @param[in] tunnel_decap
 *   Whether action is after tunnel decapsulation.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
                         bool tunnel_decap, struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        } else {
                MLX5_ASSERT(attr->ipv6);
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
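                 * (X * UINT32_MAX is congruent to -X modulo 2^32.)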
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
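                 * (X * UINT32_MAX is congruent to -X modulo 2^32.)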
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

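/* Map metadata register ids (REG_*) to modify-header field ids. */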
static enum mlx5_modification_field reg_to_field[] = {
        [REG_NONE] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MAX_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        MLX5_ASSERT(conf->id != REG_NONE);
        MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
        actions[i] = (struct mlx5_modification_cmd) {
                .action_type = MLX5_MODIFICATION_TYPE_SET,
                .field = reg_to_field[conf->id],
        };
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        MLX5_ASSERT(ret != REG_NONE);
        MLX5_ASSERT((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        MLX5_ASSERT(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                MLX5_ASSERT(reg_c0);
                MLX5_ASSERT(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores the endianness, because
                         * there is no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode the hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
                {0, 0, 0},
        };
        int reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        MLX5_ASSERT(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

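                /* Shift the mark into the reg_c[0] bits reported as usable. */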
                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0].id = reg_to_field[reg];
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        int reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        int reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * In the datapath code there are no endianness
         * conversions, for performance reasons; all
         * pattern conversions are done in rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                MLX5_ASSERT(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
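        /* The 6 DSCP bits are byte aligned to bits 0-5; the mask is 0x3f. */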
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset of IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits when byte aligned to
         * bits 0 to 5, for compatibility with IPv4. So there is no need
         * to shift the bits in the IPv6 case; the shifted mask below is
         * 0x3f, as for IPv4.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

1355 /**
1356  * Validate MARK item.
1357  *
1358  * @param[in] dev
1359  *   Pointer to the rte_eth_dev structure.
1360  * @param[in] item
1361  *   Item specification.
1362  * @param[in] attr
1363  *   Attributes of flow that includes this item.
1364  * @param[out] error
1365  *   Pointer to error structure.
1366  *
1367  * @return
1368  *   0 on success, a negative errno value otherwise and rte_errno is set.
1369  */
1370 static int
1371 flow_dv_validate_item_mark(struct rte_eth_dev *dev,
1372                            const struct rte_flow_item *item,
1373                            const struct rte_flow_attr *attr __rte_unused,
1374                            struct rte_flow_error *error)
1375 {
1376         struct mlx5_priv *priv = dev->data->dev_private;
1377         struct mlx5_dev_config *config = &priv->config;
1378         const struct rte_flow_item_mark *spec = item->spec;
1379         const struct rte_flow_item_mark *mask = item->mask;
1380         const struct rte_flow_item_mark nic_mask = {
1381                 .id = priv->sh->dv_mark_mask,
1382         };
1383         int ret;
1384
1385         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1386                 return rte_flow_error_set(error, ENOTSUP,
1387                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1388                                           "extended metadata feature"
1389                                           " isn't enabled");
1390         if (!mlx5_flow_ext_mreg_supported(dev))
1391                 return rte_flow_error_set(error, ENOTSUP,
1392                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1393                                           "extended metadata register"
1394                                           " isn't supported");
1395         if (!nic_mask.id)
1396                 return rte_flow_error_set(error, ENOTSUP,
1397                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1398                                           "extended metadata register"
1399                                           " isn't available");
1400         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1401         if (ret < 0)
1402                 return ret;
1403         if (!spec)
1404                 return rte_flow_error_set(error, EINVAL,
1405                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1406                                           item->spec,
1407                                           "data cannot be empty");
1408         if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
1409                 return rte_flow_error_set(error, EINVAL,
1410                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1411                                           &spec->id,
1412                                           "mark id exceeds the limit");
1413         if (!mask)
1414                 mask = &nic_mask;
1415         if (!mask->id)
1416                 return rte_flow_error_set(error, EINVAL,
1417                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1418                                         "mask cannot be zero");
1419
1420         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1421                                         (const uint8_t *)&nic_mask,
1422                                         sizeof(struct rte_flow_item_mark),
1423                                         error);
1424         if (ret < 0)
1425                 return ret;
1426         return 0;
1427 }
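
/*
 * Editor's illustrative sketch (not part of the original driver): a rule
 * matching a previously set MARK value, as validated above. It assumes
 * dv_xmeta_en is enabled (non-legacy mode); names and values are
 * hypothetical.
 */
static __rte_unused struct rte_flow *
example_match_mark_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        struct rte_flow_item_mark mark_spec = { .id = 42 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_MARK, .spec = &mark_spec },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}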
1428
1429 /**
1430  * Validate META item.
1431  *
1432  * @param[in] dev
1433  *   Pointer to the rte_eth_dev structure.
1434  * @param[in] item
1435  *   Item specification.
1436  * @param[in] attr
1437  *   Attributes of flow that includes this item.
1438  * @param[out] error
1439  *   Pointer to error structure.
1440  *
1441  * @return
1442  *   0 on success, a negative errno value otherwise and rte_errno is set.
1443  */
1444 static int
1445 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
1446                            const struct rte_flow_item *item,
1447                            const struct rte_flow_attr *attr,
1448                            struct rte_flow_error *error)
1449 {
1450         struct mlx5_priv *priv = dev->data->dev_private;
1451         struct mlx5_dev_config *config = &priv->config;
1452         const struct rte_flow_item_meta *spec = item->spec;
1453         const struct rte_flow_item_meta *mask = item->mask;
1454         struct rte_flow_item_meta nic_mask = {
1455                 .data = UINT32_MAX
1456         };
1457         int reg;
1458         int ret;
1459
1460         if (!spec)
1461                 return rte_flow_error_set(error, EINVAL,
1462                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1463                                           item->spec,
1464                                           "data cannot be empty");
1465         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1466                 if (!mlx5_flow_ext_mreg_supported(dev))
1467                         return rte_flow_error_set(error, ENOTSUP,
1468                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1469                                           "extended metadata register"
1470                                           " isn't supported");
1471                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1472                 if (reg < 0)
1473                         return reg;
1474                 if (reg == REG_B)
1475                         return rte_flow_error_set(error, ENOTSUP,
1476                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1477                                           "match on reg_b "
1478                                           "isn't supported");
1479                 if (reg != REG_A)
1480                         nic_mask.data = priv->sh->dv_meta_mask;
1481         }
1482         if (!mask)
1483                 mask = &rte_flow_item_meta_mask;
1484         if (!mask->data)
1485                 return rte_flow_error_set(error, EINVAL,
1486                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1487                                         "mask cannot be zero");
1488
1489         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1490                                         (const uint8_t *)&nic_mask,
1491                                         sizeof(struct rte_flow_item_meta),
1492                                         error);
1493         return ret;
1494 }
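
/*
 * Editor's illustrative sketch (not part of the original driver): matching
 * on metadata set on Tx or by a SET_META action, as validated above.
 * Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_match_meta_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
        struct rte_flow_item_meta meta_mask = { .data = UINT32_MAX };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_META,
                  .spec = &meta_spec, .mask = &meta_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}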
1495
1496 /**
1497  * Validate TAG item.
1498  *
1499  * @param[in] dev
1500  *   Pointer to the rte_eth_dev structure.
1501  * @param[in] item
1502  *   Item specification.
1503  * @param[in] attr
1504  *   Attributes of flow that includes this item.
1505  * @param[out] error
1506  *   Pointer to error structure.
1507  *
1508  * @return
1509  *   0 on success, a negative errno value otherwise and rte_errno is set.
1510  */
1511 static int
1512 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1513                           const struct rte_flow_item *item,
1514                           const struct rte_flow_attr *attr __rte_unused,
1515                           struct rte_flow_error *error)
1516 {
1517         const struct rte_flow_item_tag *spec = item->spec;
1518         const struct rte_flow_item_tag *mask = item->mask;
1519         const struct rte_flow_item_tag nic_mask = {
1520                 .data = RTE_BE32(UINT32_MAX),
1521                 .index = 0xff,
1522         };
1523         int ret;
1524
1525         if (!mlx5_flow_ext_mreg_supported(dev))
1526                 return rte_flow_error_set(error, ENOTSUP,
1527                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1528                                           "extensive metadata register"
1529                                           " isn't supported");
1530         if (!spec)
1531                 return rte_flow_error_set(error, EINVAL,
1532                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1533                                           item->spec,
1534                                           "data cannot be empty");
1535         if (!mask)
1536                 mask = &rte_flow_item_tag_mask;
1537         if (!mask->data)
1538                 return rte_flow_error_set(error, EINVAL,
1539                                         RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1540                                         "mask cannot be zero");
1541
1542         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1543                                         (const uint8_t *)&nic_mask,
1544                                         sizeof(struct rte_flow_item_tag),
1545                                         error);
1546         if (ret < 0)
1547                 return ret;
1548         if (mask->index != 0xff)
1549                 return rte_flow_error_set(error, EINVAL,
1550                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1551                                           "partial mask for tag index"
1552                                           " is not supported");
1553         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1554         if (ret < 0)
1555                 return ret;
1556         MLX5_ASSERT(ret != REG_NONE);
1557         return 0;
1558 }
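
/*
 * Editor's illustrative sketch (not part of the original driver): matching
 * a tag register written earlier by a SET_TAG action. As checked above,
 * the index mask must be fully set. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_match_tag_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1, .group = 1 };
        struct rte_flow_item_tag tag_spec = { .data = 0x1234, .index = 0 };
        struct rte_flow_item_tag tag_mask = {
                .data = UINT32_MAX,
                .index = 0xff, /* partial index masks are rejected */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_TAG,
                  .spec = &tag_spec, .mask = &tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}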
1559
1560 /**
1561  * Validate vport (port ID) item.
1562  *
1563  * @param[in] dev
1564  *   Pointer to the rte_eth_dev structure.
1565  * @param[in] item
1566  *   Item specification.
1567  * @param[in] attr
1568  *   Attributes of flow that includes this item.
1569  * @param[in] item_flags
1570  *   Bit-fields that holds the items detected until now.
1571  * @param[out] error
1572  *   Pointer to error structure.
1573  *
1574  * @return
1575  *   0 on success, a negative errno value otherwise and rte_errno is set.
1576  */
1577 static int
1578 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1579                               const struct rte_flow_item *item,
1580                               const struct rte_flow_attr *attr,
1581                               uint64_t item_flags,
1582                               struct rte_flow_error *error)
1583 {
1584         const struct rte_flow_item_port_id *spec = item->spec;
1585         const struct rte_flow_item_port_id *mask = item->mask;
1586         const struct rte_flow_item_port_id switch_mask = {
1587                         .id = 0xffffffff,
1588         };
1589         struct mlx5_priv *esw_priv;
1590         struct mlx5_priv *dev_priv;
1591         int ret;
1592
1593         if (!attr->transfer)
1594                 return rte_flow_error_set(error, EINVAL,
1595                                           RTE_FLOW_ERROR_TYPE_ITEM,
1596                                           NULL,
1597                                           "match on port id is valid only"
1598                                           " when transfer flag is enabled");
1599         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1600                 return rte_flow_error_set(error, ENOTSUP,
1601                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1602                                           "multiple source ports are not"
1603                                           " supported");
1604         if (!mask)
1605                 mask = &switch_mask;
1606         if (mask->id != 0xffffffff)
1607                 return rte_flow_error_set(error, ENOTSUP,
1608                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1609                                            mask,
1610                                            "no support for partial mask on"
1611                                            " \"id\" field");
1612         ret = mlx5_flow_item_acceptable
1613                                 (item, (const uint8_t *)mask,
1614                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1615                                  sizeof(struct rte_flow_item_port_id),
1616                                  error);
1617         if (ret)
1618                 return ret;
1619         if (!spec)
1620                 return 0;
1621         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1622         if (!esw_priv)
1623                 return rte_flow_error_set(error, rte_errno,
1624                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1625                                           "failed to obtain E-Switch info for"
1626                                           " port");
1627         dev_priv = mlx5_dev_to_eswitch_info(dev);
1628         if (!dev_priv)
1629                 return rte_flow_error_set(error, rte_errno,
1630                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1631                                           NULL,
1632                                           "failed to obtain E-Switch info");
1633         if (esw_priv->domain_id != dev_priv->domain_id)
1634                 return rte_flow_error_set(error, EINVAL,
1635                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1636                                           "cannot match on a port from a"
1637                                           " different E-Switch");
1638         return 0;
1639 }
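
/*
 * Editor's illustrative sketch (not part of the original driver): an
 * E-Switch rule matching traffic coming from another DPDK port, which is
 * only valid with the transfer attribute, as enforced above. Port ids are
 * hypothetical.
 */
static __rte_unused struct rte_flow *
example_match_port_id_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .transfer = 1 };
        struct rte_flow_item_port_id pid_spec = { .id = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_PORT_ID, .spec = &pid_spec },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}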
1640
1641 /**
1642  * Validate GTP item.
1643  *
1644  * @param[in] dev
1645  *   Pointer to the rte_eth_dev structure.
1646  * @param[in] item
1647  *   Item specification.
1648  * @param[in] item_flags
1649  *   Bit-fields that holds the items detected until now.
1650  * @param[out] error
1651  *   Pointer to error structure.
1652  *
1653  * @return
1654  *   0 on success, a negative errno value otherwise and rte_errno is set.
1655  */
1656 static int
1657 flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
1658                           const struct rte_flow_item *item,
1659                           uint64_t item_flags,
1660                           struct rte_flow_error *error)
1661 {
1662         struct mlx5_priv *priv = dev->data->dev_private;
1663         const struct rte_flow_item_gtp *mask = item->mask;
1664         const struct rte_flow_item_gtp nic_mask = {
1665                 .msg_type = 0xff,
1666                 .teid = RTE_BE32(0xffffffff),
1667         };
1668
1669         if (!priv->config.hca_attr.tunnel_stateless_gtp)
1670                 return rte_flow_error_set(error, ENOTSUP,
1671                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1672                                           "GTP support is not enabled");
1673         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1674                 return rte_flow_error_set(error, ENOTSUP,
1675                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1676                                           "multiple tunnel layers not"
1677                                           " supported");
1678         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1679                 return rte_flow_error_set(error, EINVAL,
1680                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1681                                           "no outer UDP layer found");
1682         if (!mask)
1683                 mask = &rte_flow_item_gtp_mask;
1684         return mlx5_flow_item_acceptable
1685                 (item, (const uint8_t *)mask,
1686                  (const uint8_t *)&nic_mask,
1687                  sizeof(struct rte_flow_item_gtp),
1688                  error);
1689 }
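
/*
 * Editor's illustrative sketch (not part of the original driver): a GTP
 * match must sit on top of an outer UDP layer, as enforced above, and
 * needs the tunnel_stateless_gtp HCA capability. 2152 is the GTP-U UDP
 * port; the TEID value is hypothetical.
 */
static __rte_unused struct rte_flow *
example_match_gtp_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_udp udp_spec = {
                .hdr = { .dst_port = RTE_BE16(2152) },
        };
        struct rte_flow_item_gtp gtp_spec = { .teid = RTE_BE32(1234) };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
                { .type = RTE_FLOW_ITEM_TYPE_GTP, .spec = &gtp_spec },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}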
1690
1691 /**
1692  * Validate the pop VLAN action.
1693  *
1694  * @param[in] dev
1695  *   Pointer to the rte_eth_dev structure.
1696  * @param[in] action_flags
1697  *   Holds the actions detected until now.
1698  * @param[in] action
1699  *   Pointer to the pop vlan action.
1700  * @param[in] item_flags
1701  *   The items found in this flow rule.
1702  * @param[in] attr
1703  *   Pointer to flow attributes.
1704  * @param[out] error
1705  *   Pointer to error structure.
1706  *
1707  * @return
1708  *   0 on success, a negative errno value otherwise and rte_errno is set.
1709  */
1710 static int
1711 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1712                                  uint64_t action_flags,
1713                                  const struct rte_flow_action *action,
1714                                  uint64_t item_flags,
1715                                  const struct rte_flow_attr *attr,
1716                                  struct rte_flow_error *error)
1717 {
1718         const struct mlx5_priv *priv = dev->data->dev_private;
1719
1722         if (!priv->sh->pop_vlan_action)
1723                 return rte_flow_error_set(error, ENOTSUP,
1724                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1725                                           NULL,
1726                                           "pop vlan action is not supported");
1727         if (attr->egress)
1728                 return rte_flow_error_set(error, ENOTSUP,
1729                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1730                                           NULL,
1731                                           "pop vlan action not supported for "
1732                                           "egress");
1733         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1734                 return rte_flow_error_set(error, ENOTSUP,
1735                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1736                                           "no support for multiple VLAN "
1737                                           "actions");
1738         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1739                 return rte_flow_error_set(error, ENOTSUP,
1740                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1741                                           NULL,
1742                                           "cannot pop vlan without a "
1743                                           "match on (outer) vlan in the flow");
1744         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1745                 return rte_flow_error_set(error, EINVAL,
1746                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1747                                           "wrong action order, port_id should "
1748                                           "be after pop VLAN action");
1749         if (!attr->transfer && priv->representor)
1750                 return rte_flow_error_set(error, ENOTSUP,
1751                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1752                                           "pop vlan action for VF representor "
1753                                           "not supported on NIC table");
1754         return 0;
1755 }
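
/*
 * Editor's illustrative sketch (not part of the original driver): pop VLAN
 * requires a match on the (outer) VLAN in the pattern and an ingress or
 * transfer rule, as enforced above. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_pop_vlan_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_POP_VLAN },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}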
1756
1757 /**
1758  * Get VLAN default info from the VLAN match info in the item list.
1759  *
1760  * @param[in] items
1761  *   The list of item specifications.
1762  * @param[out] vlan
1763  *   Pointer to the VLAN info to fill in.
1764  */
1768 static void
1769 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1770                                   struct rte_vlan_hdr *vlan)
1771 {
1772         const struct rte_flow_item_vlan nic_mask = {
1773                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1774                                 MLX5DV_FLOW_VLAN_VID_MASK),
1775                 .inner_type = RTE_BE16(0xffff),
1776         };
1777
1778         if (items == NULL)
1779                 return;
1780         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1781                 int type = items->type;
1782
1783                 if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
1784                     type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
1785                         break;
1786         }
1787         if (items->type != RTE_FLOW_ITEM_TYPE_END) {
1788                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1789                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1790
1791                 if (!vlan_m)
1792                         vlan_m = &nic_mask;
1793                 /* Only full match values are accepted */
1794                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1795                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1796                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1797                         vlan->vlan_tci |=
1798                                 rte_be_to_cpu_16(vlan_v->tci &
1799                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1800                 }
1801                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1802                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1803                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1804                         vlan->vlan_tci |=
1805                                 rte_be_to_cpu_16(vlan_v->tci &
1806                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1807                 }
1808                 if (vlan_m->inner_type == nic_mask.inner_type)
1809                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1810                                                            vlan_m->inner_type);
1811         }
1812 }
1813
1814 /**
1815  * Validate the push VLAN action.
1816  *
1817  * @param[in] dev
1818  *   Pointer to the rte_eth_dev structure.
1819  * @param[in] action_flags
1820  *   Holds the actions detected until now.
1821  * @param[in] item_flags
1822  *   The items found in this flow rule.
1823  * @param[in] action
1824  *   Pointer to the action structure.
1825  * @param[in] attr
1826  *   Pointer to flow attributes
1827  * @param[out] error
1828  *   Pointer to error structure.
1829  *
1830  * @return
1831  *   0 on success, a negative errno value otherwise and rte_errno is set.
1832  */
1833 static int
1834 flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
1835                                   uint64_t action_flags,
1836                                   uint64_t item_flags __rte_unused,
1837                                   const struct rte_flow_action *action,
1838                                   const struct rte_flow_attr *attr,
1839                                   struct rte_flow_error *error)
1840 {
1841         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1842         const struct mlx5_priv *priv = dev->data->dev_private;
1843
1844         if (!attr->transfer && attr->ingress)
1845                 return rte_flow_error_set(error, ENOTSUP,
1846                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1847                                           NULL,
1848                                           "push VLAN action not supported for "
1849                                           "ingress");
1850         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1851             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1852                 return rte_flow_error_set(error, EINVAL,
1853                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1854                                           "invalid vlan ethertype");
1855         if (action_flags & MLX5_FLOW_VLAN_ACTIONS)
1856                 return rte_flow_error_set(error, ENOTSUP,
1857                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1858                                           "no support for multiple VLAN "
1859                                           "actions");
1860         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1861                 return rte_flow_error_set(error, EINVAL,
1862                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1863                                           "wrong action order, port_id should "
1864                                           "be after push VLAN");
1865         if (!attr->transfer && priv->representor)
1866                 return rte_flow_error_set(error, ENOTSUP,
1867                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1868                                           "push vlan action for VF representor "
1869                                           "not supported on NIC table");
1871         return 0;
1872 }
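
/*
 * Editor's illustrative sketch (not part of the original driver): push
 * VLAN on an egress rule, combined with the VID/PCP setters validated
 * below, which must follow the push action. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_push_vlan_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .egress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_of_push_vlan push = {
                .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
        };
        struct rte_flow_action_of_set_vlan_vid vid = {
                .vlan_vid = RTE_BE16(100),
        };
        struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP, .conf = &pcp },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}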
1873
1874 /**
1875  * Validate the set VLAN PCP.
1876  *
1877  * @param[in] action_flags
1878  *   Holds the actions detected until now.
1879  * @param[in] actions
1880  *   Pointer to the list of actions remaining in the flow rule.
1881  * @param[out] error
1882  *   Pointer to error structure.
1883  *
1884  * @return
1885  *   0 on success, a negative errno value otherwise and rte_errno is set.
1886  */
1887 static int
1888 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1889                                      const struct rte_flow_action actions[],
1890                                      struct rte_flow_error *error)
1891 {
1892         const struct rte_flow_action *action = actions;
1893         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
1894
1895         if (conf->vlan_pcp > 7)
1896                 return rte_flow_error_set(error, EINVAL,
1897                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1898                                           "VLAN PCP value is too big");
1899         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1900                 return rte_flow_error_set(error, ENOTSUP,
1901                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1902                                           "set VLAN PCP action must follow "
1903                                           "the push VLAN action");
1904         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
1905                 return rte_flow_error_set(error, ENOTSUP,
1906                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1907                                           "multiple VLAN PCP modifications are "
1908                                           "not supported");
1909         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1910                 return rte_flow_error_set(error, EINVAL,
1911                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1912                                           "wrong action order, port_id should "
1913                                           "be after set VLAN PCP");
1914         return 0;
1915 }
1916
1917 /**
1918  * Validate the set VLAN VID.
1919  *
1920  * @param[in] item_flags
1921  *   Holds the items detected in this rule.
1922  * @param[in] action_flags
1923  *   Holds the actions detected until now.
1924  * @param[in] actions
1925  *   Pointer to the list of actions remaining in the flow rule.
1926  * @param[out] error
1927  *   Pointer to error structure.
1928  *
1929  * @return
1930  *   0 on success, a negative errno value otherwise and rte_errno is set.
1931  */
1932 static int
1933 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1934                                      uint64_t action_flags,
1935                                      const struct rte_flow_action actions[],
1936                                      struct rte_flow_error *error)
1937 {
1938         const struct rte_flow_action *action = actions;
1939         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1940
1941         if (conf->vlan_vid > RTE_BE16(0xFFE))
1942                 return rte_flow_error_set(error, EINVAL,
1943                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1944                                           "VLAN VID value is too big");
1945         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
1946             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1947                 return rte_flow_error_set(error, ENOTSUP,
1948                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1949                                           "set VLAN VID action must follow push"
1950                                           " VLAN action or match on VLAN item");
1951         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1952                 return rte_flow_error_set(error, ENOTSUP,
1953                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1954                                           "multiple VLAN VID modifications are "
1955                                           "not supported");
1956         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1957                 return rte_flow_error_set(error, EINVAL,
1958                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1959                                           "wrong action order, port_id should "
1960                                           "be after set VLAN VID");
1961         return 0;
1962 }
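
/*
 * Editor's illustrative sketch (not part of the original driver): per the
 * check above, the VID setter may also be used without a push when the
 * rule matches an existing (outer) VLAN. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_rewrite_vlan_vid_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .egress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_of_set_vlan_vid vid = {
                .vlan_vid = RTE_BE16(200),
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}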
1963
1964 /**
1965  * Validate the FLAG action.
1966  *
1967  * @param[in] dev
1968  *   Pointer to the rte_eth_dev structure.
1969  * @param[in] action_flags
1970  *   Holds the actions detected until now.
1971  * @param[in] attr
1972  *   Pointer to flow attributes
1973  * @param[out] error
1974  *   Pointer to error structure.
1975  *
1976  * @return
1977  *   0 on success, a negative errno value otherwise and rte_errno is set.
1978  */
1979 static int
1980 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
1981                              uint64_t action_flags,
1982                              const struct rte_flow_attr *attr,
1983                              struct rte_flow_error *error)
1984 {
1985         struct mlx5_priv *priv = dev->data->dev_private;
1986         struct mlx5_dev_config *config = &priv->config;
1987         int ret;
1988
1989         /* Fall back if no extended metadata register support. */
1990         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1991                 return mlx5_flow_validate_action_flag(action_flags, attr,
1992                                                       error);
1993         /* Extensive metadata mode requires registers. */
1994         if (!mlx5_flow_ext_mreg_supported(dev))
1995                 return rte_flow_error_set(error, ENOTSUP,
1996                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1997                                           "no metadata registers "
1998                                           "to support flag action");
1999         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
2000                 return rte_flow_error_set(error, ENOTSUP,
2001                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2002                                           "extended metadata register"
2003                                           " isn't available");
2004         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2005         if (ret < 0)
2006                 return ret;
2007         MLX5_ASSERT(ret > 0);
2008         if (action_flags & MLX5_FLOW_ACTION_MARK)
2009                 return rte_flow_error_set(error, EINVAL,
2010                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2011                                           "can't mark and flag in same flow");
2012         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2013                 return rte_flow_error_set(error, EINVAL,
2014                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2015                                           "can't have 2 flag"
2016                                           " actions in same flow");
2017         return 0;
2018 }
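
/*
 * Editor's illustrative sketch (not part of the original driver): the FLAG
 * action causes PKT_RX_FDIR to be set in the mbuf ol_flags of matching
 * packets on Rx. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_flag_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_FLAG },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}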
2019
2020 /**
2021  * Validate MARK action.
2022  *
2023  * @param[in] dev
2024  *   Pointer to the rte_eth_dev structure.
2025  * @param[in] action
2026  *   Pointer to action.
2027  * @param[in] action_flags
2028  *   Holds the actions detected until now.
2029  * @param[in] attr
2030  *   Pointer to flow attributes
2031  * @param[out] error
2032  *   Pointer to error structure.
2033  *
2034  * @return
2035  *   0 on success, a negative errno value otherwise and rte_errno is set.
2036  */
2037 static int
2038 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
2039                              const struct rte_flow_action *action,
2040                              uint64_t action_flags,
2041                              const struct rte_flow_attr *attr,
2042                              struct rte_flow_error *error)
2043 {
2044         struct mlx5_priv *priv = dev->data->dev_private;
2045         struct mlx5_dev_config *config = &priv->config;
2046         const struct rte_flow_action_mark *mark = action->conf;
2047         int ret;
2048
2049         /* Fall back if no extended metadata register support. */
2050         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
2051                 return mlx5_flow_validate_action_mark(action, action_flags,
2052                                                       attr, error);
2053         /* Extensive metadata mode requires registers. */
2054         if (!mlx5_flow_ext_mreg_supported(dev))
2055                 return rte_flow_error_set(error, ENOTSUP,
2056                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2057                                           "no metadata registers "
2058                                           "to support mark action");
2059         if (!priv->sh->dv_mark_mask)
2060                 return rte_flow_error_set(error, ENOTSUP,
2061                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2062                                           "extended metadata register"
2063                                           " isn't available");
2064         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2065         if (ret < 0)
2066                 return ret;
2067         MLX5_ASSERT(ret > 0);
2068         if (!mark)
2069                 return rte_flow_error_set(error, EINVAL,
2070                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2071                                           "configuration cannot be null");
2072         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
2073                 return rte_flow_error_set(error, EINVAL,
2074                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2075                                           &mark->id,
2076                                           "mark id exceeds the limit");
2077         if (action_flags & MLX5_FLOW_ACTION_FLAG)
2078                 return rte_flow_error_set(error, EINVAL,
2079                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2080                                           "can't flag and mark in same flow");
2081         if (action_flags & MLX5_FLOW_ACTION_MARK)
2082                 return rte_flow_error_set(error, EINVAL,
2083                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2084                                           "can't have 2 mark actions in same"
2085                                           " flow");
2086         return 0;
2087 }
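
/*
 * Editor's illustrative sketch (not part of the original driver): MARK
 * attaches a 32-bit value to matching packets; on Rx it is delivered in
 * mbuf->hash.fdir.hi with PKT_RX_FDIR_ID set. The id must stay below
 * MLX5_FLOW_MARK_MAX masked by the available register width, as checked
 * above. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_mark_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_mark mark = { .id = 42 };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}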
2088
2089 /**
2090  * Validate SET_META action.
2091  *
2092  * @param[in] dev
2093  *   Pointer to the rte_eth_dev structure.
2094  * @param[in] action
2095  *   Pointer to the action structure.
2096  * @param[in] action_flags
2097  *   Holds the actions detected until now.
2098  * @param[in] attr
2099  *   Pointer to flow attributes
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2106 static int
2107 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
2108                                  const struct rte_flow_action *action,
2109                                  uint64_t action_flags __rte_unused,
2110                                  const struct rte_flow_attr *attr,
2111                                  struct rte_flow_error *error)
2112 {
2113         const struct rte_flow_action_set_meta *conf;
2114         uint32_t nic_mask = UINT32_MAX;
2115         int reg;
2116
2117         if (!mlx5_flow_ext_mreg_supported(dev))
2118                 return rte_flow_error_set(error, ENOTSUP,
2119                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2120                                           "extended metadata register"
2121                                           " isn't supported");
2122         reg = flow_dv_get_metadata_reg(dev, attr, error);
2123         if (reg < 0)
2124                 return reg;
2125         if (reg != REG_A && reg != REG_B) {
2126                 struct mlx5_priv *priv = dev->data->dev_private;
2127
2128                 nic_mask = priv->sh->dv_meta_mask;
2129         }
2130         if (!(action->conf))
2131                 return rte_flow_error_set(error, EINVAL,
2132                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2133                                           "configuration cannot be null");
2134         conf = (const struct rte_flow_action_set_meta *)action->conf;
2135         if (!conf->mask)
2136                 return rte_flow_error_set(error, EINVAL,
2137                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2138                                           "zero mask doesn't have any effect");
2139         if (conf->mask & ~nic_mask)
2140                 return rte_flow_error_set(error, EINVAL,
2141                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2142                                           "meta data must be within reg C0");
2143         return 0;
2144 }
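
/*
 * Editor's illustrative sketch (not part of the original driver): SET_META
 * writes metadata that a later META item can match, within the mask limits
 * validated above. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_set_meta_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_set_meta set_meta = {
                .data = 0xcafe,
                .mask = UINT32_MAX, /* zero masks are rejected */
        };
        struct rte_flow_action_jump jump = { .group = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SET_META, .conf = &set_meta },
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}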
2145
2146 /**
2147  * Validate SET_TAG action.
2148  *
2149  * @param[in] dev
2150  *   Pointer to the rte_eth_dev structure.
2151  * @param[in] action
2152  *   Pointer to the action structure.
2153  * @param[in] action_flags
2154  *   Holds the actions detected until now.
2155  * @param[in] attr
2156  *   Pointer to flow attributes
2157  * @param[out] error
2158  *   Pointer to error structure.
2159  *
2160  * @return
2161  *   0 on success, a negative errno value otherwise and rte_errno is set.
2162  */
2163 static int
2164 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2165                                 const struct rte_flow_action *action,
2166                                 uint64_t action_flags,
2167                                 const struct rte_flow_attr *attr,
2168                                 struct rte_flow_error *error)
2169 {
2170         const struct rte_flow_action_set_tag *conf;
2171         const uint64_t terminal_action_flags =
2172                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2173                 MLX5_FLOW_ACTION_RSS;
2174         int ret;
2175
2176         if (!mlx5_flow_ext_mreg_supported(dev))
2177                 return rte_flow_error_set(error, ENOTSUP,
2178                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2179                                           "extensive metadata register"
2180                                           " isn't supported");
2181         if (!(action->conf))
2182                 return rte_flow_error_set(error, EINVAL,
2183                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2184                                           "configuration cannot be null");
2185         conf = (const struct rte_flow_action_set_tag *)action->conf;
2186         if (!conf->mask)
2187                 return rte_flow_error_set(error, EINVAL,
2188                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2189                                           "zero mask doesn't have any effect");
2190         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2191         if (ret < 0)
2192                 return ret;
2193         if (!attr->transfer && attr->ingress &&
2194             (action_flags & terminal_action_flags))
2195                 return rte_flow_error_set(error, EINVAL,
2196                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2197                                           "set_tag has no effect"
2198                                           " with terminal actions");
2199         return 0;
2200 }
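
/*
 * Editor's illustrative sketch (not part of the original driver): per the
 * check above, SET_TAG is only meaningful before a non-terminal action
 * such as JUMP; the tag can then be matched in the target group with the
 * TAG item. Values are hypothetical.
 */
static __rte_unused struct rte_flow *
example_set_tag_flow(uint16_t port, struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_set_tag set_tag = {
                .data = 0x1234,
                .mask = UINT32_MAX,
                .index = 0,
        };
        struct rte_flow_action_jump jump = { .group = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SET_TAG, .conf = &set_tag },
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}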
2201
2202 /**
2203  * Validate count action.
2204  *
2205  * @param[in] dev
2206  *   Pointer to rte_eth_dev structure.
2207  * @param[out] error
2208  *   Pointer to error structure.
2209  *
2210  * @return
2211  *   0 on success, a negative errno value otherwise and rte_errno is set.
2212  */
2213 static int
2214 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2215                               struct rte_flow_error *error)
2216 {
2217         struct mlx5_priv *priv = dev->data->dev_private;
2218
2219         if (!priv->config.devx)
2220                 goto notsup_err;
2221 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2222         return 0;
2223 #endif
2224 notsup_err:
2225         return rte_flow_error_set
2226                       (error, ENOTSUP,
2227                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2228                        NULL,
2229                        "count action not supported");
2230 }
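
/*
 * Editor's illustrative sketch (not part of the original driver): reading
 * back a DevX counter attached to a flow that was created with a COUNT
 * action. The helper name is hypothetical.
 */
static __rte_unused int
example_query_count(uint16_t port, struct rte_flow *flow, uint64_t *hits,
                    struct rte_flow_error *err)
{
        const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count counters = { .reset = 0 };
        int ret;

        ret = rte_flow_query(port, flow, &count_action, &counters, err);
        if (!ret && counters.hits_set)
                *hits = counters.hits;
        return ret;
}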
2231
2232 /**
2233  * Validate the L2 encap action.
2234  *
2235  * @param[in] dev
2236  *   Pointer to the rte_eth_dev structure.
2237  * @param[in] action_flags
2238  *   Holds the actions detected until now.
2239  * @param[in] action
2240  *   Pointer to the action structure.
2241  * @param[in] attr
2242  *   Pointer to flow attributes.
2243  * @param[out] error
2244  *   Pointer to error structure.
2245  *
2246  * @return
2247  *   0 on success, a negative errno value otherwise and rte_errno is set.
2248  */
2249 static int
2250 flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
2251                                  uint64_t action_flags,
2252                                  const struct rte_flow_action *action,
2253                                  const struct rte_flow_attr *attr,
2254                                  struct rte_flow_error *error)
2255 {
2256         const struct mlx5_priv *priv = dev->data->dev_private;
2257
2258         if (!(action->conf))
2259                 return rte_flow_error_set(error, EINVAL,
2260                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2261                                           "configuration cannot be null");
2262         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
2263                 return rte_flow_error_set(error, EINVAL,
2264                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2265                                           "can only have a single encap action "
2266                                           "in a flow");
2267         if (!attr->transfer && priv->representor)
2268                 return rte_flow_error_set(error, ENOTSUP,
2269                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2270                                           "encap action for VF representor "
2271                                           "not supported on NIC table");
2272         return 0;
2273 }
2274
2275 /**
2276  * Validate a decap action.
2277  *
2278  * @param[in] dev
2279  *   Pointer to the rte_eth_dev structure.
2280  * @param[in] action_flags
2281  *   Holds the actions detected until now.
2282  * @param[in] attr
2283  *   Pointer to flow attributes
2284  * @param[out] error
2285  *   Pointer to error structure.
2286  *
2287  * @return
2288  *   0 on success, a negative errno value otherwise and rte_errno is set.
2289  */
2290 static int
2291 flow_dv_validate_action_decap(struct rte_eth_dev *dev,
2292                               uint64_t action_flags,
2293                               const struct rte_flow_attr *attr,
2294                               struct rte_flow_error *error)
2295 {
2296         const struct mlx5_priv *priv = dev->data->dev_private;
2297
2298         if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
2299                 return rte_flow_error_set(error, ENOTSUP,
2300                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2301                                           action_flags &
2302                                           MLX5_FLOW_ACTION_DECAP ? "can only "
2303                                           "have a single decap action" : "decap "
2304                                           "after encap is not supported");
2305         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2306                 return rte_flow_error_set(error, EINVAL,
2307                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2308                                           "can't have decap action after"
2309                                           " modify action");
2310         if (attr->egress)
2311                 return rte_flow_error_set(error, ENOTSUP,
2312                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2313                                           NULL,
2314                                           "decap action not supported for "
2315                                           "egress");
2316         if (!attr->transfer && priv->representor)
2317                 return rte_flow_error_set(error, ENOTSUP,
2318                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2319                                           "decap action for VF representor "
2320                                           "not supported on NIC table");
2321         return 0;
2322 }
2323
2324 const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
2325
2326 /**
2327  * Validate the raw encap and decap actions.
2328  *
2329  * @param[in] dev
2330  *   Pointer to the rte_eth_dev structure.
2331  * @param[in] decap
2332  *   Pointer to the decap action.
2333  * @param[in] encap
2334  *   Pointer to the encap action.
2335  * @param[in] attr
2336  *   Pointer to flow attributes
2337  * @param[in, out] action_flags
2338  *   Holds the actions detected until now.
2339  * @param[out] actions_n
2340  *   pointer to the number of actions counter.
2341  * @param[out] error
2342  *   Pointer to error structure.
2343  *
2344  * @return
2345  *   0 on success, a negative errno value otherwise and rte_errno is set.
2346  */
2347 static int
2348 flow_dv_validate_action_raw_encap_decap
2349         (struct rte_eth_dev *dev,
2350          const struct rte_flow_action_raw_decap *decap,
2351          const struct rte_flow_action_raw_encap *encap,
2352          const struct rte_flow_attr *attr, uint64_t *action_flags,
2353          int *actions_n, struct rte_flow_error *error)
2354 {
2355         const struct mlx5_priv *priv = dev->data->dev_private;
2356         int ret;
2357
2358         if (encap && (!encap->size || !encap->data))
2359                 return rte_flow_error_set(error, EINVAL,
2360                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2361                                           "raw encap data cannot be empty");
2362         if (decap && encap) {
2363                 if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
2364                     encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2365                         /* L3 encap. */
2366                         decap = NULL;
2367                 else if (encap->size <=
2368                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2369                            decap->size >
2370                            MLX5_ENCAPSULATION_DECISION_SIZE)
2371                         /* L3 decap. */
2372                         encap = NULL;
2373                 else if (encap->size >
2374                            MLX5_ENCAPSULATION_DECISION_SIZE &&
2375                            decap->size >
2376                            MLX5_ENCAPSULATION_DECISION_SIZE)
2377                         /* 2 L2 actions: encap and decap. */
2378                         ;
2379                 else
2380                         return rte_flow_error_set(error,
2381                                 ENOTSUP,
2382                                 RTE_FLOW_ERROR_TYPE_ACTION,
2383                                 NULL, "unsupported too small "
2384                                 "raw decap and too small raw "
2385                                 "encap combination");
2386         }
2387         if (decap) {
2388                 ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
2389                                                     error);
2390                 if (ret < 0)
2391                         return ret;
2392                 *action_flags |= MLX5_FLOW_ACTION_DECAP;
2393                 ++(*actions_n);
2394         }
2395         if (encap) {
2396                 if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
2397                         return rte_flow_error_set(error, ENOTSUP,
2398                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2399                                                   NULL,
2400                                                   "small raw encap size");
2401                 if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
2402                         return rte_flow_error_set(error, EINVAL,
2403                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2404                                                   NULL,
2405                                                   "more than one encap action");
2406                 if (!attr->transfer && priv->representor)
2407                         return rte_flow_error_set
2408                                         (error, ENOTSUP,
2409                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2410                                          "encap action for VF representor "
2411                                          "not supported on NIC table");
2412                 *action_flags |= MLX5_FLOW_ACTION_ENCAP;
2413                 ++(*actions_n);
2414         }
2415         return 0;
2416 }
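
/*
 * Editor's illustrative sketch (not part of the original driver): a small
 * raw decap (<= MLX5_ENCAPSULATION_DECISION_SIZE) paired with a large raw
 * encap is classified above as an L3 tunnel encapsulation. The header
 * buffer is application supplied and hdr_size must exceed the decision
 * size; names and sizes are hypothetical.
 */
static __rte_unused struct rte_flow *
example_raw_l3_encap_flow(uint16_t port, uint8_t *hdr, size_t hdr_size,
                          struct rte_flow_error *err)
{
        struct rte_flow_attr attr = { .egress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* Strip the original L2 header before prepending the new stack. */
        struct rte_flow_action_raw_decap decap = {
                .data = NULL,
                .size = sizeof(struct rte_ether_hdr),
        };
        struct rte_flow_action_raw_encap encap = {
                .data = hdr, /* new L2 + L3 (+ tunnel) headers */
                .size = hdr_size,
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
                { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port, &attr, pattern, actions, err);
}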
2417
2418 /**
2419  * Find existing encap/decap resource or create and register a new one.
2420  *
2421  * @param[in, out] dev
2422  *   Pointer to rte_eth_dev structure.
2423  * @param[in, out] resource
2424  *   Pointer to encap/decap resource.
2425  * @param[in, out] dev_flow
2426  *   Pointer to the dev_flow.
2427  * @param[out] error
2428  *   pointer to error structure.
2429  *
2430  * @return
2431  *   0 on success, a negative errno value otherwise and rte_errno is set.
2432  */
2433 static int
2434 flow_dv_encap_decap_resource_register
2435                         (struct rte_eth_dev *dev,
2436                          struct mlx5_flow_dv_encap_decap_resource *resource,
2437                          struct mlx5_flow *dev_flow,
2438                          struct rte_flow_error *error)
2439 {
2440         struct mlx5_priv *priv = dev->data->dev_private;
2441         struct mlx5_ibv_shared *sh = priv->sh;
2442         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2443         struct mlx5dv_dr_domain *domain;
2444
2445         resource->flags = dev_flow->dv.group ? 0 : 1;
2446         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2447                 domain = sh->fdb_domain;
2448         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2449                 domain = sh->rx_domain;
2450         else
2451                 domain = sh->tx_domain;
2452         /* Lookup a matching resource from cache. */
2453         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
2454                 if (resource->reformat_type == cache_resource->reformat_type &&
2455                     resource->ft_type == cache_resource->ft_type &&
2456                     resource->flags == cache_resource->flags &&
2457                     resource->size == cache_resource->size &&
2458                     !memcmp((const void *)resource->buf,
2459                             (const void *)cache_resource->buf,
2460                             resource->size)) {
2461                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2462                                 (void *)cache_resource,
2463                                 rte_atomic32_read(&cache_resource->refcnt));
2464                         rte_atomic32_inc(&cache_resource->refcnt);
2465                         dev_flow->handle->dvh.encap_decap = cache_resource;
2466                         return 0;
2467                 }
2468         }
2469         /* Register new encap/decap resource. */
2470         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2471         if (!cache_resource)
2472                 return rte_flow_error_set(error, ENOMEM,
2473                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2474                                           "cannot allocate resource memory");
2475         *cache_resource = *resource;
2476         cache_resource->verbs_action =
2477                 mlx5_glue->dv_create_flow_action_packet_reformat
2478                         (sh->ctx, cache_resource->reformat_type,
2479                          cache_resource->ft_type, domain, cache_resource->flags,
2480                          cache_resource->size,
2481                          (cache_resource->size ? cache_resource->buf : NULL));
2482         if (!cache_resource->verbs_action) {
2483                 rte_free(cache_resource);
2484                 return rte_flow_error_set(error, ENOMEM,
2485                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2486                                           NULL, "cannot create action");
2487         }
2488         rte_atomic32_init(&cache_resource->refcnt);
2489         rte_atomic32_inc(&cache_resource->refcnt);
2490         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
2491         dev_flow->handle->dvh.encap_decap = cache_resource;
2492         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2493                 (void *)cache_resource,
2494                 rte_atomic32_read(&cache_resource->refcnt));
2495         return 0;
2496 }
2497
2498 /**
2499  * Find existing table jump resource or create and register a new one.
2500  *
2501  * @param[in, out] dev
2502  *   Pointer to rte_eth_dev structure.
2503  * @param[in, out] tbl
2504  *   Pointer to flow table resource.
2505  * @param[in, out] dev_flow
2506  *   Pointer to the dev_flow.
2507  * @param[out] error
2508  *   pointer to error structure.
2509  *
2510  * @return
2511  *   0 on success, a negative errno value otherwise and rte_errno is set.
2512  */
2513 static int
2514 flow_dv_jump_tbl_resource_register
2515                         (struct rte_eth_dev *dev __rte_unused,
2516                          struct mlx5_flow_tbl_resource *tbl,
2517                          struct mlx5_flow *dev_flow,
2518                          struct rte_flow_error *error)
2519 {
2520         struct mlx5_flow_tbl_data_entry *tbl_data =
2521                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2522         int cnt;
2523
2524         MLX5_ASSERT(tbl);
2525         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2526         if (!cnt) {
2527                 tbl_data->jump.action =
2528                         mlx5_glue->dr_create_flow_action_dest_flow_tbl
2529                         (tbl->obj);
2530                 if (!tbl_data->jump.action)
2531                         return rte_flow_error_set(error, ENOMEM,
2532                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2533                                         NULL, "cannot create jump action");
2534                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2535                         (void *)&tbl_data->jump, cnt);
2536         } else {
2537                 /* old jump should not make the table ref++. */
2538                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
2539                 MLX5_ASSERT(tbl_data->jump.action);
2540                 DRV_LOG(DEBUG, "existing jump table resource %p: refcnt %d++",
2541                         (void *)&tbl_data->jump, cnt);
2542         }
2543         rte_atomic32_inc(&tbl_data->jump.refcnt);
2544         dev_flow->handle->dvh.jump = &tbl_data->jump;
2545         return 0;
2546 }
2547
2548 /**
2549  * Find existing table port ID resource or create and register a new one.
2550  *
2551  * @param[in, out] dev
2552  *   Pointer to rte_eth_dev structure.
2553  * @param[in, out] resource
2554  *   Pointer to port ID action resource.
2555  * @param[in, out] dev_flow
2556  *   Pointer to the dev_flow.
2557  * @param[out] error
2558  *   pointer to error structure.
2559  *
2560  * @return
2561  *   0 on success, a negative errno value otherwise and rte_errno is set.
2562  */
2563 static int
2564 flow_dv_port_id_action_resource_register
2565                         (struct rte_eth_dev *dev,
2566                          struct mlx5_flow_dv_port_id_action_resource *resource,
2567                          struct mlx5_flow *dev_flow,
2568                          struct rte_flow_error *error)
2569 {
2570         struct mlx5_priv *priv = dev->data->dev_private;
2571         struct mlx5_ibv_shared *sh = priv->sh;
2572         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2573
2574         /* Lookup a matching resource from cache. */
2575         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
2576                 if (resource->port_id == cache_resource->port_id) {
2577                         DRV_LOG(DEBUG, "port id action resource %p: "
2578                                 "refcnt %d++",
2579                                 (void *)cache_resource,
2580                                 rte_atomic32_read(&cache_resource->refcnt));
2581                         rte_atomic32_inc(&cache_resource->refcnt);
2582                         dev_flow->handle->dvh.port_id_action = cache_resource;
2583                         return 0;
2584                 }
2585         }
2586         /* Register new port id action resource. */
2587         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2588         if (!cache_resource)
2589                 return rte_flow_error_set(error, ENOMEM,
2590                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2591                                           "cannot allocate resource memory");
2592         *cache_resource = *resource;
2593         /*
2594          * Depending on rdma_core version the glue routine calls
2595          * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
2596          * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
2597          */
2598         cache_resource->action =
2599                 mlx5_glue->dr_create_flow_action_dest_port
2600                         (priv->sh->fdb_domain, resource->port_id);
2601         if (!cache_resource->action) {
2602                 rte_free(cache_resource);
2603                 return rte_flow_error_set(error, ENOMEM,
2604                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2605                                           NULL, "cannot create action");
2606         }
2607         rte_atomic32_init(&cache_resource->refcnt);
2608         rte_atomic32_inc(&cache_resource->refcnt);
2609         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
2610         dev_flow->handle->dvh.port_id_action = cache_resource;
2611         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2612                 (void *)cache_resource,
2613                 rte_atomic32_read(&cache_resource->refcnt));
2614         return 0;
2615 }
2616
2617 /**
2618  * Find existing push VLAN action resource or create and register a new one.
2619  *
2620  * @param[in, out] dev
2621  *   Pointer to rte_eth_dev structure.
2622  * @param[in, out] resource
2623  *   Pointer to push VLAN action resource.
2624  * @param[in, out] dev_flow
2625  *   Pointer to the dev_flow.
2626  * @param[out] error
2627  *   Pointer to error structure.
2628  *
2629  * @return
2630  *   0 on success, otherwise -errno and errno is set.
2631  */
2632 static int
2633 flow_dv_push_vlan_action_resource_register
2634                        (struct rte_eth_dev *dev,
2635                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2636                         struct mlx5_flow *dev_flow,
2637                         struct rte_flow_error *error)
2638 {
2639         struct mlx5_priv *priv = dev->data->dev_private;
2640         struct mlx5_ibv_shared *sh = priv->sh;
2641         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2642         struct mlx5dv_dr_domain *domain;
2643
2644         /* Lookup a matching resource from cache. */
2645         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
2646                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2647                     resource->ft_type == cache_resource->ft_type) {
2648                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
2649                                 "refcnt %d++",
2650                                 (void *)cache_resource,
2651                                 rte_atomic32_read(&cache_resource->refcnt));
2652                         rte_atomic32_inc(&cache_resource->refcnt);
2653                         dev_flow->handle->dvh.push_vlan_res = cache_resource;
2654                         return 0;
2655                 }
2656         }
2657         /* Register new push_vlan action resource. */
2658         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2659         if (!cache_resource)
2660                 return rte_flow_error_set(error, ENOMEM,
2661                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2662                                           "cannot allocate resource memory");
2663         *cache_resource = *resource;
2664         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2665                 domain = sh->fdb_domain;
2666         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2667                 domain = sh->rx_domain;
2668         else
2669                 domain = sh->tx_domain;
2670         cache_resource->action =
2671                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
2672                                                            resource->vlan_tag);
2673         if (!cache_resource->action) {
2674                 rte_free(cache_resource);
2675                 return rte_flow_error_set(error, ENOMEM,
2676                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2677                                           NULL, "cannot create action");
2678         }
2679         rte_atomic32_init(&cache_resource->refcnt);
2680         rte_atomic32_inc(&cache_resource->refcnt);
2681         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
2682         dev_flow->handle->dvh.push_vlan_res = cache_resource;
2683         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2684                 (void *)cache_resource,
2685                 rte_atomic32_read(&cache_resource->refcnt));
2686         return 0;
2687 }
2688 /**
2689  * Get the size of a specific rte_flow_item_type.
2690  *
2691  * @param[in] item_type
2692  *   Tested rte_flow_item_type.
2693  *
2694  * @return
2695  *   Size of the item's spec structure, 0 if void or irrelevant.
2696  */
2697 static size_t
2698 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2699 {
2700         size_t retval;
2701
2702         switch (item_type) {
2703         case RTE_FLOW_ITEM_TYPE_ETH:
2704                 retval = sizeof(struct rte_flow_item_eth);
2705                 break;
2706         case RTE_FLOW_ITEM_TYPE_VLAN:
2707                 retval = sizeof(struct rte_flow_item_vlan);
2708                 break;
2709         case RTE_FLOW_ITEM_TYPE_IPV4:
2710                 retval = sizeof(struct rte_flow_item_ipv4);
2711                 break;
2712         case RTE_FLOW_ITEM_TYPE_IPV6:
2713                 retval = sizeof(struct rte_flow_item_ipv6);
2714                 break;
2715         case RTE_FLOW_ITEM_TYPE_UDP:
2716                 retval = sizeof(struct rte_flow_item_udp);
2717                 break;
2718         case RTE_FLOW_ITEM_TYPE_TCP:
2719                 retval = sizeof(struct rte_flow_item_tcp);
2720                 break;
2721         case RTE_FLOW_ITEM_TYPE_VXLAN:
2722                 retval = sizeof(struct rte_flow_item_vxlan);
2723                 break;
2724         case RTE_FLOW_ITEM_TYPE_GRE:
2725                 retval = sizeof(struct rte_flow_item_gre);
2726                 break;
2727         case RTE_FLOW_ITEM_TYPE_NVGRE:
2728                 retval = sizeof(struct rte_flow_item_nvgre);
2729                 break;
2730         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2731                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2732                 break;
2733         case RTE_FLOW_ITEM_TYPE_MPLS:
2734                 retval = sizeof(struct rte_flow_item_mpls);
2735                 break;
2736         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2737         default:
2738                 retval = 0;
2739                 break;
2740         }
2741         return retval;
2742 }
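
/*
 * Illustrative sketch (not in the upstream source): how the sizes above
 * accumulate for a typical VXLAN encapsulation chain. With the classic item
 * layouts (14 + 20 + 8 + 8 bytes) the total is 50 bytes, well below
 * MLX5_ENCAP_MAX_LEN.
 */
static size_t
example_vxlan_chain_len(void)
{
        static const enum rte_flow_item_type chain[] = {
                RTE_FLOW_ITEM_TYPE_ETH,
                RTE_FLOW_ITEM_TYPE_IPV4,
                RTE_FLOW_ITEM_TYPE_UDP,
                RTE_FLOW_ITEM_TYPE_VXLAN,
        };
        size_t total = 0;
        unsigned int i;

        for (i = 0; i < RTE_DIM(chain); i++)
                total += flow_dv_get_item_len(chain[i]);
        return total;
}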
2743
2744 #define MLX5_ENCAP_IPV4_VERSION         0x40
2745 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2746 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2747 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2748 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2749 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2750 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
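
/*
 * Editorial note (not in the upstream source): the IPv4 defaults above
 * combine into the familiar first header byte,
 * MLX5_ENCAP_IPV4_VERSION | MLX5_ENCAP_IPV4_IHL_MIN == 0x45
 * (version 4, IHL of 5 words, i.e. a 20-byte header), while
 * MLX5_ENCAP_IPV6_VTC_FLOW == 0x60000000 sets only the version nibble
 * (version 6, traffic class 0, flow label 0).
 */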
2751
2752 /**
2753  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
2754  *
2755  * @param[in] items
2756  *   Pointer to rte_flow_item objects list.
2757  * @param[out] buf
2758  *   Pointer to the output buffer.
2759  * @param[out] size
2760  *   Pointer to the output buffer size.
2761  * @param[out] error
2762  *   Pointer to the error structure.
2763  *
2764  * @return
2765  *   0 on success, a negative errno value otherwise and rte_errno is set.
2766  */
2767 static int
2768 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2769                            size_t *size, struct rte_flow_error *error)
2770 {
2771         struct rte_ether_hdr *eth = NULL;
2772         struct rte_vlan_hdr *vlan = NULL;
2773         struct rte_ipv4_hdr *ipv4 = NULL;
2774         struct rte_ipv6_hdr *ipv6 = NULL;
2775         struct rte_udp_hdr *udp = NULL;
2776         struct rte_vxlan_hdr *vxlan = NULL;
2777         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2778         struct rte_gre_hdr *gre = NULL;
2779         size_t len;
2780         size_t temp_size = 0;
2781
2782         if (!items)
2783                 return rte_flow_error_set(error, EINVAL,
2784                                           RTE_FLOW_ERROR_TYPE_ACTION,
2785                                           NULL, "invalid empty data");
2786         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2787                 len = flow_dv_get_item_len(items->type);
2788                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2789                         return rte_flow_error_set(error, EINVAL,
2790                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2791                                                   (void *)items->type,
2792                                                   "items total size is too big"
2793                                                   " for encap action");
2794                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2795                 switch (items->type) {
2796                 case RTE_FLOW_ITEM_TYPE_ETH:
2797                         eth = (struct rte_ether_hdr *)&buf[temp_size];
2798                         break;
2799                 case RTE_FLOW_ITEM_TYPE_VLAN:
2800                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2801                         if (!eth)
2802                                 return rte_flow_error_set(error, EINVAL,
2803                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2804                                                 (void *)items->type,
2805                                                 "eth header not found");
2806                         if (!eth->ether_type)
2807                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2808                         break;
2809                 case RTE_FLOW_ITEM_TYPE_IPV4:
2810                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2811                         if (!vlan && !eth)
2812                                 return rte_flow_error_set(error, EINVAL,
2813                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2814                                                 (void *)items->type,
2815                                                 "neither eth nor vlan"
2816                                                 " header found");
2817                         if (vlan && !vlan->eth_proto)
2818                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2819                         else if (eth && !eth->ether_type)
2820                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2821                         if (!ipv4->version_ihl)
2822                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
2823                                                     MLX5_ENCAP_IPV4_IHL_MIN;
2824                         if (!ipv4->time_to_live)
2825                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
2826                         break;
2827                 case RTE_FLOW_ITEM_TYPE_IPV6:
2828                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
2829                         if (!vlan && !eth)
2830                                 return rte_flow_error_set(error, EINVAL,
2831                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2832                                                 (void *)items->type,
2833                                                 "neither eth nor vlan"
2834                                                 " header found");
2835                         if (vlan && !vlan->eth_proto)
2836                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2837                         else if (eth && !eth->ether_type)
2838                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2839                         if (!ipv6->vtc_flow)
2840                                 ipv6->vtc_flow =
2841                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
2842                         if (!ipv6->hop_limits)
2843                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
2844                         break;
2845                 case RTE_FLOW_ITEM_TYPE_UDP:
2846                         udp = (struct rte_udp_hdr *)&buf[temp_size];
2847                         if (!ipv4 && !ipv6)
2848                                 return rte_flow_error_set(error, EINVAL,
2849                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2850                                                 (void *)items->type,
2851                                                 "ip header not found");
2852                         if (ipv4 && !ipv4->next_proto_id)
2853                                 ipv4->next_proto_id = IPPROTO_UDP;
2854                         else if (ipv6 && !ipv6->proto)
2855                                 ipv6->proto = IPPROTO_UDP;
2856                         break;
2857                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2858                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
2859                         if (!udp)
2860                                 return rte_flow_error_set(error, EINVAL,
2861                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2862                                                 (void *)items->type,
2863                                                 "udp header not found");
2864                         if (!udp->dst_port)
2865                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
2866                         if (!vxlan->vx_flags)
2867                                 vxlan->vx_flags =
2868                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
2869                         break;
2870                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2871                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
2872                         if (!udp)
2873                                 return rte_flow_error_set(error, EINVAL,
2874                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2875                                                 (void *)items->type,
2876                                                 "udp header not found");
2877                         if (!vxlan_gpe->proto)
2878                                 return rte_flow_error_set(error, EINVAL,
2879                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2880                                                 (void *)items->type,
2881                                                 "next protocol not found");
2882                         if (!udp->dst_port)
2883                                 udp->dst_port =
2884                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
2885                         if (!vxlan_gpe->vx_flags)
2886                                 vxlan_gpe->vx_flags =
2887                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
2888                         break;
2889                 case RTE_FLOW_ITEM_TYPE_GRE:
2890                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2891                         gre = (struct rte_gre_hdr *)&buf[temp_size];
2892                         if (!gre->proto)
2893                                 return rte_flow_error_set(error, EINVAL,
2894                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2895                                                 (void *)items->type,
2896                                                 "next protocol not found");
2897                         if (!ipv4 && !ipv6)
2898                                 return rte_flow_error_set(error, EINVAL,
2899                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2900                                                 (void *)items->type,
2901                                                 "ip header not found");
2902                         if (ipv4 && !ipv4->next_proto_id)
2903                                 ipv4->next_proto_id = IPPROTO_GRE;
2904                         else if (ipv6 && !ipv6->proto)
2905                                 ipv6->proto = IPPROTO_GRE;
2906                         break;
2907                 case RTE_FLOW_ITEM_TYPE_VOID:
2908                         break;
2909                 default:
2910                         return rte_flow_error_set(error, EINVAL,
2911                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2912                                                   (void *)items->type,
2913                                                   "unsupported item type");
2914                         break;
2915                 }
2916                 temp_size += len;
2917         }
2918         *size = temp_size;
2919         return 0;
2920 }
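
/*
 * Illustrative sketch (hypothetical addresses and VNI, not part of the
 * driver): building an END-terminated item list and flattening it with
 * flow_dv_convert_encap_data(). Fields left at zero (ether_type,
 * next_proto_id, dst_port, vx_flags) are filled with the defaults above.
 */
static int
example_flatten_vxlan_encap(uint8_t *buf, size_t *size,
                            struct rte_flow_error *error)
{
        static const struct rte_flow_item_eth eth = {
                .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
        };
        static const struct rte_flow_item_ipv4 ipv4 = {
                .hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
                .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
        };
        static const struct rte_flow_item_udp udp = { .hdr.dgram_cksum = 0 };
        static const struct rte_flow_item_vxlan vxlan = {
                .vni = "\x00\x00\x2a", /* VNI 42. */
        };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        /* buf must provide at least MLX5_ENCAP_MAX_LEN bytes. */
        return flow_dv_convert_encap_data(items, buf, size, error);
}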
2921
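/**
 * Clear the outer UDP checksum in a raw encapsulation buffer.
 *
 * (Editorial doc, not in the upstream source.) HW computes the outer IPv4
 * checksum but not the outer UDP one, so for an IPv6/UDP outer header the
 * checksum field is zeroed to mark it as not computed.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting with Ethernet.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */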
2922 static int
2923 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2924 {
2925         struct rte_ether_hdr *eth = NULL;
2926         struct rte_vlan_hdr *vlan = NULL;
2927         struct rte_ipv6_hdr *ipv6 = NULL;
2928         struct rte_udp_hdr *udp = NULL;
2929         char *next_hdr;
2930         uint16_t proto;
2931
2932         eth = (struct rte_ether_hdr *)data;
2933         next_hdr = (char *)(eth + 1);
2934         proto = RTE_BE16(eth->ether_type);
2935
2936         /* VLAN skipping */
2937         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2938                 vlan = (struct rte_vlan_hdr *)next_hdr;
2939                 proto = RTE_BE16(vlan->eth_proto);
2940                 next_hdr += sizeof(struct rte_vlan_hdr);
2941         }
2942
2943         /* HW calculates the IPv4 checksum; no need to proceed. */
2944         if (proto == RTE_ETHER_TYPE_IPV4)
2945                 return 0;
2946
2947         /* Non-IPv4/IPv6 header, not supported. */
2948         if (proto != RTE_ETHER_TYPE_IPV6) {
2949                 return rte_flow_error_set(error, ENOTSUP,
2950                                           RTE_FLOW_ERROR_TYPE_ACTION,
2951                                           NULL, "Cannot offload non IPv4/IPv6");
2952         }
2953
2954         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2955
2956         /* Ignore non-UDP packets. */
2957         if (ipv6->proto != IPPROTO_UDP)
2958                 return 0;
2959
2960         udp = (struct rte_udp_hdr *)(ipv6 + 1);
2961         udp->dgram_cksum = 0;
2962
2963         return 0;
2964 }
2965
2966 /**
2967  * Convert L2 encap action to DV specification.
2968  *
2969  * @param[in] dev
2970  *   Pointer to rte_eth_dev structure.
2971  * @param[in] action
2972  *   Pointer to action structure.
2973  * @param[in, out] dev_flow
2974  *   Pointer to the mlx5_flow.
2975  * @param[in] transfer
2976  *   Mark if the flow is E-Switch flow.
2977  * @param[out] error
2978  *   Pointer to the error structure.
2979  *
2980  * @return
2981  *   0 on success, a negative errno value otherwise and rte_errno is set.
2982  */
2983 static int
2984 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2985                                const struct rte_flow_action *action,
2986                                struct mlx5_flow *dev_flow,
2987                                uint8_t transfer,
2988                                struct rte_flow_error *error)
2989 {
2990         const struct rte_flow_item *encap_data;
2991         const struct rte_flow_action_raw_encap *raw_encap_data;
2992         struct mlx5_flow_dv_encap_decap_resource res = {
2993                 .reformat_type =
2994                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2995                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2996                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2997         };
2998
2999         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3000                 raw_encap_data =
3001                         (const struct rte_flow_action_raw_encap *)action->conf;
3002                 res.size = raw_encap_data->size;
3003                 memcpy(res.buf, raw_encap_data->data, res.size);
3004         } else {
3005                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
3006                         encap_data =
3007                                 ((const struct rte_flow_action_vxlan_encap *)
3008                                                 action->conf)->definition;
3009                 else
3010                         encap_data =
3011                                 ((const struct rte_flow_action_nvgre_encap *)
3012                                                 action->conf)->definition;
3013                 if (flow_dv_convert_encap_data(encap_data, res.buf,
3014                                                &res.size, error))
3015                         return -rte_errno;
3016         }
3017         if (flow_dv_zero_encap_udp_csum(res.buf, error))
3018                 return -rte_errno;
3019         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3020                 return rte_flow_error_set(error, EINVAL,
3021                                           RTE_FLOW_ERROR_TYPE_ACTION,
3022                                           NULL, "can't create L2 encap action");
3023         return 0;
3024 }
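
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): at the
 * rte_flow API level, a VXLAN encapsulation is requested with the action
 * below; its END-terminated .definition list is what
 * flow_dv_convert_encap_data() eventually flattens.
 */
static void
example_vxlan_encap_action(struct rte_flow_item *vxlan_items,
                           struct rte_flow_action_vxlan_encap *conf,
                           struct rte_flow_action actions[2])
{
        conf->definition = vxlan_items;
        actions[0].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
        actions[0].conf = conf;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;
        actions[1].conf = NULL;
}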
3025
3026 /**
3027  * Convert L2 decap action to DV specification.
3028  *
3029  * @param[in] dev
3030  *   Pointer to rte_eth_dev structure.
3031  * @param[in, out] dev_flow
3032  *   Pointer to the mlx5_flow.
3033  * @param[in] transfer
3034  *   Mark if the flow is E-Switch flow.
3035  * @param[out] error
3036  *   Pointer to the error structure.
3037  *
3038  * @return
3039  *   0 on success, a negative errno value otherwise and rte_errno is set.
3040  */
3041 static int
3042 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
3043                                struct mlx5_flow *dev_flow,
3044                                uint8_t transfer,
3045                                struct rte_flow_error *error)
3046 {
3047         struct mlx5_flow_dv_encap_decap_resource res = {
3048                 .size = 0,
3049                 .reformat_type =
3050                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
3051                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
3052                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
3053         };
3054
3055         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3056                 return rte_flow_error_set(error, EINVAL,
3057                                           RTE_FLOW_ERROR_TYPE_ACTION,
3058                                           NULL, "can't create L2 decap action");
3059         return 0;
3060 }
3061
3062 /**
3063  * Convert raw decap/encap (L3 tunnel) action to DV specification.
3064  *
3065  * @param[in] dev
3066  *   Pointer to rte_eth_dev structure.
3067  * @param[in] action
3068  *   Pointer to action structure.
3069  * @param[in, out] dev_flow
3070  *   Pointer to the mlx5_flow.
3071  * @param[in] attr
3072  *   Pointer to the flow attributes.
3073  * @param[out] error
3074  *   Pointer to the error structure.
3075  *
3076  * @return
3077  *   0 on success, a negative errno value otherwise and rte_errno is set.
3078  */
3079 static int
3080 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
3081                                 const struct rte_flow_action *action,
3082                                 struct mlx5_flow *dev_flow,
3083                                 const struct rte_flow_attr *attr,
3084                                 struct rte_flow_error *error)
3085 {
3086         const struct rte_flow_action_raw_encap *encap_data;
3087         struct mlx5_flow_dv_encap_decap_resource res;
3088
3089         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
3090         res.size = encap_data->size;
3091         memcpy(res.buf, encap_data->data, res.size);
3092         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
3093                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
3094                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
3095         if (attr->transfer)
3096                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3097         else
3098                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3099                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3100         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
3101                 return rte_flow_error_set(error, EINVAL,
3102                                           RTE_FLOW_ERROR_TYPE_ACTION,
3103                                           NULL, "can't create encap action");
3104         return 0;
3105 }
3106
3107 /**
3108  * Create action push VLAN.
3109  *
3110  * @param[in] dev
3111  *   Pointer to rte_eth_dev structure.
3112  * @param[in] attr
3113  *   Pointer to the flow attributes.
3114  * @param[in] vlan
3115  *   Pointer to the vlan to push to the Ethernet header.
3116  * @param[in, out] dev_flow
3117  *   Pointer to the mlx5_flow.
3118  * @param[out] error
3119  *   Pointer to the error structure.
3120  *
3121  * @return
3122  *   0 on success, a negative errno value otherwise and rte_errno is set.
3123  */
3124 static int
3125 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3126                                 const struct rte_flow_attr *attr,
3127                                 const struct rte_vlan_hdr *vlan,
3128                                 struct mlx5_flow *dev_flow,
3129                                 struct rte_flow_error *error)
3130 {
3131         struct mlx5_flow_dv_push_vlan_action_resource res;
3132
3133         res.vlan_tag =
3134                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3135                                  vlan->vlan_tci);
3136         if (attr->transfer)
3137                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3138         else
3139                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3140                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3141         return flow_dv_push_vlan_action_resource_register
3142                                             (dev, &res, dev_flow, error);
3143 }
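
/*
 * Illustrative sketch (hypothetical values, not part of the driver):
 * composing the same 32-bit tag by hand. For TPID 0x8100, PCP 3 and VID 100,
 * the TCI is (3 << 13) | 100 == 0x6064 and the resulting tag is
 * rte_cpu_to_be_32(0x81006064): TPID in the upper 16 bits, TCI in the lower.
 */
static rte_be32_t
example_push_vlan_tag(uint16_t tpid, uint8_t pcp, uint16_t vid)
{
        uint16_t tci = (uint16_t)(pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) |
                       (vid & MLX5DV_FLOW_VLAN_VID_MASK);

        return rte_cpu_to_be_32((uint32_t)tpid << 16 | tci);
}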
3144
3145 /**
3146  * Validate the modify-header actions.
3147  *
3148  * @param[in] action_flags
3149  *   Holds the actions detected until now.
3150  * @param[in] action
3151  *   Pointer to the modify action.
3152  * @param[out] error
3153  *   Pointer to error structure.
3154  *
3155  * @return
3156  *   0 on success, a negative errno value otherwise and rte_errno is set.
3157  */
3158 static int
3159 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3160                                    const struct rte_flow_action *action,
3161                                    struct rte_flow_error *error)
3162 {
3163         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3164                 return rte_flow_error_set(error, EINVAL,
3165                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3166                                           NULL, "action configuration not set");
3167         if (action_flags & MLX5_FLOW_ACTION_ENCAP)
3168                 return rte_flow_error_set(error, EINVAL,
3169                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3170                                           "can't have encap action before"
3171                                           " modify action");
3172         return 0;
3173 }
3174
3175 /**
3176  * Validate the modify-header MAC address actions.
3177  *
3178  * @param[in] action_flags
3179  *   Holds the actions detected until now.
3180  * @param[in] action
3181  *   Pointer to the modify action.
3182  * @param[in] item_flags
3183  *   Holds the items detected.
3184  * @param[out] error
3185  *   Pointer to error structure.
3186  *
3187  * @return
3188  *   0 on success, a negative errno value otherwise and rte_errno is set.
3189  */
3190 static int
3191 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3192                                    const struct rte_flow_action *action,
3193                                    const uint64_t item_flags,
3194                                    struct rte_flow_error *error)
3195 {
3196         int ret = 0;
3197
3198         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3199         if (!ret) {
3200                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3201                         return rte_flow_error_set(error, EINVAL,
3202                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3203                                                   NULL,
3204                                                   "no L2 item in pattern");
3205         }
3206         return ret;
3207 }
3208
3209 /**
3210  * Validate the modify-header IPv4 address actions.
3211  *
3212  * @param[in] action_flags
3213  *   Holds the actions detected until now.
3214  * @param[in] action
3215  *   Pointer to the modify action.
3216  * @param[in] item_flags
3217  *   Holds the items detected.
3218  * @param[out] error
3219  *   Pointer to error structure.
3220  *
3221  * @return
3222  *   0 on success, a negative errno value otherwise and rte_errno is set.
3223  */
3224 static int
3225 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3226                                     const struct rte_flow_action *action,
3227                                     const uint64_t item_flags,
3228                                     struct rte_flow_error *error)
3229 {
3230         int ret = 0;
3231         uint64_t layer;
3232
3233         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3234         if (!ret) {
3235                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3236                                  MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3237                                  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3238                 if (!(item_flags & layer))
3239                         return rte_flow_error_set(error, EINVAL,
3240                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3241                                                   NULL,
3242                                                   "no ipv4 item in pattern");
3243         }
3244         return ret;
3245 }
3246
3247 /**
3248  * Validate the modify-header IPv6 address actions.
3249  *
3250  * @param[in] action_flags
3251  *   Holds the actions detected until now.
3252  * @param[in] action
3253  *   Pointer to the modify action.
3254  * @param[in] item_flags
3255  *   Holds the items detected.
3256  * @param[out] error
3257  *   Pointer to error structure.
3258  *
3259  * @return
3260  *   0 on success, a negative errno value otherwise and rte_errno is set.
3261  */
3262 static int
3263 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3264                                     const struct rte_flow_action *action,
3265                                     const uint64_t item_flags,
3266                                     struct rte_flow_error *error)
3267 {
3268         int ret = 0;
3269         uint64_t layer;
3270
3271         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3272         if (!ret) {
3273                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3274                                  MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3275                                  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3276                 if (!(item_flags & layer))
3277                         return rte_flow_error_set(error, EINVAL,
3278                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3279                                                   NULL,
3280                                                   "no ipv6 item in pattern");
3281         }
3282         return ret;
3283 }
3284
3285 /**
3286  * Validate the modify-header TP actions.
3287  *
3288  * @param[in] action_flags
3289  *   Holds the actions detected until now.
3290  * @param[in] action
3291  *   Pointer to the modify action.
3292  * @param[in] item_flags
3293  *   Holds the items detected.
3294  * @param[out] error
3295  *   Pointer to error structure.
3296  *
3297  * @return
3298  *   0 on success, a negative errno value otherwise and rte_errno is set.
3299  */
3300 static int
3301 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3302                                   const struct rte_flow_action *action,
3303                                   const uint64_t item_flags,
3304                                   struct rte_flow_error *error)
3305 {
3306         int ret = 0;
3307         uint64_t layer;
3308
3309         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3310         if (!ret) {
3311                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3312                                  MLX5_FLOW_LAYER_INNER_L4 :
3313                                  MLX5_FLOW_LAYER_OUTER_L4;
3314                 if (!(item_flags & layer))
3315                         return rte_flow_error_set(error, EINVAL,
3316                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3317                                                   NULL, "no transport layer "
3318                                                   "in pattern");
3319         }
3320         return ret;
3321 }
3322
3323 /**
3324  * Validate the modify-header actions of increment/decrement
3325  * TCP Sequence-number.
3326  *
3327  * @param[in] action_flags
3328  *   Holds the actions detected until now.
3329  * @param[in] action
3330  *   Pointer to the modify action.
3331  * @param[in] item_flags
3332  *   Holds the items detected.
3333  * @param[out] error
3334  *   Pointer to error structure.
3335  *
3336  * @return
3337  *   0 on success, a negative errno value otherwise and rte_errno is set.
3338  */
3339 static int
3340 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3341                                        const struct rte_flow_action *action,
3342                                        const uint64_t item_flags,
3343                                        struct rte_flow_error *error)
3344 {
3345         int ret = 0;
3346         uint64_t layer;
3347
3348         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3349         if (!ret) {
3350                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3351                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3352                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3353                 if (!(item_flags & layer))
3354                         return rte_flow_error_set(error, EINVAL,
3355                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3356                                                   NULL, "no TCP item in"
3357                                                   " pattern");
3358                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3359                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3360                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3361                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3362                         return rte_flow_error_set(error, EINVAL,
3363                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3364                                                   NULL,
3365                                                   "cannot decrease and increase"
3366                                                   " TCP sequence number"
3367                                                   " at the same time");
3368         }
3369         return ret;
3370 }
3371
3372 /**
3373  * Validate the modify-header actions of increment/decrement
3374  * TCP Acknowledgment number.
3375  *
3376  * @param[in] action_flags
3377  *   Holds the actions detected until now.
3378  * @param[in] action
3379  *   Pointer to the modify action.
3380  * @param[in] item_flags
3381  *   Holds the items detected.
3382  * @param[out] error
3383  *   Pointer to error structure.
3384  *
3385  * @return
3386  *   0 on success, a negative errno value otherwise and rte_errno is set.
3387  */
3388 static int
3389 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3390                                        const struct rte_flow_action *action,
3391                                        const uint64_t item_flags,
3392                                        struct rte_flow_error *error)
3393 {
3394         int ret = 0;
3395         uint64_t layer;
3396
3397         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3398         if (!ret) {
3399                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3400                                  MLX5_FLOW_LAYER_INNER_L4_TCP :
3401                                  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3402                 if (!(item_flags & layer))
3403                         return rte_flow_error_set(error, EINVAL,
3404                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3405                                                   NULL, "no TCP item in"
3406                                                   " pattern");
3407                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3408                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3409                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3410                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3411                         return rte_flow_error_set(error, EINVAL,
3412                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3413                                                   NULL,
3414                                                   "cannot decrease and increase"
3415                                                   " TCP acknowledgment number"
3416                                                   " at the same time");
3417         }
3418         return ret;
3419 }
3420
3421 /**
3422  * Validate the modify-header TTL actions.
3423  *
3424  * @param[in] action_flags
3425  *   Holds the actions detected until now.
3426  * @param[in] action
3427  *   Pointer to the modify action.
3428  * @param[in] item_flags
3429  *   Holds the items detected.
3430  * @param[out] error
3431  *   Pointer to error structure.
3432  *
3433  * @return
3434  *   0 on success, a negative errno value otherwise and rte_errno is set.
3435  */
3436 static int
3437 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3438                                    const struct rte_flow_action *action,
3439                                    const uint64_t item_flags,
3440                                    struct rte_flow_error *error)
3441 {
3442         int ret = 0;
3443         uint64_t layer;
3444
3445         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3446         if (!ret) {
3447                 layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
3448                                  MLX5_FLOW_LAYER_INNER_L3 :
3449                                  MLX5_FLOW_LAYER_OUTER_L3;
3450                 if (!(item_flags & layer))
3451                         return rte_flow_error_set(error, EINVAL,
3452                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3453                                                   NULL,
3454                                                   "no IP protocol in pattern");
3455         }
3456         return ret;
3457 }
3458
3459 /**
3460  * Validate jump action.
3461  *
3462  * @param[in] action
3463  *   Pointer to the jump action.
3464  * @param[in] action_flags
3465  *   Holds the actions detected until now.
3466  * @param[in] attributes
3467  *   Pointer to flow attributes
3468  * @param[in] external
3469  *   Action belongs to flow rule created by request external to PMD.
3470  * @param[out] error
3471  *   Pointer to error structure.
3472  *
3473  * @return
3474  *   0 on success, a negative errno value otherwise and rte_errno is set.
3475  */
3476 static int
3477 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3478                              uint64_t action_flags,
3479                              const struct rte_flow_attr *attributes,
3480                              bool external, struct rte_flow_error *error)
3481 {
3482         uint32_t target_group, table;
3483         int ret = 0;
3484
3485         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3486                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3487                 return rte_flow_error_set(error, EINVAL,
3488                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3489                                           "can't have 2 fate actions in"
3490                                           " same flow");
3491         if (action_flags & MLX5_FLOW_ACTION_METER)
3492                 return rte_flow_error_set(error, ENOTSUP,
3493                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3494                                           "jump with meter not supported");
3495         if (!action->conf)
3496                 return rte_flow_error_set(error, EINVAL,
3497                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3498                                           NULL, "action configuration not set");
3499         target_group =
3500                 ((const struct rte_flow_action_jump *)action->conf)->group;
3501         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3502                                        true, &table, error);
3503         if (ret)
3504                 return ret;
3505         if (attributes->group == target_group)
3506                 return rte_flow_error_set(error, EINVAL,
3507                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3508                                           "target group must be other than"
3509                                           " the current flow group");
3510         return 0;
3511 }
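
/*
 * Illustrative sketch (hypothetical groups, not part of the driver): a jump
 * that passes the checks above: it is the only fate action and targets a
 * group other than the one the rule is created in.
 */
static const struct rte_flow_action_jump example_jump = { .group = 1 };
static const struct rte_flow_action example_jump_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &example_jump },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
/* Valid together with an attr whose .group differs from 1, e.g. group 0. */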
3512
3513 /**
3514  * Validate the port_id action.
3515  *
3516  * @param[in] dev
3517  *   Pointer to rte_eth_dev structure.
3518  * @param[in] action_flags
3519  *   Bit-fields that holds the actions detected until now.
3520  * @param[in] action
3521  *   Port_id RTE action structure.
3522  * @param[in] attr
3523  *   Attributes of flow that includes this action.
3524  * @param[out] error
3525  *   Pointer to error structure.
3526  *
3527  * @return
3528  *   0 on success, a negative errno value otherwise and rte_errno is set.
3529  */
3530 static int
3531 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3532                                 uint64_t action_flags,
3533                                 const struct rte_flow_action *action,
3534                                 const struct rte_flow_attr *attr,
3535                                 struct rte_flow_error *error)
3536 {
3537         const struct rte_flow_action_port_id *port_id;
3538         struct mlx5_priv *act_priv;
3539         struct mlx5_priv *dev_priv;
3540         uint16_t port;
3541
3542         if (!attr->transfer)
3543                 return rte_flow_error_set(error, ENOTSUP,
3544                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3545                                           NULL,
3546                                           "port id action is valid in transfer"
3547                                           " mode only");
3548         if (!action || !action->conf)
3549                 return rte_flow_error_set(error, ENOTSUP,
3550                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3551                                           NULL,
3552                                           "port id action parameters must be"
3553                                           " specified");
3554         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3555                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3556                 return rte_flow_error_set(error, EINVAL,
3557                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3558                                           "can have only one fate action in"
3559                                           " a flow");
3560         dev_priv = mlx5_dev_to_eswitch_info(dev);
3561         if (!dev_priv)
3562                 return rte_flow_error_set(error, rte_errno,
3563                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3564                                           NULL,
3565                                           "failed to obtain E-Switch info");
3566         port_id = action->conf;
3567         port = port_id->original ? dev->data->port_id : port_id->id;
3568         act_priv = mlx5_port_to_eswitch_info(port, false);
3569         if (!act_priv)
3570                 return rte_flow_error_set
3571                                 (error, rte_errno,
3572                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3573                                  "failed to obtain E-Switch port id for port");
3574         if (act_priv->domain_id != dev_priv->domain_id)
3575                 return rte_flow_error_set
3576                                 (error, EINVAL,
3577                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3578                                  "port does not belong to"
3579                                  " E-Switch being configured");
3580         return 0;
3581 }
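
/*
 * Illustrative sketch (hypothetical port id, not part of the driver): a
 * port_id action that satisfies the checks above. The rule must carry
 * attr->transfer == 1 and both ports must belong to the same E-Switch domain.
 */
static const struct rte_flow_action_port_id example_port_id = {
        .original = 0,
        .id = 1, /* Destination DPDK port, hypothetical. */
};
static const struct rte_flow_attr example_transfer_attr = {
        .group = 0,
        .transfer = 1,
};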
3582
3583 /**
3584  * Get the maximum number of modify header actions.
3585  *
3586  * @param dev
3587  *   Pointer to rte_eth_dev structure.
3588  * @param flags
3589  *   Flag bits used to check whether the table is at root level.
3590  *
3591  * @return
3592  *   Max number of modify header actions device can support.
3593  */
3594 static unsigned int
3595 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev, uint64_t flags)
3596 {
3597         /*
3598          * There's no way to directly query the max capacity. Although it
3599          * would have to be acquired by iterative trial, it is a safe
3600          * assumption that more actions are supported by FW if the
3601          * extensive metadata register is supported (root table only).
3602          */
3603         if (!(flags & MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL))
3604                 return MLX5_MAX_MODIFY_NUM;
3605         else
3606                 return mlx5_flow_ext_mreg_supported(dev) ?
3607                                         MLX5_ROOT_TBL_MODIFY_NUM :
3608                                         MLX5_ROOT_TBL_MODIFY_NUM_NO_MREG;
3609 }
3610
3611 /**
3612  * Validate the meter action.
3613  *
3614  * @param[in] dev
3615  *   Pointer to rte_eth_dev structure.
3616  * @param[in] action_flags
3617  *   Bit-fields that holds the actions detected until now.
3618  * @param[in] action
3619  *   Pointer to the meter action.
3620  * @param[in] attr
3621  *   Attributes of flow that includes this action.
3622  * @param[out] error
3623  *   Pointer to error structure.
3624  *
3625  * @return
3626  *   0 on success, a negative errno value otherwise and rte_errno is set.
3627  */
3628 static int
3629 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3630                                 uint64_t action_flags,
3631                                 const struct rte_flow_action *action,
3632                                 const struct rte_flow_attr *attr,
3633                                 struct rte_flow_error *error)
3634 {
3635         struct mlx5_priv *priv = dev->data->dev_private;
3636         const struct rte_flow_action_meter *am = action->conf;
3637         struct mlx5_flow_meter *fm;
3638
3639         if (!am)
3640                 return rte_flow_error_set(error, EINVAL,
3641                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3642                                           "meter action conf is NULL");
3643
3644         if (action_flags & MLX5_FLOW_ACTION_METER)
3645                 return rte_flow_error_set(error, ENOTSUP,
3646                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3647                                           "meter chaining not supported");
3648         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3649                 return rte_flow_error_set(error, ENOTSUP,
3650                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3651                                           "meter with jump not supported");
3652         if (!priv->mtr_en)
3653                 return rte_flow_error_set(error, ENOTSUP,
3654                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3655                                           NULL,
3656                                           "meter action not supported");
3657         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3658         if (!fm)
3659                 return rte_flow_error_set(error, EINVAL,
3660                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3661                                           "Meter not found");
3662         if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
3663               (!fm->attr.ingress && !attr->ingress && attr->egress) ||
3664               (!fm->attr.egress && !attr->egress && attr->ingress))))
3665                 return rte_flow_error_set(error, EINVAL,
3666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3667                                           "Flow attributes are either invalid "
3668                                           "or have a conflict with current "
3669                                           "meter attributes");
3670         return 0;
3671 }
3672
3673 /**
3674  * Validate the modify-header IPv4 DSCP actions.
3675  *
3676  * @param[in] action_flags
3677  *   Holds the actions detected until now.
3678  * @param[in] action
3679  *   Pointer to the modify action.
3680  * @param[in] item_flags
3681  *   Holds the items detected.
3682  * @param[out] error
3683  *   Pointer to error structure.
3684  *
3685  * @return
3686  *   0 on success, a negative errno value otherwise and rte_errno is set.
3687  */
3688 static int
3689 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3690                                          const struct rte_flow_action *action,
3691                                          const uint64_t item_flags,
3692                                          struct rte_flow_error *error)
3693 {
3694         int ret = 0;
3695
3696         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3697         if (!ret) {
3698                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3699                         return rte_flow_error_set(error, EINVAL,
3700                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3701                                                   NULL,
3702                                                   "no ipv4 item in pattern");
3703         }
3704         return ret;
3705 }
3706
3707 /**
3708  * Validate the modify-header IPv6 DSCP actions.
3709  *
3710  * @param[in] action_flags
3711  *   Holds the actions detected until now.
3712  * @param[in] action
3713  *   Pointer to the modify action.
3714  * @param[in] item_flags
3715  *   Holds the items detected.
3716  * @param[out] error
3717  *   Pointer to error structure.
3718  *
3719  * @return
3720  *   0 on success, a negative errno value otherwise and rte_errno is set.
3721  */
3722 static int
3723 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3724                                          const struct rte_flow_action *action,
3725                                          const uint64_t item_flags,
3726                                          struct rte_flow_error *error)
3727 {
3728         int ret = 0;
3729
3730         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3731         if (!ret) {
3732                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3733                         return rte_flow_error_set(error, EINVAL,
3734                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3735                                                   NULL,
3736                                                   "no ipv6 item in pattern");
3737         }
3738         return ret;
3739 }
3740
3741 /**
3742  * Find existing modify-header resource or create and register a new one.
3743  *
3744  * @param[in, out] dev
3745  *   Pointer to rte_eth_dev structure.
3746  * @param[in, out] resource
3747  *   Pointer to modify-header resource.
3748  * @param[in, out] dev_flow
3749  *   Pointer to the dev_flow.
3750  * @param[out] error
3751  *   Pointer to error structure.
3752  *
3753  * @return
3754  *   0 on success, otherwise -errno and errno is set.
3755  */
3756 static int
3757 flow_dv_modify_hdr_resource_register
3758                         (struct rte_eth_dev *dev,
3759                          struct mlx5_flow_dv_modify_hdr_resource *resource,
3760                          struct mlx5_flow *dev_flow,
3761                          struct rte_flow_error *error)
3762 {
3763         struct mlx5_priv *priv = dev->data->dev_private;
3764         struct mlx5_ibv_shared *sh = priv->sh;
3765         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3766         struct mlx5dv_dr_domain *ns;
3767         uint32_t actions_len;
3768
3769         resource->flags = dev_flow->dv.group ? 0 :
3770                           MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3771         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
3772                                     resource->flags))
3773                 return rte_flow_error_set(error, EOVERFLOW,
3774                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3775                                           "too many modify header items");
3776         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3777                 ns = sh->fdb_domain;
3778         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
3779                 ns = sh->tx_domain;
3780         else
3781                 ns = sh->rx_domain;
3782         /* Lookup a matching resource from cache. */
3783         actions_len = resource->actions_num * sizeof(resource->actions[0]);
3784         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
3785                 if (resource->ft_type == cache_resource->ft_type &&
3786                     resource->actions_num == cache_resource->actions_num &&
3787                     resource->flags == cache_resource->flags &&
3788                     !memcmp((const void *)resource->actions,
3789                             (const void *)cache_resource->actions,
3790                             actions_len)) {
3791                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
3792                                 (void *)cache_resource,
3793                                 rte_atomic32_read(&cache_resource->refcnt));
3794                         rte_atomic32_inc(&cache_resource->refcnt);
3795                         dev_flow->handle->dvh.modify_hdr = cache_resource;
3796                         return 0;
3797                 }
3798         }
3799         /* Register new modify-header resource. */
3800         cache_resource = rte_calloc(__func__, 1,
3801                                     sizeof(*cache_resource) + actions_len, 0);
3802         if (!cache_resource)
3803                 return rte_flow_error_set(error, ENOMEM,
3804                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3805                                           "cannot allocate resource memory");
3806         *cache_resource = *resource;
3807         rte_memcpy(cache_resource->actions, resource->actions, actions_len);
3808         cache_resource->verbs_action =
3809                 mlx5_glue->dv_create_flow_action_modify_header
3810                                         (sh->ctx, cache_resource->ft_type, ns,
3811                                          cache_resource->flags, actions_len,
3812                                          (uint64_t *)cache_resource->actions);
3813         if (!cache_resource->verbs_action) {
3814                 rte_free(cache_resource);
3815                 return rte_flow_error_set(error, ENOMEM,
3816                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3817                                           NULL, "cannot create action");
3818         }
3819         rte_atomic32_init(&cache_resource->refcnt);
3820         rte_atomic32_inc(&cache_resource->refcnt);
3821         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
3822         dev_flow->handle->dvh.modify_hdr = cache_resource;
3823         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
3824                 (void *)cache_resource,
3825                 rte_atomic32_read(&cache_resource->refcnt));
3826         return 0;
3827 }
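/*
 * Editor's note - a minimal, illustrative registration sketch (the resource
 * declaration is simplified: real callers must also reserve storage behind
 * resource->actions, and the "convert" step stands for any
 * flow_dv_convert_action_*() helper in this file):
 *
 *     res.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
 *     ... fill res.actions[]/res.actions_num via a convert helper ...
 *     if (flow_dv_modify_hdr_resource_register(dev, &res, dev_flow, error))
 *             return -rte_errno;
 *
 * On success dev_flow->handle->dvh.modify_hdr points at the cached entry;
 * registering an identical resource again only bumps its refcnt.
 */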
3828
3829 /**
3830  * Get DV flow counter by index.
3831  *
3832  * @param[in] dev
3833  *   Pointer to the Ethernet device structure.
3834  * @param[in] idx
3835  *   mlx5 flow counter index in the container.
3836  * @param[out] ppool
3837  *   mlx5 flow counter pool in the container,
3838  *
3839  * @return
3840  *   Pointer to the counter, NULL otherwise.
3841  */
3842 static struct mlx5_flow_counter *
3843 flow_dv_counter_get_by_idx(struct rte_eth_dev *dev,
3844                            uint32_t idx,
3845                            struct mlx5_flow_counter_pool **ppool)
3846 {
3847         struct mlx5_priv *priv = dev->data->dev_private;
3848         struct mlx5_pools_container *cont;
3849         struct mlx5_flow_counter_pool *pool;
3850         uint32_t batch = 0;
3851
3852         idx--;
3853         if (idx >= MLX5_CNT_BATCH_OFFSET) {
3854                 idx -= MLX5_CNT_BATCH_OFFSET;
3855                 batch = 1;
3856         }
3857         cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
3858         MLX5_ASSERT(idx / MLX5_COUNTERS_PER_POOL < cont->n);
3859         pool = cont->pools[idx / MLX5_COUNTERS_PER_POOL];
3860         MLX5_ASSERT(pool);
3861         if (ppool)
3862                 *ppool = pool;
3863         return &pool->counters_raw[idx % MLX5_COUNTERS_PER_POOL];
3864 }
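/*
 * Editor's note - worked example of the index decoding above, assuming
 * MLX5_COUNTERS_PER_POOL == 512 (see mlx5.h for the actual value).
 * External index 0 is reserved to mean "no counter", hence the idx--:
 *
 *     index 1                        -> batch 0, pool 0, offset 0
 *     index 513                      -> batch 0, pool 1, offset 0
 *     index MLX5_CNT_BATCH_OFFSET+1  -> batch 1, pool 0, offset 0
 */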
3865
3866 /**
3867  * Get a pool by devx counter ID.
3868  *
3869  * @param[in] cont
3870  *   Pointer to the counter container.
3871  * @param[in] id
3872  *   The counter devx ID.
3873  *
3874  * @return
3875  *   The counter pool pointer if it exists, NULL otherwise.
3876  */
3877 static struct mlx5_flow_counter_pool *
3878 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
3879 {
3880         uint32_t i;
3881         uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
3882
3883         for (i = 0; i < n_valid; i++) {
3884                 struct mlx5_flow_counter_pool *pool = cont->pools[i];
3885                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
3886                            MLX5_COUNTERS_PER_POOL;
3887
3888                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL) {
3889                         /*
3890                          * Move the pool to the head, as counter allocation
3891                          * always takes the first pool in the container.
3892                          */
3893                         if (pool != TAILQ_FIRST(&cont->pool_list)) {
3894                                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3895                                 TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
3896                         }
3897                         return pool;
3898                 }
3899         }
3900         return NULL;
3901 }
3902
3903 /**
3904  * Allocate memory for the counter values, wrapped by all the needed
3905  * management structures.
3906  *
3907  * @param[in] dev
3908  *   Pointer to the Ethernet device structure.
3909  * @param[in] raws_n
3910  *   The number of raw memory areas, each one for MLX5_COUNTERS_PER_POOL counters.
3911  *
3912  * @return
3913  *   The new memory management pointer on success, otherwise NULL and rte_errno
3914  *   is set.
3915  */
3916 static struct mlx5_counter_stats_mem_mng *
3917 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
3918 {
3919         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
3920                                         (dev->data->dev_private))->sh;
3921         struct mlx5_devx_mkey_attr mkey_attr;
3922         struct mlx5_counter_stats_mem_mng *mem_mng;
3923         volatile struct flow_counter_stats *raw_data;
3924         int size = (sizeof(struct flow_counter_stats) *
3925                         MLX5_COUNTERS_PER_POOL +
3926                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
3927                         sizeof(struct mlx5_counter_stats_mem_mng);
3928         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
3929         int i;
3930
3931         if (!mem) {
3932                 rte_errno = ENOMEM;
3933                 return NULL;
3934         }
3935         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
3936         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
3937         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
3938                                                  IBV_ACCESS_LOCAL_WRITE);
3939         if (!mem_mng->umem) {
3940                 rte_errno = errno;
3941                 rte_free(mem);
3942                 return NULL;
3943         }
3944         mkey_attr.addr = (uintptr_t)mem;
3945         mkey_attr.size = size;
3946         mkey_attr.umem_id = mem_mng->umem->umem_id;
3947         mkey_attr.pd = sh->pdn;
3948         mkey_attr.log_entity_size = 0;
3949         mkey_attr.pg_access = 0;
3950         mkey_attr.klm_array = NULL;
3951         mkey_attr.klm_num = 0;
3952         mkey_attr.relaxed_ordering = 1;
3953         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
3954         if (!mem_mng->dm) {
3955                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
3956                 rte_errno = errno;
3957                 rte_free(mem);
3958                 return NULL;
3959         }
3960         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3961         raw_data = (volatile struct flow_counter_stats *)mem;
3962         for (i = 0; i < raws_n; ++i) {
3963                 mem_mng->raws[i].mem_mng = mem_mng;
3964                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3965         }
3966         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3967         return mem_mng;
3968 }
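/*
 * Editor's sketch of the single allocation built above (one contiguous,
 * page-aligned buffer; only the leading raw-data region is registered with
 * the device via umem/mkey, the trailing structures are host bookkeeping):
 *
 *     mem -> +----------------------------------------------------+
 *            | raws_n * MLX5_COUNTERS_PER_POOL flow_counter_stats |
 *            +----------------------------------------------------+
 *            | raws_n  * struct mlx5_counter_stats_raw  (->raws)  |
 *            +----------------------------------------------------+
 *            | struct mlx5_counter_stats_mem_mng        (mem_mng) |
 *            +----------------------------------------------------+
 */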
3969
3970 /**
3971  * Resize a counter container.
3972  *
3973  * @param[in] dev
3974  *   Pointer to the Ethernet device structure.
3975  * @param[in] batch
3976  *   Whether the pool is for counters that were allocated by a batch command.
3977  *
3978  * @return
3979  *   The new container pointer on success, otherwise NULL and rte_errno is set.
3980  */
3981 static struct mlx5_pools_container *
3982 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3983 {
3984         struct mlx5_priv *priv = dev->data->dev_private;
3985         struct mlx5_pools_container *cont =
3986                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
3987         struct mlx5_pools_container *new_cont =
3988                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
3989         struct mlx5_counter_stats_mem_mng *mem_mng = NULL;
3990         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
3991         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
3992         int i;
3993
3994         /* Fallback mode has no background thread. Skip the check. */
3995         if (!priv->counter_fallback &&
3996             cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
3997                 /* The last resize hasn't been detected by the host thread yet. */
3998                 rte_errno = EAGAIN;
3999                 return NULL;
4000         }
4001         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
4002         if (!new_cont->pools) {
4003                 rte_errno = ENOMEM;
4004                 return NULL;
4005         }
4006         if (cont->n)
4007                 memcpy(new_cont->pools, cont->pools, cont->n *
4008                        sizeof(struct mlx5_flow_counter_pool *));
4009         /*
4010          * Fallback mode queries the counters directly; no background query
4011          * resources are needed.
4012          */
4013         if (!priv->counter_fallback) {
4014                 mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
4015                         MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
4016                 if (!mem_mng) {
4017                         rte_free(new_cont->pools);
4018                         return NULL;
4019                 }
4020                 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
4021                         LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
4022                                          mem_mng->raws +
4023                                          MLX5_CNT_CONTAINER_RESIZE +
4024                                          i, next);
4025         } else {
4026                 /*
4027                  * Release the old container pools directly, as there is no
4028                  * background thread to do it.
4029                  */
4030                 rte_free(cont->pools);
4031         }
4032         new_cont->n = resize;
4033         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
4034         TAILQ_INIT(&new_cont->pool_list);
4035         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
4036         new_cont->init_mem_mng = mem_mng;
4037         rte_cio_wmb();
4038         /* Flip the master container. */
4039         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
4040         return new_cont;
4041 }
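/*
 * Editor's note: containers are double-buffered per batch type. The resize
 * above fills the unused copy, publishes it with rte_cio_wmb() and flips
 * cmng.mhi[batch], so the background query thread never sees a half-copied
 * pools[] array. Until that thread observes the flip (the
 * MLX5_CNT_CONTAINER(sh, batch, 1) check at the top), a further resize is
 * refused with EAGAIN.
 */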
4042
4043 /**
4044  * Query a devx flow counter.
4045  *
4046  * @param[in] dev
4047  *   Pointer to the Ethernet device structure.
4048  * @param[in] counter
4049  *   Index to the flow counter.
4050  * @param[out] pkts
4051  *   The statistics value of packets.
4052  * @param[out] bytes
4053  *   The statistics value of bytes.
4054  *
4055  * @return
4056  *   0 on success, otherwise a negative errno value and rte_errno is set.
4057  */
4058 static inline int
4059 _flow_dv_query_count(struct rte_eth_dev *dev, uint32_t counter, uint64_t *pkts,
4060                      uint64_t *bytes)
4061 {
4062         struct mlx5_priv *priv = dev->data->dev_private;
4063         struct mlx5_flow_counter_pool *pool = NULL;
4064         struct mlx5_flow_counter *cnt;
4065         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4066         int offset;
4067
4068         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4069         MLX5_ASSERT(pool);
4070         if (counter < MLX5_CNT_BATCH_OFFSET) {
4071                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4072                 if (priv->counter_fallback)
4073                         return mlx5_devx_cmd_flow_counter_query(cnt_ext->dcs, 0,
4074                                         0, pkts, bytes, 0, NULL, NULL, 0);
4075         }
4076
4077         rte_spinlock_lock(&pool->sl);
4078         /*
4079          * A single-counter allocation, done in parallel to the host reading,
4080          * may produce an ID smaller than the one covered by the current raw
4081          * data. In this case the new counter values must be reported as 0.
4082          */
4083         if (unlikely(cnt_ext && cnt_ext->dcs->id < pool->raw->min_dcs_id)) {
4084                 *pkts = 0;
4085                 *bytes = 0;
4086         } else {
4087                 offset = cnt - &pool->counters_raw[0];
4088                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
4089                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
4090         }
4091         rte_spinlock_unlock(&pool->sl);
4092         return 0;
4093 }
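/*
 * Editor's note - minimal usage sketch (error handling elided; cnt_idx is
 * assumed to come from flow_dv_counter_alloc()):
 *
 *     uint64_t pkts, bytes;
 *
 *     if (!_flow_dv_query_count(dev, cnt_idx, &pkts, &bytes))
 *             DRV_LOG(DEBUG, "hits=%" PRIu64 " bytes=%" PRIu64,
 *                     pkts, bytes);
 *
 * The readings are absolute; callers that need deltas subtract the reset
 * values saved in cnt->hits/cnt->bytes at allocation time.
 */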
4094
4095 /**
4096  * Create and initialize a new counter pool.
4097  *
4098  * @param[in] dev
4099  *   Pointer to the Ethernet device structure.
4100  * @param[in] dcs
4101  *   The devX counter handle.
4102  * @param[in] batch
4103  *   Whether the pool is for counters that were allocated by a batch command.
4106  *
4107  * @return
4108  *   The pool container pointer on success, NULL otherwise and rte_errno is set.
4109  */
4110 static struct mlx5_pools_container *
4111 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4112                     uint32_t batch)
4113 {
4114         struct mlx5_priv *priv = dev->data->dev_private;
4115         struct mlx5_flow_counter_pool *pool;
4116         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4117                                                                0);
4118         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4119         uint32_t size;
4120
4121         if (cont->n == n_valid) {
4122                 cont = flow_dv_container_resize(dev, batch);
4123                 if (!cont)
4124                         return NULL;
4125         }
4126         size = sizeof(*pool);
4127         if (!batch)
4128                 size += MLX5_COUNTERS_PER_POOL *
4129                         sizeof(struct mlx5_flow_counter_ext);
4130         pool = rte_calloc(__func__, 1, size, 0);
4131         if (!pool) {
4132                 rte_errno = ENOMEM;
4133                 return NULL;
4134         }
4135         pool->min_dcs = dcs;
4136         if (!priv->counter_fallback)
4137                 pool->raw = cont->init_mem_mng->raws + n_valid %
4138                                                      MLX5_CNT_CONTAINER_RESIZE;
4139         pool->raw_hw = NULL;
4140         rte_spinlock_init(&pool->sl);
4141         /*
4142          * The generation of the newly allocated counters in this pool is 0;
4143          * setting the pool generation to 2 makes all the counters valid for
4144          * allocation. The start and end query generations ensure that counters
4145          * released in the gap between a query and its update cannot be
4146          * reallocated before that query finishes and the stats reach memory.
4147          */
4148         rte_atomic64_set(&pool->start_query_gen, 0x2);
4149         /*
4150          * There is no background query thread in fallback mode; set
4151          * end_query_gen to the maximum value since there is no need to wait
4152          * for a statistics update.
4153          */
4154         rte_atomic64_set(&pool->end_query_gen, priv->counter_fallback ?
4155                          INT64_MAX : 0x2);
4156         TAILQ_INIT(&pool->counters);
4157         TAILQ_INSERT_HEAD(&cont->pool_list, pool, next);
4158         pool->index = n_valid;
4159         cont->pools[n_valid] = pool;
4160         /* Pool initialization must complete before the host thread accesses it. */
4161         rte_cio_wmb();
4162         rte_atomic16_add(&cont->n_valid, 1);
4163         return cont;
4164 }
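/*
 * Editor's note: each new pool claims the next raw slot via
 * n_valid % MLX5_CNT_CONTAINER_RESIZE, so the raw areas allocated with one
 * resize chunk are consumed one per pool. Illustratively, with a resize
 * step of 8, pools 0..7 map to raws 0..7 of the first chunk and pool 8
 * (the first pool of the next chunk, with its own mem_mng) maps to raw 0
 * of that chunk; the real step value is defined in mlx5.h.
 */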
4165
4166 /**
4167  * Prepare a new counter and/or a new counter pool.
4168  *
4169  * @param[in] dev
4170  *   Pointer to the Ethernet device structure.
4171  * @param[out] cnt_free
4172  *   Where to put the pointer of a new counter.
4173  * @param[in] batch
4174  *   Whether the pool is for counters that were allocated by a batch command.
4175  *
4176  * @return
4177  *   The counter container pointer and @p cnt_free is set on success,
4178  *   NULL otherwise and rte_errno is set.
4179  */
4180 static struct mlx5_pools_container *
4181 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4182                              struct mlx5_flow_counter **cnt_free,
4183                              uint32_t batch)
4184 {
4185         struct mlx5_priv *priv = dev->data->dev_private;
4186         struct mlx5_pools_container *cont;
4187         struct mlx5_flow_counter_pool *pool;
4188         struct mlx5_devx_obj *dcs = NULL;
4189         struct mlx5_flow_counter *cnt;
4190         uint32_t i;
4191
4192         cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);
4193         if (!batch) {
4194                 /* bulk_bitmap must be 0 for single counter allocation. */
4195                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4196                 if (!dcs)
4197                         return NULL;
4198                 pool = flow_dv_find_pool_by_id(cont, dcs->id);
4199                 if (!pool) {
4200                         cont = flow_dv_pool_create(dev, dcs, batch);
4201                         if (!cont) {
4202                                 mlx5_devx_cmd_destroy(dcs);
4203                                 return NULL;
4204                         }
4205                         pool = TAILQ_FIRST(&cont->pool_list);
4206                 } else if (dcs->id < pool->min_dcs->id) {
4207                         rte_atomic64_set(&pool->a64_dcs,
4208                                          (int64_t)(uintptr_t)dcs);
4209                 }
4210                 i = dcs->id % MLX5_COUNTERS_PER_POOL;
4211                 cnt = &pool->counters_raw[i];
4212                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4213                 MLX5_GET_POOL_CNT_EXT(pool, i)->dcs = dcs;
4214                 *cnt_free = cnt;
4215                 return cont;
4216         }
4217         /* bulk_bitmap is in 128-counter units. */
4218         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4219                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4220         if (!dcs) {
4221                 rte_errno = ENODATA;
4222                 return NULL;
4223         }
4224         cont = flow_dv_pool_create(dev, dcs, batch);
4225         if (!cont) {
4226                 mlx5_devx_cmd_destroy(dcs);
4227                 return NULL;
4228         }
4229         pool = TAILQ_FIRST(&cont->pool_list);
4230         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4231                 cnt = &pool->counters_raw[i];
4232                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4233         }
4234         *cnt_free = &pool->counters_raw[0];
4235         return cont;
4236 }
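/*
 * Editor's note on the 0x4 bulk bitmap above: devx expresses bulk
 * allocation in 128-counter units, so bit 2 (0x4) requests
 * 4 * 128 = 512 counters - one full pool, assuming
 * MLX5_COUNTERS_PER_POOL == 512.
 */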
4237
4238 /**
4239  * Search for an existing shared counter.
4240  *
4241  * @param[in] cont
4242  *   Pointer to the relevant counter pool container.
4243  * @param[in] id
4244  *   The shared counter ID to search.
4245  * @param[out] ppool
4246  *   mlx5 flow counter pool in the container.
4247  *
4248  * @return
4249  *   NULL if it does not exist, otherwise a pointer to the shared extended counter.
4250  */
4251 static struct mlx5_flow_counter_ext *
4252 flow_dv_counter_shared_search(struct mlx5_pools_container *cont, uint32_t id,
4253                               struct mlx5_flow_counter_pool **ppool)
4254 {
4255         struct mlx5_flow_counter_ext *cnt;
4256         struct mlx5_flow_counter_pool *pool;
4257         uint32_t i, j;
4258         uint32_t n_valid = rte_atomic16_read(&cont->n_valid);
4259
4260         for (i = 0; i < n_valid; i++) {
4261                 pool = cont->pools[i];
4262                 for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j) {
4263                         cnt = MLX5_GET_POOL_CNT_EXT(pool, j);
4264                         if (cnt->ref_cnt && cnt->shared && cnt->id == id) {
4265                                 if (ppool)
4266                                         *ppool = pool;
4267                                 return cnt;
4268                         }
4269                 }
4270         }
4271         return NULL;
4272 }
4273
4274 /**
4275  * Allocate a flow counter.
4276  *
4277  * @param[in] dev
4278  *   Pointer to the Ethernet device structure.
4279  * @param[in] shared
4280  *   Indicate if this counter is shared with other flows.
4281  * @param[in] id
4282  *   Counter identifier.
4283  * @param[in] group
4284  *   Counter flow group.
4285  *
4286  * @return
4287  *   Index to flow counter on success, 0 otherwise and rte_errno is set.
4288  */
4289 static uint32_t
4290 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4291                       uint16_t group)
4292 {
4293         struct mlx5_priv *priv = dev->data->dev_private;
4294         struct mlx5_flow_counter_pool *pool = NULL;
4295         struct mlx5_flow_counter *cnt_free = NULL;
4296         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4297         /*
4298          * Currently group 0 flow counter cannot be assigned to a flow if it is
4299          * not the first one in the batch counter allocation, so it is better
4300          * to allocate counters one by one for these flows in a separate
4301          * container.
4302          * A counter can be shared between different groups, so shared
4303          * counters must be taken from the single-counter container.
4304          */
4305         uint32_t batch = (group && !shared && !priv->counter_fallback) ? 1 : 0;
4306         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4307                                                                0);
4308         uint32_t cnt_idx;
4309
4310         if (!priv->config.devx) {
4311                 rte_errno = ENOTSUP;
4312                 return 0;
4313         }
4314         if (shared) {
4315                 cnt_ext = flow_dv_counter_shared_search(cont, id, &pool);
4316                 if (cnt_ext) {
4317                         if (cnt_ext->ref_cnt + 1 == 0) {
4318                                 rte_errno = E2BIG;
4319                                 return 0;
4320                         }
4321                         cnt_ext->ref_cnt++;
4322                         cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL +
4323                                   (cnt_ext->dcs->id % MLX5_COUNTERS_PER_POOL)
4324                                   + 1;
4325                         return cnt_idx;
4326                 }
4327         }
4328         /* Pools with free counters are at the start of the list. */
4329         TAILQ_FOREACH(pool, &cont->pool_list, next) {
4330                 /*
4331                  * The reset values of a freed counter must be refreshed
4332                  * between its release and its next allocation, so at least
4333                  * one query must complete in that window; ensure it by
4334                  * saving the query generation at release time.
4335                  * The free list is sorted by generation, so if the first
4336                  * counter is not updated yet, all the others are not
4337                  * updated either.
4338                  */
4339                 cnt_free = TAILQ_FIRST(&pool->counters);
4340                 if (cnt_free && cnt_free->query_gen <
4341                     rte_atomic64_read(&pool->end_query_gen))
4342                         break;
4343                 cnt_free = NULL;
4344         }
4345         if (!cnt_free) {
4346                 cont = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
4347                 if (!cont)
4348                         return 0;
4349                 pool = TAILQ_FIRST(&cont->pool_list);
4350         }
4351         if (!batch)
4352                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt_free);
4353         /* Create a DV counter action only on first use. */
4354         if (!cnt_free->action) {
4355                 uint16_t offset;
4356                 struct mlx5_devx_obj *dcs;
4357
4358                 if (batch) {
4359                         offset = cnt_free - &pool->counters_raw[0];
4360                         dcs = pool->min_dcs;
4361                 } else {
4362                         offset = 0;
4363                         dcs = cnt_ext->dcs;
4364                 }
4365                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
4366                                         (dcs->obj, offset);
4367                 if (!cnt_free->action) {
4368                         rte_errno = errno;
4369                         return 0;
4370                 }
4371         }
4372         cnt_idx = MLX5_MAKE_CNT_IDX(pool->index,
4373                                     (cnt_free - pool->counters_raw));
4374         cnt_idx += batch * MLX5_CNT_BATCH_OFFSET;
4375         /* Update the counter reset values. */
4376         if (_flow_dv_query_count(dev, cnt_idx, &cnt_free->hits,
4377                                  &cnt_free->bytes))
4378                 return 0;
4379         if (cnt_ext) {
4380                 cnt_ext->shared = shared;
4381                 cnt_ext->ref_cnt = 1;
4382                 cnt_ext->id = id;
4383         }
4384         if (!priv->counter_fallback && !priv->sh->cmng.query_thread_on)
4385                 /* Start the asynchronous batch query by the host thread. */
4386                 mlx5_set_query_alarm(priv->sh);
4387         TAILQ_REMOVE(&pool->counters, cnt_free, next);
4388         if (TAILQ_EMPTY(&pool->counters)) {
4389                 /* Move the pool to the end of the container pool list. */
4390                 TAILQ_REMOVE(&cont->pool_list, pool, next);
4391                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4392         }
4393         return cnt_idx;
4394 }
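/*
 * Editor's note - how the returned index is composed (the inverse of
 * flow_dv_counter_get_by_idx(); MLX5_MAKE_CNT_IDX is assumed to expand to
 * pool_index * MLX5_COUNTERS_PER_POOL + offset + 1):
 *
 *     cnt_idx = pool->index * MLX5_COUNTERS_PER_POOL
 *               + (cnt_free - pool->counters_raw) + 1
 *               + batch * MLX5_CNT_BATCH_OFFSET;
 */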
4395
4396 /**
4397  * Release a flow counter.
4398  *
4399  * @param[in] dev
4400  *   Pointer to the Ethernet device structure.
4401  * @param[in] counter
4402  *   Index to the counter handler.
4403  */
4404 static void
4405 flow_dv_counter_release(struct rte_eth_dev *dev, uint32_t counter)
4406 {
4407         struct mlx5_flow_counter_pool *pool = NULL;
4408         struct mlx5_flow_counter *cnt;
4409         struct mlx5_flow_counter_ext *cnt_ext = NULL;
4410
4411         if (!counter)
4412                 return;
4413         cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
4414         MLX5_ASSERT(pool);
4415         if (counter < MLX5_CNT_BATCH_OFFSET) {
4416                 cnt_ext = MLX5_CNT_TO_CNT_EXT(pool, cnt);
4417                 if (cnt_ext && --cnt_ext->ref_cnt)
4418                         return;
4419         }
4420         /* Put the counter at the end - it has the newest query generation. */
4421         TAILQ_INSERT_TAIL(&pool->counters, cnt, next);
4422         /*
4423          * Counters released between the query trigger and its handler
4424          * need to wait for the next query round, since packets arriving
4425          * in the gap period are not accounted to the old counter
4426          * values.
4427          */
4428         cnt->query_gen = rte_atomic64_read(&pool->start_query_gen);
4429 }
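/*
 * Editor's sketch of the release/allocate handshake; the generation values
 * are illustrative (the exact bookkeeping lives in the query alarm code),
 * only the query_gen < end_query_gen reuse condition is taken from this
 * file:
 *
 *     release:   cnt->query_gen = start_query_gen            e.g. 5
 *     query 5 completes, stats land in host memory           end_query_gen = 6
 *     allocate:  reuse cnt only if query_gen < end_query_gen (5 < 6)
 *
 * A counter is therefore never handed out before the query covering its
 * final pre-release traffic has been written back to host memory.
 */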
4430
4431 /**
4432  * Verify the @p attributes will be correctly understood by the NIC and store
4433  * them in the @p flow if everything is correct.
4434  *
4435  * @param[in] dev
4436  *   Pointer to dev struct.
4437  * @param[in] attributes
4438  *   Pointer to flow attributes
4439  * @param[in] external
4440  *   This flow rule is created by a request external to the PMD.
4441  * @param[out] error
4442  *   Pointer to error structure.
4443  *
4444  * @return
4445  *   0 on success, a negative errno value otherwise and rte_errno is set.
4446  */
4447 static int
4448 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4449                             const struct rte_flow_attr *attributes,
4450                             bool external __rte_unused,
4451                             struct rte_flow_error *error)
4452 {
4453         struct mlx5_priv *priv = dev->data->dev_private;
4454         uint32_t priority_max = priv->config.flow_prio - 1;
4455
4456 #ifndef HAVE_MLX5DV_DR
4457         if (attributes->group)
4458                 return rte_flow_error_set(error, ENOTSUP,
4459                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4460                                           NULL,
4461                                           "groups are not supported");
4462 #else
4463         uint32_t table;
4464         int ret;
4465
4466         ret = mlx5_flow_group_to_table(attributes, external,
4467                                        attributes->group, !!priv->fdb_def_rule,
4468                                        &table, error);
4469         if (ret)
4470                 return ret;
4471 #endif
4472         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4473             attributes->priority >= priority_max)
4474                 return rte_flow_error_set(error, ENOTSUP,
4475                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4476                                           NULL,
4477                                           "priority out of range");
4478         if (attributes->transfer) {
4479                 if (!priv->config.dv_esw_en)
4480                         return rte_flow_error_set
4481                                 (error, ENOTSUP,
4482                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4483                                  "E-Switch dr is not supported");
4484                 if (!(priv->representor || priv->master))
4485                         return rte_flow_error_set
4486                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4487                                  NULL, "E-Switch configuration can only be"
4488                                  " done by a master or a representor device");
4489                 if (attributes->egress)
4490                         return rte_flow_error_set
4491                                 (error, ENOTSUP,
4492                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4493                                  "egress is not supported");
4494         }
4495         if (!(attributes->egress ^ attributes->ingress))
4496                 return rte_flow_error_set(error, ENOTSUP,
4497                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4498                                           "must specify exactly one of "
4499                                           "ingress or egress");
4500         return 0;
4501 }
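/*
 * Editor's example - attributes that pass the checks above (illustrative;
 * group > 0 additionally requires HAVE_MLX5DV_DR):
 *
 *     struct rte_flow_attr attr = {
 *             .group = 1,
 *             .priority = 0,
 *             .ingress = 1,     (exactly one of ingress/egress must be set)
 *     };
 *
 * With .transfer = 1 the port must also be a master or representor with
 * dv_esw_en enabled, and .egress must be 0.
 */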
4502
4503 /**
4504  * Internal validation function. For validating both actions and items.
4505  *
4506  * @param[in] dev
4507  *   Pointer to the rte_eth_dev structure.
4508  * @param[in] attr
4509  *   Pointer to the flow attributes.
4510  * @param[in] items
4511  *   Pointer to the list of items.
4512  * @param[in] actions
4513  *   Pointer to the list of actions.
4514  * @param[in] external
4515  *   This flow rule is created by a request external to the PMD.
4516  * @param[out] error
4517  *   Pointer to the error structure.
4518  *
4519  * @return
4520  *   0 on success, a negative errno value otherwise and rte_errno is set.
4521  */
4522 static int
4523 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4524                  const struct rte_flow_item items[],
4525                  const struct rte_flow_action actions[],
4526                  bool external, struct rte_flow_error *error)
4527 {
4528         int ret;
4529         uint64_t action_flags = 0;
4530         uint64_t item_flags = 0;
4531         uint64_t last_item = 0;
4532         uint8_t next_protocol = 0xff;
4533         uint16_t ether_type = 0;
4534         int actions_n = 0;
4535         uint8_t item_ipv6_proto = 0;
4536         const struct rte_flow_item *gre_item = NULL;
4537         const struct rte_flow_action_raw_decap *decap;
4538         const struct rte_flow_action_raw_encap *encap;
4539         const struct rte_flow_action_rss *rss;
4540         const struct rte_flow_item_tcp nic_tcp_mask = {
4541                 .hdr = {
4542                         .tcp_flags = 0xFF,
4543                         .src_port = RTE_BE16(UINT16_MAX),
4544                         .dst_port = RTE_BE16(UINT16_MAX),
4545                 }
4546         };
4547         const struct rte_flow_item_ipv4 nic_ipv4_mask = {
4548                 .hdr = {
4549                         .src_addr = RTE_BE32(0xffffffff),
4550                         .dst_addr = RTE_BE32(0xffffffff),
4551                         .type_of_service = 0xff,
4552                         .next_proto_id = 0xff,
4553                         .time_to_live = 0xff,
4554                 },
4555         };
4556         const struct rte_flow_item_ipv6 nic_ipv6_mask = {
4557                 .hdr = {
4558                         .src_addr =
4559                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4560                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4561                         .dst_addr =
4562                         "\xff\xff\xff\xff\xff\xff\xff\xff"
4563                         "\xff\xff\xff\xff\xff\xff\xff\xff",
4564                         .vtc_flow = RTE_BE32(0xffffffff),
4565                         .proto = 0xff,
4566                         .hop_limits = 0xff,
4567                 },
4568         };
4569         struct mlx5_priv *priv = dev->data->dev_private;
4570         struct mlx5_dev_config *dev_conf = &priv->config;
4571         uint16_t queue_index = 0xFFFF;
4572
4573         if (items == NULL)
4574                 return -1;
4575         ret = flow_dv_validate_attributes(dev, attr, external, error);
4576         if (ret < 0)
4577                 return ret;
4578         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4579                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4580                 int type = items->type;
4581
4582                 switch (type) {
4583                 case RTE_FLOW_ITEM_TYPE_VOID:
4584                         break;
4585                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4586                         ret = flow_dv_validate_item_port_id
4587                                         (dev, items, attr, item_flags, error);
4588                         if (ret < 0)
4589                                 return ret;
4590                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4591                         break;
4592                 case RTE_FLOW_ITEM_TYPE_ETH:
4593                         ret = mlx5_flow_validate_item_eth(items, item_flags,
4594                                                           error);
4595                         if (ret < 0)
4596                                 return ret;
4597                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4598                                              MLX5_FLOW_LAYER_OUTER_L2;
4599                         if (items->mask != NULL && items->spec != NULL) {
4600                                 ether_type =
4601                                         ((const struct rte_flow_item_eth *)
4602                                          items->spec)->type;
4603                                 ether_type &=
4604                                         ((const struct rte_flow_item_eth *)
4605                                          items->mask)->type;
4606                                 ether_type = rte_be_to_cpu_16(ether_type);
4607                         } else {
4608                                 ether_type = 0;
4609                         }
4610                         break;
4611                 case RTE_FLOW_ITEM_TYPE_VLAN:
4612                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
4613                                                            dev, error);
4614                         if (ret < 0)
4615                                 return ret;
4616                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4617                                              MLX5_FLOW_LAYER_OUTER_VLAN;
4618                         if (items->mask != NULL && items->spec != NULL) {
4619                                 ether_type =
4620                                         ((const struct rte_flow_item_vlan *)
4621                                          items->spec)->inner_type;
4622                                 ether_type &=
4623                                         ((const struct rte_flow_item_vlan *)
4624                                          items->mask)->inner_type;
4625                                 ether_type = rte_be_to_cpu_16(ether_type);
4626                         } else {
4627                                 ether_type = 0;
4628                         }
4629                         break;
4630                 case RTE_FLOW_ITEM_TYPE_IPV4:
4631                         mlx5_flow_tunnel_ip_check(items, next_protocol,
4632                                                   &item_flags, &tunnel);
4633                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
4634                                                            last_item,
4635                                                            ether_type,
4636                                                            &nic_ipv4_mask,
4637                                                            error);
4638                         if (ret < 0)
4639                                 return ret;
4640                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4641                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4642                         if (items->mask != NULL &&
4643                             ((const struct rte_flow_item_ipv4 *)
4644                              items->mask)->hdr.next_proto_id) {
4645                                 next_protocol =
4646                                         ((const struct rte_flow_item_ipv4 *)
4647                                          (items->spec))->hdr.next_proto_id;
4648                                 next_protocol &=
4649                                         ((const struct rte_flow_item_ipv4 *)
4650                                          (items->mask))->hdr.next_proto_id;
4651                         } else {
4652                                 /* Reset for inner layer. */
4653                                 next_protocol = 0xff;
4654                         }
4655                         break;
4656                 case RTE_FLOW_ITEM_TYPE_IPV6:
4657                         mlx5_flow_tunnel_ip_check(items, next_protocol,
4658                                                   &item_flags, &tunnel);
4659                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
4660                                                            last_item,
4661                                                            ether_type,
4662                                                            &nic_ipv6_mask,
4663                                                            error);
4664                         if (ret < 0)
4665                                 return ret;
4666                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4667                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4668                         if (items->mask != NULL &&
4669                             ((const struct rte_flow_item_ipv6 *)
4670                              items->mask)->hdr.proto) {
4671                                 item_ipv6_proto =
4672                                         ((const struct rte_flow_item_ipv6 *)
4673                                          items->spec)->hdr.proto;
4674                                 next_protocol =
4675                                         ((const struct rte_flow_item_ipv6 *)
4676                                          items->spec)->hdr.proto;
4677                                 next_protocol &=
4678                                         ((const struct rte_flow_item_ipv6 *)
4679                                          items->mask)->hdr.proto;
4680                         } else {
4681                                 /* Reset for inner layer. */
4682                                 next_protocol = 0xff;
4683                         }
4684                         break;
4685                 case RTE_FLOW_ITEM_TYPE_TCP:
4686                         ret = mlx5_flow_validate_item_tcp
4687                                                 (items, item_flags,
4688                                                  next_protocol,
4689                                                  &nic_tcp_mask,
4690                                                  error);
4691                         if (ret < 0)
4692                                 return ret;
4693                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4694                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
4695                         break;
4696                 case RTE_FLOW_ITEM_TYPE_UDP:
4697                         ret = mlx5_flow_validate_item_udp(items, item_flags,
4698                                                           next_protocol,
4699                                                           error);
4700                         if (ret < 0)
4701                                 return ret;
4702                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4703                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
4704                         break;
4705                 case RTE_FLOW_ITEM_TYPE_GRE:
4706                         ret = mlx5_flow_validate_item_gre(items, item_flags,
4707                                                           next_protocol, error);
4708                         if (ret < 0)
4709                                 return ret;
4710                         gre_item = items;
4711                         last_item = MLX5_FLOW_LAYER_GRE;
4712                         break;
4713                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4714                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
4715                                                             next_protocol,
4716                                                             error);
4717                         if (ret < 0)
4718                                 return ret;
4719                         last_item = MLX5_FLOW_LAYER_NVGRE;
4720                         break;
4721                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4722                         ret = mlx5_flow_validate_item_gre_key
4723                                 (items, item_flags, gre_item, error);
4724                         if (ret < 0)
4725                                 return ret;
4726                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
4727                         break;
4728                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4729                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
4730                                                             error);
4731                         if (ret < 0)
4732                                 return ret;
4733                         last_item = MLX5_FLOW_LAYER_VXLAN;
4734                         break;
4735                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4736                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
4737                                                                 item_flags, dev,
4738                                                                 error);
4739                         if (ret < 0)
4740                                 return ret;
4741                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4742                         break;
4743                 case RTE_FLOW_ITEM_TYPE_GENEVE:
4744                         ret = mlx5_flow_validate_item_geneve(items,
4745                                                              item_flags, dev,
4746                                                              error);
4747                         if (ret < 0)
4748                                 return ret;
4749                         last_item = MLX5_FLOW_LAYER_GENEVE;
4750                         break;
4751                 case RTE_FLOW_ITEM_TYPE_MPLS:
4752                         ret = mlx5_flow_validate_item_mpls(dev, items,
4753                                                            item_flags,
4754                                                            last_item, error);
4755                         if (ret < 0)
4756                                 return ret;
4757                         last_item = MLX5_FLOW_LAYER_MPLS;
4758                         break;
4759
4760                 case RTE_FLOW_ITEM_TYPE_MARK:
4761                         ret = flow_dv_validate_item_mark(dev, items, attr,
4762                                                          error);
4763                         if (ret < 0)
4764                                 return ret;
4765                         last_item = MLX5_FLOW_ITEM_MARK;
4766                         break;
4767                 case RTE_FLOW_ITEM_TYPE_META:
4768                         ret = flow_dv_validate_item_meta(dev, items, attr,
4769                                                          error);
4770                         if (ret < 0)
4771                                 return ret;
4772                         last_item = MLX5_FLOW_ITEM_METADATA;
4773                         break;
4774                 case RTE_FLOW_ITEM_TYPE_ICMP:
4775                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
4776                                                            next_protocol,
4777                                                            error);
4778                         if (ret < 0)
4779                                 return ret;
4780                         last_item = MLX5_FLOW_LAYER_ICMP;
4781                         break;
4782                 case RTE_FLOW_ITEM_TYPE_ICMP6:
4783                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
4784                                                             next_protocol,
4785                                                             error);
4786                         if (ret < 0)
4787                                 return ret;
4788                         item_ipv6_proto = IPPROTO_ICMPV6;
4789                         last_item = MLX5_FLOW_LAYER_ICMP6;
4790                         break;
4791                 case RTE_FLOW_ITEM_TYPE_TAG:
4792                         ret = flow_dv_validate_item_tag(dev, items,
4793                                                         attr, error);
4794                         if (ret < 0)
4795                                 return ret;
4796                         last_item = MLX5_FLOW_ITEM_TAG;
4797                         break;
4798                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
4799                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
4800                         break;
4801                 case RTE_FLOW_ITEM_TYPE_GTP:
4802                         ret = flow_dv_validate_item_gtp(dev, items, item_flags,
4803                                                         error);
4804                         if (ret < 0)
4805                                 return ret;
4806                         last_item = MLX5_FLOW_LAYER_GTP;
4807                         break;
4808                 default:
4809                         return rte_flow_error_set(error, ENOTSUP,
4810                                                   RTE_FLOW_ERROR_TYPE_ITEM,
4811                                                   NULL, "item not supported");
4812                 }
4813                 item_flags |= last_item;
4814         }
4815         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4816                 int type = actions->type;
4817                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4818                         return rte_flow_error_set(error, ENOTSUP,
4819                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4820                                                   actions, "too many actions");
4821                 switch (type) {
4822                 case RTE_FLOW_ACTION_TYPE_VOID:
4823                         break;
4824                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4825                         ret = flow_dv_validate_action_port_id(dev,
4826                                                               action_flags,
4827                                                               actions,
4828                                                               attr,
4829                                                               error);
4830                         if (ret)
4831                                 return ret;
4832                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4833                         ++actions_n;
4834                         break;
4835                 case RTE_FLOW_ACTION_TYPE_FLAG:
4836                         ret = flow_dv_validate_action_flag(dev, action_flags,
4837                                                            attr, error);
4838                         if (ret < 0)
4839                                 return ret;
4840                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4841                                 /* Count all modify-header actions as one. */
4842                                 if (!(action_flags &
4843                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
4844                                         ++actions_n;
4845                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
4846                                                 MLX5_FLOW_ACTION_MARK_EXT;
4847                         } else {
4848                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
4849                                 ++actions_n;
4850                         }
4851                         break;
4852                 case RTE_FLOW_ACTION_TYPE_MARK:
4853                         ret = flow_dv_validate_action_mark(dev, actions,
4854                                                            action_flags,
4855                                                            attr, error);
4856                         if (ret < 0)
4857                                 return ret;
4858                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4859                                 /* Count all modify-header actions as one. */
4860                                 if (!(action_flags &
4861                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
4862                                         ++actions_n;
4863                                 action_flags |= MLX5_FLOW_ACTION_MARK |
4864                                                 MLX5_FLOW_ACTION_MARK_EXT;
4865                         } else {
4866                                 action_flags |= MLX5_FLOW_ACTION_MARK;
4867                                 ++actions_n;
4868                         }
4869                         break;
4870                 case RTE_FLOW_ACTION_TYPE_SET_META:
4871                         ret = flow_dv_validate_action_set_meta(dev, actions,
4872                                                                action_flags,
4873                                                                attr, error);
4874                         if (ret < 0)
4875                                 return ret;
4876                         /* Count all modify-header actions as one action. */
4877                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4878                                 ++actions_n;
4879                         action_flags |= MLX5_FLOW_ACTION_SET_META;
4880                         break;
4881                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
4882                         ret = flow_dv_validate_action_set_tag(dev, actions,
4883                                                               action_flags,
4884                                                               attr, error);
4885                         if (ret < 0)
4886                                 return ret;
4887                         /* Count all modify-header actions as one action. */
4888                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4889                                 ++actions_n;
4890                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
4891                         break;
4892                 case RTE_FLOW_ACTION_TYPE_DROP:
4893                         ret = mlx5_flow_validate_action_drop(action_flags,
4894                                                              attr, error);
4895                         if (ret < 0)
4896                                 return ret;
4897                         action_flags |= MLX5_FLOW_ACTION_DROP;
4898                         ++actions_n;
4899                         break;
4900                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4901                         ret = mlx5_flow_validate_action_queue(actions,
4902                                                               action_flags, dev,
4903                                                               attr, error);
4904                         if (ret < 0)
4905                                 return ret;
4906                         queue_index = ((const struct rte_flow_action_queue *)
4907                                                         (actions->conf))->index;
4908                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
4909                         ++actions_n;
4910                         break;
4911                 case RTE_FLOW_ACTION_TYPE_RSS:
4912                         rss = actions->conf;
4913                         ret = mlx5_flow_validate_action_rss(actions,
4914                                                             action_flags, dev,
4915                                                             attr, item_flags,
4916                                                             error);
4917                         if (ret < 0)
4918                                 return ret;
4919                         if (rss != NULL && rss->queue_num)
4920                                 queue_index = rss->queue[0];
4921                         action_flags |= MLX5_FLOW_ACTION_RSS;
4922                         ++actions_n;
4923                         break;
4924                 case RTE_FLOW_ACTION_TYPE_COUNT:
4925                         ret = flow_dv_validate_action_count(dev, error);
4926                         if (ret < 0)
4927                                 return ret;
4928                         action_flags |= MLX5_FLOW_ACTION_COUNT;
4929                         ++actions_n;
4930                         break;
4931                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4932                         if (flow_dv_validate_action_pop_vlan(dev,
4933                                                              action_flags,
4934                                                              actions,
4935                                                              item_flags, attr,
4936                                                              error))
4937                                 return -rte_errno;
4938                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
4939                         ++actions_n;
4940                         break;
4941                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4942                         ret = flow_dv_validate_action_push_vlan(dev,
4943                                                                 action_flags,
4944                                                                 item_flags,
4945                                                                 actions, attr,
4946                                                                 error);
4947                         if (ret < 0)
4948                                 return ret;
4949                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
4950                         ++actions_n;
4951                         break;
4952                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4953                         ret = flow_dv_validate_action_set_vlan_pcp
4954                                                 (action_flags, actions, error);
4955                         if (ret < 0)
4956                                 return ret;
4957                         /* Count PCP with push_vlan command. */
4958                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
4959                         break;
4960                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4961                         ret = flow_dv_validate_action_set_vlan_vid
4962                                                 (item_flags, action_flags,
4963                                                  actions, error);
4964                         if (ret < 0)
4965                                 return ret;
4966                         /* Count VID with push_vlan command. */
4967                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
4968                         break;
4969                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4970                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4971                         ret = flow_dv_validate_action_l2_encap(dev,
4972                                                                action_flags,
4973                                                                actions, attr,
4974                                                                error);
4975                         if (ret < 0)
4976                                 return ret;
4977                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
4978                         ++actions_n;
4979                         break;
4980                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4981                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4982                         ret = flow_dv_validate_action_decap(dev, action_flags,
4983                                                             attr, error);
4984                         if (ret < 0)
4985                                 return ret;
4986                         action_flags |= MLX5_FLOW_ACTION_DECAP;
4987                         ++actions_n;
4988                         break;
4989                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4990                         ret = flow_dv_validate_action_raw_encap_decap
4991                                 (dev, NULL, actions->conf, attr, &action_flags,
4992                                  &actions_n, error);
4993                         if (ret < 0)
4994                                 return ret;
4995                         break;
4996                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4997                         decap = actions->conf;
4998                         while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
4999                                 ;
5000                         if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5001                                 encap = NULL;
5002                                 actions--;
5003                         } else {
5004                                 encap = actions->conf;
5005                         }
5006                         ret = flow_dv_validate_action_raw_encap_decap
5007                                            (dev,
5008                                             decap ? decap : &empty_decap, encap,
5009                                             attr, &action_flags, &actions_n,
5010                                             error);
5011                         if (ret < 0)
5012                                 return ret;
5013                         break;
5014                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5015                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5016                         ret = flow_dv_validate_action_modify_mac(action_flags,
5017                                                                  actions,
5018                                                                  item_flags,
5019                                                                  error);
5020                         if (ret < 0)
5021                                 return ret;
5022                         /* Count all modify-header actions as one action. */
5023                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5024                                 ++actions_n;
5025                         action_flags |= actions->type ==
5026                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5027                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
5028                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
5029                         break;
5031                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5032                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5033                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
5034                                                                   actions,
5035                                                                   item_flags,
5036                                                                   error);
5037                         if (ret < 0)
5038                                 return ret;
5039                         /* Count all modify-header actions as one action. */
5040                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5041                                 ++actions_n;
5042                         action_flags |= actions->type ==
5043                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5044                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
5045                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
5046                         break;
5047                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5048                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5049                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
5050                                                                   actions,
5051                                                                   item_flags,
5052                                                                   error);
5053                         if (ret < 0)
5054                                 return ret;
5055                         if (item_ipv6_proto == IPPROTO_ICMPV6)
5056                                 return rte_flow_error_set(error, ENOTSUP,
5057                                         RTE_FLOW_ERROR_TYPE_ACTION,
5058                                         actions,
5059                                         "Can't change header "
5060                                         "with ICMPv6 proto");
5061                         /* Count all modify-header actions as one action. */
5062                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5063                                 ++actions_n;
5064                         action_flags |= actions->type ==
5065                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5066                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
5067                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
5068                         break;
5069                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5070                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5071                         ret = flow_dv_validate_action_modify_tp(action_flags,
5072                                                                 actions,
5073                                                                 item_flags,
5074                                                                 error);
5075                         if (ret < 0)
5076                                 return ret;
5077                         /* Count all modify-header actions as one action. */
5078                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5079                                 ++actions_n;
5080                         action_flags |= actions->type ==
5081                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5082                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
5083                                                 MLX5_FLOW_ACTION_SET_TP_DST;
5084                         break;
5085                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5086                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5087                         ret = flow_dv_validate_action_modify_ttl(action_flags,
5088                                                                  actions,
5089                                                                  item_flags,
5090                                                                  error);
5091                         if (ret < 0)
5092                                 return ret;
5093                         /* Count all modify-header actions as one action. */
5094                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5095                                 ++actions_n;
5096                         action_flags |= actions->type ==
5097                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
5098                                                 MLX5_FLOW_ACTION_SET_TTL :
5099                                                 MLX5_FLOW_ACTION_DEC_TTL;
5100                         break;
5101                 case RTE_FLOW_ACTION_TYPE_JUMP:
5102                         ret = flow_dv_validate_action_jump(actions,
5103                                                            action_flags,
5104                                                            attr, external,
5105                                                            error);
5106                         if (ret)
5107                                 return ret;
5108                         ++actions_n;
5109                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5110                         break;
5111                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5112                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5113                         ret = flow_dv_validate_action_modify_tcp_seq
5114                                                                 (action_flags,
5115                                                                  actions,
5116                                                                  item_flags,
5117                                                                  error);
5118                         if (ret < 0)
5119                                 return ret;
5120                         /* Count all modify-header actions as one action. */
5121                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5122                                 ++actions_n;
5123                         action_flags |= actions->type ==
5124                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5125                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
5126                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5127                         break;
5128                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5129                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5130                         ret = flow_dv_validate_action_modify_tcp_ack
5131                                                                 (action_flags,
5132                                                                  actions,
5133                                                                  item_flags,
5134                                                                  error);
5135                         if (ret < 0)
5136                                 return ret;
5137                         /* Count all modify-header actions as one action. */
5138                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5139                                 ++actions_n;
5140                         action_flags |= actions->type ==
5141                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5142                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
5143                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
5144                         break;
5145                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5146                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
5147                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
5148                         break;
5149                 case RTE_FLOW_ACTION_TYPE_METER:
5150                         ret = mlx5_flow_validate_action_meter(dev,
5151                                                               action_flags,
5152                                                               actions, attr,
5153                                                               error);
5154                         if (ret < 0)
5155                                 return ret;
5156                         action_flags |= MLX5_FLOW_ACTION_METER;
5157                         ++actions_n;
5158                         break;
5159                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
5160                         ret = flow_dv_validate_action_modify_ipv4_dscp
5161                                                          (action_flags,
5162                                                           actions,
5163                                                           item_flags,
5164                                                           error);
5165                         if (ret < 0)
5166                                 return ret;
5167                         /* Count all modify-header actions as one action. */
5168                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5169                                 ++actions_n;
5170                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
5171                         break;
5172                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
5173                         ret = flow_dv_validate_action_modify_ipv6_dscp
5174                                                                 (action_flags,
5175                                                                  actions,
5176                                                                  item_flags,
5177                                                                  error);
5178                         if (ret < 0)
5179                                 return ret;
5180                         /* Count all modify-header actions as one action. */
5181                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
5182                                 ++actions_n;
5183                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
5184                         break;
5185                 default:
5186                         return rte_flow_error_set(error, ENOTSUP,
5187                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5188                                                   actions,
5189                                                   "action not supported");
5190                 }
5191         }
5192         /*
5193          * Validate the drop action mutual exclusion with other actions.
5194          * Drop action is mutually-exclusive with any other action, except for
5195          * Count action.
5196          */
5197         if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
5198             (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
5199                 return rte_flow_error_set(error, EINVAL,
5200                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
5201                                           "Drop action is mutually-exclusive "
5202                                           "with any other action, except for "
5203                                           "Count action");
5204         /* Eswitch has a few restrictions on using items and actions. */
5205         if (attr->transfer) {
5206                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5207                     action_flags & MLX5_FLOW_ACTION_FLAG)
5208                         return rte_flow_error_set(error, ENOTSUP,
5209                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5210                                                   NULL,
5211                                                   "unsupported action FLAG");
5212                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5213                     action_flags & MLX5_FLOW_ACTION_MARK)
5214                         return rte_flow_error_set(error, ENOTSUP,
5215                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5216                                                   NULL,
5217                                                   "unsupported action MARK");
5218                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5219                         return rte_flow_error_set(error, ENOTSUP,
5220                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5221                                                   NULL,
5222                                                   "unsupported action QUEUE");
5223                 if (action_flags & MLX5_FLOW_ACTION_RSS)
5224                         return rte_flow_error_set(error, ENOTSUP,
5225                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5226                                                   NULL,
5227                                                   "unsupported action RSS");
5228                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5229                         return rte_flow_error_set(error, EINVAL,
5230                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5231                                                   actions,
5232                                                   "no fate action is found");
5233         } else {
5234                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5235                         return rte_flow_error_set(error, EINVAL,
5236                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5237                                                   actions,
5238                                                   "no fate action is found");
5239         }
5240         /* Continue validation for Xcap actions. */
5241         if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
5242             mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
5243                 if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
5244                     MLX5_FLOW_XCAP_ACTIONS)
5245                         return rte_flow_error_set(error, ENOTSUP,
5246                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5247                                                   NULL, "encap and decap "
5248                                                   "combination is not supported");
5249                 if (!attr->transfer && attr->ingress && (action_flags &
5250                                                         MLX5_FLOW_ACTION_ENCAP))
5251                         return rte_flow_error_set(error, ENOTSUP,
5252                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5253                                                   NULL, "encap is not supported"
5254                                                   " for ingress traffic");
5255         }
5256         return 0;
5257 }
5258
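/*
 * Illustrative sketch (hypothetical helper, not part of the driver): an
 * application-side action list that satisfies the DROP mutual-exclusion
 * rule validated above. DROP may only be combined with COUNT; adding any
 * other action fails with EINVAL. Relies on the rte_flow.h definitions
 * already included by this file.
 */
static void __rte_unused
example_drop_count_actions(void)
{
	/* DROP + COUNT is the only accepted DROP combination. */
	const struct rte_flow_action valid[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* DROP + QUEUE would be rejected by the check above. */
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action invalid[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	(void)valid;
	(void)invalid;
}
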
5259 /**
5260  * Internal preparation function. Allocates the DV flow handle;
5261  * its size is constant.
5262  *
5263  * @param[in] dev
5264  *   Pointer to the rte_eth_dev structure.
5265  * @param[in] attr
5266  *   Pointer to the flow attributes.
5267  * @param[in] items
5268  *   Pointer to the list of items.
5269  * @param[in] actions
5270  *   Pointer to the list of actions.
5271  * @param[out] error
5272  *   Pointer to the error structure.
5273  *
5274  * @return
5275  *   Pointer to mlx5_flow object on success,
5276  *   otherwise NULL and rte_errno is set.
5277  */
5278 static struct mlx5_flow *
5279 flow_dv_prepare(struct rte_eth_dev *dev,
5280                 const struct rte_flow_attr *attr __rte_unused,
5281                 const struct rte_flow_item items[] __rte_unused,
5282                 const struct rte_flow_action actions[] __rte_unused,
5283                 struct rte_flow_error *error)
5284 {
5285         size_t size = sizeof(struct mlx5_flow_handle);
5286         struct mlx5_flow *dev_flow;
5287         struct mlx5_flow_handle *dev_handle;
5288         struct mlx5_priv *priv = dev->data->dev_private;
5289
5290         /* Guard against overflowing the temporary device flow array. */
5291         if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
5292                 rte_flow_error_set(error, ENOSPC,
5293                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5294                                    "no free temporary device flow");
5295                 return NULL;
5296         }
5297         dev_handle = rte_calloc(__func__, 1, size, 0);
5298         if (!dev_handle) {
5299                 rte_flow_error_set(error, ENOMEM,
5300                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5301                                    "not enough memory to create flow handle");
5302                 return NULL;
5303         }
5304         /* No multi-thread support. */
5305         dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
5306         dev_flow->handle = dev_handle;
5307         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
5308         /*
5309          * The matching value needs to be cleared to 0 before using. In
5310          * the past it was cleared automatically by the rte_*alloc API;
5311          * clearing it explicitly here costs almost the same.
5312          */
5313         memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
5314         dev_flow->ingress = attr->ingress;
5315         dev_flow->dv.transfer = attr->transfer;
5316         return dev_flow;
5317 }
5318
5319 #ifdef RTE_LIBRTE_MLX5_DEBUG
5320 /**
5321  * Sanity check for match mask and value. Similar to check_valid_spec() in
5322  * the kernel driver. If an unmasked bit is set in the value, it fails.
5323  *
5324  * @param match_mask
5325  *   Pointer to match mask buffer.
5326  * @param match_value
5327  *   Pointer to match value buffer.
5328  *
5329  * @return
5330  *   0 if valid, -EINVAL otherwise.
5331  */
5332 static int
5333 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5334 {
5335         uint8_t *m = match_mask;
5336         uint8_t *v = match_value;
5337         unsigned int i;
5338
5339         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5340                 if (v[i] & ~m[i]) {
5341                         DRV_LOG(ERR,
5342                                 "match_value differs from match_criteria"
5343                                 " %p[%u] != %p[%u]",
5344                                 match_value, i, match_mask, i);
5345                         return -EINVAL;
5346                 }
5347         }
5348         return 0;
5349 }
5350 #endif
5351
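/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * containment rule enforced by flow_dv_check_valid_spec(), shown on two
 * small byte arrays. Any value bit set outside its mask ("v & ~m") is
 * invalid.
 */
static int __rte_unused
example_spec_within_mask(void)
{
	const uint8_t mask[2] = { 0xff, 0x0f };
	const uint8_t good[2] = { 0xaa, 0x05 }; /* all bits within mask */
	const uint8_t bad[2] = { 0xaa, 0x15 }; /* bit 4 of byte 1 unmasked */
	unsigned int i;

	for (i = 0; i < sizeof(mask); ++i)
		if (bad[i] & ~mask[i])
			return -EINVAL; /* mirrors the driver's failure path */
	(void)good;
	return 0;
}
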
5352 /**
5353  * Add Ethernet item to matcher and to the value.
5354  *
5355  * @param[in, out] matcher
5356  *   Flow matcher.
5357  * @param[in, out] key
5358  *   Flow matcher value.
5359  * @param[in] item
5360  *   Flow pattern to translate.
5361  * @param[in] inner
5362  *   Item is inner pattern.
5363  */
5364 static void
5365 flow_dv_translate_item_eth(void *matcher, void *key,
5366                            const struct rte_flow_item *item, int inner)
5367 {
5368         const struct rte_flow_item_eth *eth_m = item->mask;
5369         const struct rte_flow_item_eth *eth_v = item->spec;
5370         const struct rte_flow_item_eth nic_mask = {
5371                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5372                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5373                 .type = RTE_BE16(0xffff),
5374         };
5375         void *headers_m;
5376         void *headers_v;
5377         char *l24_v;
5378         unsigned int i;
5379
5380         if (!eth_v)
5381                 return;
5382         if (!eth_m)
5383                 eth_m = &nic_mask;
5384         if (inner) {
5385                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5386                                          inner_headers);
5387                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5388         } else {
5389                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5390                                          outer_headers);
5391                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5392         }
5393         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5394                &eth_m->dst, sizeof(eth_m->dst));
5395         /* The value must be in the range of the mask. */
5396         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5397         for (i = 0; i < sizeof(eth_m->dst); ++i)
5398                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5399         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5400                &eth_m->src, sizeof(eth_m->src));
5401         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5402         /* The value must be in the range of the mask. */
5403         for (i = 0; i < sizeof(eth_m->src); ++i)
5404                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5405         if (eth_v->type) {
5406                 /* When ethertype is present set mask for tagged VLAN. */
5407                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5408                 /* Set value for tagged VLAN if ethertype is 802.1Q. */
5409                 if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
5410                     eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
5411                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
5412                                  1);
5413                         /* Return here to avoid setting match on ethertype. */
5414                         return;
5415                 }
5416         }
5417         /*
5418          * HW supports match on one Ethertype, the Ethertype following the last
5419          * VLAN tag of the packet (see PRM).
5420          * Set match on ethertype only if ETH header is not followed by VLAN.
5421          */
5422         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5423                  rte_be_to_cpu_16(eth_m->type));
5424         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
5425         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5426 }
5427
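/*
 * Illustrative sketch (hypothetical, not part of the driver): an
 * application-side ETH item as consumed by flow_dv_translate_item_eth()
 * above. The translation ANDs spec with mask byte by byte, so the value
 * written to the matcher key always stays within the mask.
 */
static void __rte_unused
example_eth_item(void)
{
	static const struct rte_flow_item_eth spec = {
		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
	};
	static const struct rte_flow_item_eth mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &spec,
		.mask = &mask,
	};
	(void)item;
}
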
5428 /**
5429  * Add VLAN item to matcher and to the value.
5430  *
5431  * @param[in, out] dev_flow
5432  *   Flow descriptor.
5433  * @param[in, out] matcher
5434  *   Flow matcher.
5435  * @param[in, out] key
5436  *   Flow matcher value.
5437  * @param[in] item
5438  *   Flow pattern to translate.
5439  * @param[in] inner
5440  *   Item is inner pattern.
5441  */
5442 static void
5443 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5444                             void *matcher, void *key,
5445                             const struct rte_flow_item *item,
5446                             int inner)
5447 {
5448         const struct rte_flow_item_vlan *vlan_m = item->mask;
5449         const struct rte_flow_item_vlan *vlan_v = item->spec;
5450         void *headers_m;
5451         void *headers_v;
5452         uint16_t tci_m;
5453         uint16_t tci_v;
5454
5455         if (!vlan_v)
5456                 return;
5457         if (!vlan_m)
5458                 vlan_m = &rte_flow_item_vlan_mask;
5459         if (inner) {
5460                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5461                                          inner_headers);
5462                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5463         } else {
5464                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5465                                          outer_headers);
5466                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5467                 /*
5468                  * This is a workaround; masks are not supported
5469                  * and have been pre-validated.
5470                  */
5471                 dev_flow->handle->vf_vlan.tag =
5472                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5473         }
5474         tci_m = rte_be_to_cpu_16(vlan_m->tci);
5475         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
5476         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5477         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5478         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5479         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5480         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5481         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5482         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5483         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
5484         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5485                  rte_be_to_cpu_16(vlan_m->inner_type));
5486         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
5487                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
5488 }
5489
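/*
 * Illustrative sketch (hypothetical helper): how the 16-bit VLAN TCI
 * handled above decomposes into the three PRM fields. first_prio gets
 * PCP (bits 15-13), first_cfi the DEI/CFI bit (bit 12) and first_vid the
 * VLAN ID (bits 11-0); in the code above the shifted values rely on
 * MLX5_SET() truncating to the field width.
 */
static void __rte_unused
example_vlan_tci_split(uint16_t tci)
{
	uint16_t pcp = tci >> 13; /* 3-bit priority */
	uint16_t cfi = (tci >> 12) & 0x1; /* 1-bit DEI/CFI */
	uint16_t vid = tci & MLX5DV_FLOW_VLAN_VID_MASK; /* 12-bit VLAN ID */

	(void)pcp;
	(void)cfi;
	(void)vid;
}
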
5490 /**
5491  * Add IPV4 item to matcher and to the value.
5492  *
5493  * @param[in, out] matcher
5494  *   Flow matcher.
5495  * @param[in, out] key
5496  *   Flow matcher value.
5497  * @param[in] item
5498  *   Flow pattern to translate.
5499  * @param[in] item_flags
5500  *   Bit-fields that hold the items detected until now.
5501  * @param[in] inner
5502  *   Item is inner pattern.
5503  * @param[in] group
5504  *   The group to insert the rule.
5505  */
5506 static void
5507 flow_dv_translate_item_ipv4(void *matcher, void *key,
5508                             const struct rte_flow_item *item,
5509                             const uint64_t item_flags,
5510                             int inner, uint32_t group)
5511 {
5512         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
5513         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
5514         const struct rte_flow_item_ipv4 nic_mask = {
5515                 .hdr = {
5516                         .src_addr = RTE_BE32(0xffffffff),
5517                         .dst_addr = RTE_BE32(0xffffffff),
5518                         .type_of_service = 0xff,
5519                         .next_proto_id = 0xff,
5520                         .time_to_live = 0xff,
5521                 },
5522         };
5523         void *headers_m;
5524         void *headers_v;
5525         char *l24_m;
5526         char *l24_v;
5527         uint8_t tos;
5528
5529         if (inner) {
5530                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5531                                          inner_headers);
5532                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5533         } else {
5534                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5535                                          outer_headers);
5536                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5537         }
5538         if (group == 0)
5539                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5540         else
5541                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
5542         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
5543         /*
5544          * On outer header (which must contain L2), or inner header with L2,
5545          * set cvlan_tag mask bit to mark this packet as untagged.
5546          * This should be done even if item->spec is empty.
5547          */
5548         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
5549                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5550         if (!ipv4_v)
5551                 return;
5552         if (!ipv4_m)
5553                 ipv4_m = &nic_mask;
5554         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5555                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5556         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5557                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5558         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
5559         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
5560         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5561                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
5562         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5563                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
5564         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
5565         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
5566         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
5567         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
5568                  ipv4_m->hdr.type_of_service);
5569         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
5570         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
5571                  ipv4_m->hdr.type_of_service >> 2);
5572         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
5573         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5574                  ipv4_m->hdr.next_proto_id);
5575         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5576                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
5577         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
5578                  ipv4_m->hdr.time_to_live);
5579         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
5580                  ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
5581 }
5582
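/*
 * Illustrative sketch (hypothetical helper): the IPv4 type-of-service
 * split performed above. ECN occupies the two least-significant bits and
 * DSCP the upper six, matching the separate ip_ecn/ip_dscp match fields.
 */
static void __rte_unused
example_ipv4_tos_split(uint8_t tos)
{
	uint8_t ecn = tos & 0x3; /* written via the 2-bit ip_ecn field */
	uint8_t dscp = tos >> 2; /* written via the 6-bit ip_dscp field */

	(void)ecn;
	(void)dscp;
}
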
5583 /**
5584  * Add IPV6 item to matcher and to the value.
5585  *
5586  * @param[in, out] matcher
5587  *   Flow matcher.
5588  * @param[in, out] key
5589  *   Flow matcher value.
5590  * @param[in] item
5591  *   Flow pattern to translate.
5592  * @param[in] item_flags
5593  *   Bit-fields that hold the items detected until now.
5594  * @param[in] inner
5595  *   Item is inner pattern.
5596  * @param[in] group
5597  *   The group to insert the rule.
5598  */
5599 static void
5600 flow_dv_translate_item_ipv6(void *matcher, void *key,
5601                             const struct rte_flow_item *item,
5602                             const uint64_t item_flags,
5603                             int inner, uint32_t group)
5604 {
5605         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
5606         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
5607         const struct rte_flow_item_ipv6 nic_mask = {
5608                 .hdr = {
5609                         .src_addr =
5610                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
5611                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
5612                         .dst_addr =
5613                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
5614                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
5615                         .vtc_flow = RTE_BE32(0xffffffff),
5616                         .proto = 0xff,
5617                         .hop_limits = 0xff,
5618                 },
5619         };
5620         void *headers_m;
5621         void *headers_v;
5622         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5623         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5624         char *l24_m;
5625         char *l24_v;
5626         uint32_t vtc_m;
5627         uint32_t vtc_v;
5628         int i;
5629         int size;
5630
5631         if (inner) {
5632                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5633                                          inner_headers);
5634                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5635         } else {
5636                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5637                                          outer_headers);
5638                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5639         }
5640         if (group == 0)
5641                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5642         else
5643                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
5644         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
5645         /*
5646          * On outer header (which must contain L2), or inner header with L2,
5647          * set cvlan_tag mask bit to mark this packet as untagged.
5648          * This should be done even if item->spec is empty.
5649          */
5650         if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
5651                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5652         if (!ipv6_v)
5653                 return;
5654         if (!ipv6_m)
5655                 ipv6_m = &nic_mask;
5656         size = sizeof(ipv6_m->hdr.dst_addr);
5657         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5658                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5659         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5660                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5661         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
5662         for (i = 0; i < size; ++i)
5663                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
5664         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5665                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
5666         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5667                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
5668         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
5669         for (i = 0; i < size; ++i)
5670                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
5671         /* TOS. */
5672         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
5673         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
5674         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
5675         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
5676         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
5677         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
5678         /* Label. */
5679         if (inner) {
5680                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
5681                          vtc_m);
5682                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
5683                          vtc_v);
5684         } else {
5685                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
5686                          vtc_m);
5687                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
5688                          vtc_v);
5689         }
5690         /* Protocol. */
5691         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5692                  ipv6_m->hdr.proto);
5693         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5694                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
5695         /* Hop limit. */
5696         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
5697                  ipv6_m->hdr.hop_limits);
5698         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
5699                  ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
5700 }
5701
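/*
 * Illustrative sketch (hypothetical helper): the vtc_flow decomposition
 * used above. In host order the 32-bit word holds version (bits 31-28),
 * traffic class (27-20) and flow label (19-0); ECN is the low two TC
 * bits (>> 20) and DSCP the upper six (>> 22), with MLX5_SET()
 * truncating to the field widths.
 */
static void __rte_unused
example_ipv6_vtc_split(rte_be32_t vtc_flow)
{
	uint32_t vtc = rte_be_to_cpu_32(vtc_flow);
	uint32_t ecn = (vtc >> 20) & 0x3; /* ip_ecn field */
	uint32_t dscp = (vtc >> 22) & 0x3f; /* ip_dscp field */
	uint32_t label = vtc & 0xfffff; /* 20-bit flow label */

	(void)ecn;
	(void)dscp;
	(void)label;
}
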
5702 /**
5703  * Add TCP item to matcher and to the value.
5704  *
5705  * @param[in, out] matcher
5706  *   Flow matcher.
5707  * @param[in, out] key
5708  *   Flow matcher value.
5709  * @param[in] item
5710  *   Flow pattern to translate.
5711  * @param[in] inner
5712  *   Item is inner pattern.
5713  */
5714 static void
5715 flow_dv_translate_item_tcp(void *matcher, void *key,
5716                            const struct rte_flow_item *item,
5717                            int inner)
5718 {
5719         const struct rte_flow_item_tcp *tcp_m = item->mask;
5720         const struct rte_flow_item_tcp *tcp_v = item->spec;
5721         void *headers_m;
5722         void *headers_v;
5723
5724         if (inner) {
5725                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5726                                          inner_headers);
5727                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5728         } else {
5729                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5730                                          outer_headers);
5731                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5732         }
5733         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5734         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
5735         if (!tcp_v)
5736                 return;
5737         if (!tcp_m)
5738                 tcp_m = &rte_flow_item_tcp_mask;
5739         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
5740                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
5741         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
5742                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
5743         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
5744                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
5745         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
5746                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
5747         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
5748                  tcp_m->hdr.tcp_flags);
5749         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
5750                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
5751 }
5752
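/*
 * Illustrative sketch (hypothetical, not part of the driver): an
 * application-side TCP item matching SYN-only segments, as consumed by
 * flow_dv_translate_item_tcp() above. Flags are matched as value AND
 * mask, so masking both SYN and ACK while setting only SYN excludes
 * SYN-ACK segments. Assumes the RTE_TCP_*_FLAG macros from rte_tcp.h.
 */
static void __rte_unused
example_tcp_syn_item(void)
{
	static const struct rte_flow_item_tcp spec = {
		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
	};
	static const struct rte_flow_item_tcp mask = {
		.hdr.tcp_flags = RTE_TCP_SYN_FLAG | RTE_TCP_ACK_FLAG,
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = &spec,
		.mask = &mask,
	};
	(void)item;
}
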
5753 /**
5754  * Add UDP item to matcher and to the value.
5755  *
5756  * @param[in, out] matcher
5757  *   Flow matcher.
5758  * @param[in, out] key
5759  *   Flow matcher value.
5760  * @param[in] item
5761  *   Flow pattern to translate.
5762  * @param[in] inner
5763  *   Item is inner pattern.
5764  */
5765 static void
5766 flow_dv_translate_item_udp(void *matcher, void *key,
5767                            const struct rte_flow_item *item,
5768                            int inner)
5769 {
5770         const struct rte_flow_item_udp *udp_m = item->mask;
5771         const struct rte_flow_item_udp *udp_v = item->spec;
5772         void *headers_m;
5773         void *headers_v;
5774
5775         if (inner) {
5776                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5777                                          inner_headers);
5778                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5779         } else {
5780                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5781                                          outer_headers);
5782                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5783         }
5784         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5785         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
5786         if (!udp_v)
5787                 return;
5788         if (!udp_m)
5789                 udp_m = &rte_flow_item_udp_mask;
5790         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
5791                  rte_be_to_cpu_16(udp_m->hdr.src_port));
5792         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
5793                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
5794         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
5795                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
5796         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5797                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
5798 }
5799
5800 /**
5801  * Add GRE optional Key item to matcher and to the value.
5802  *
5803  * @param[in, out] matcher
5804  *   Flow matcher.
5805  * @param[in, out] key
5806  *   Flow matcher value.
5807  * @param[in] item
5810  *   Item is inner pattern.
5811  */
5812 static void
5813 flow_dv_translate_item_gre_key(void *matcher, void *key,
5814                                const struct rte_flow_item *item)
5815 {
5816         const rte_be32_t *key_m = item->mask;
5817         const rte_be32_t *key_v = item->spec;
5818         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5819         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5820         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5821
5822         /* GRE K bit must be on and should already be validated */
5823         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
5824         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
5825         if (!key_v)
5826                 return;
5827         if (!key_m)
5828                 key_m = &gre_key_default_mask;
5829         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
5830                  rte_be_to_cpu_32(*key_m) >> 8);
5831         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
5832                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
5833         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
5834                  rte_be_to_cpu_32(*key_m) & 0xFF);
5835         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
5836                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
5837 }
5838
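/*
 * Illustrative sketch (hypothetical helper): the 32-bit GRE key split
 * used above. The match set keeps the key in two fields, gre_key_h
 * holding the upper 24 bits and gre_key_l the lower 8.
 */
static void __rte_unused
example_gre_key_split(rte_be32_t key_be)
{
	uint32_t key = rte_be_to_cpu_32(key_be);
	uint32_t key_h = key >> 8; /* upper 24 bits */
	uint32_t key_l = key & 0xff; /* lower 8 bits */

	(void)key_h;
	(void)key_l;
}
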
5839 /**
5840  * Add GRE item to matcher and to the value.
5841  *
5842  * @param[in, out] matcher
5843  *   Flow matcher.
5844  * @param[in, out] key
5845  *   Flow matcher value.
5846  * @param[in] item
5847  *   Flow pattern to translate.
5848  * @param[in] inner
5849  *   Item is inner pattern.
5850  */
5851 static void
5852 flow_dv_translate_item_gre(void *matcher, void *key,
5853                            const struct rte_flow_item *item,
5854                            int inner)
5855 {
5856         const struct rte_flow_item_gre *gre_m = item->mask;
5857         const struct rte_flow_item_gre *gre_v = item->spec;
5858         void *headers_m;
5859         void *headers_v;
5860         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5861         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5862         struct {
5863                 union {
5864                         __extension__
5865                         struct {
5866                                 uint16_t version:3;
5867                                 uint16_t rsvd0:9;
5868                                 uint16_t s_present:1;
5869                                 uint16_t k_present:1;
5870                                 uint16_t rsvd_bit1:1;
5871                                 uint16_t c_present:1;
5872                         };
5873                         uint16_t value;
5874                 };
5875         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
5876
5877         if (inner) {
5878                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5879                                          inner_headers);
5880                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5881         } else {
5882                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5883                                          outer_headers);
5884                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5885         }
5886         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5887         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
5888         if (!gre_v)
5889                 return;
5890         if (!gre_m)
5891                 gre_m = &rte_flow_item_gre_mask;
5892         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
5893                  rte_be_to_cpu_16(gre_m->protocol));
5894         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5895                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
5896         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
5897         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
5898         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
5899                  gre_crks_rsvd0_ver_m.c_present);
5900         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
5901                  gre_crks_rsvd0_ver_v.c_present &
5902                  gre_crks_rsvd0_ver_m.c_present);
5903         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
5904                  gre_crks_rsvd0_ver_m.k_present);
5905         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
5906                  gre_crks_rsvd0_ver_v.k_present &
5907                  gre_crks_rsvd0_ver_m.k_present);
5908         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
5909                  gre_crks_rsvd0_ver_m.s_present);
5910         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
5911                  gre_crks_rsvd0_ver_v.s_present &
5912                  gre_crks_rsvd0_ver_m.s_present);
5913 }
5914
5915 /**
5916  * Add NVGRE item to matcher and to the value.
5917  *
5918  * @param[in, out] matcher
5919  *   Flow matcher.
5920  * @param[in, out] key
5921  *   Flow matcher value.
5922  * @param[in] item
5923  *   Flow pattern to translate.
5924  * @param[in] inner
5925  *   Item is inner pattern.
5926  */
5927 static void
5928 flow_dv_translate_item_nvgre(void *matcher, void *key,
5929                              const struct rte_flow_item *item,
5930                              int inner)
5931 {
5932         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
5933         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
5934         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5935         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5936         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
5937         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
5938         char *gre_key_m;
5939         char *gre_key_v;
5940         int size;
5941         int i;
5942
5943         /* For NVGRE, GRE header fields must be set with defined values. */
5944         const struct rte_flow_item_gre gre_spec = {
5945                 .c_rsvd0_ver = RTE_BE16(0x2000),
5946                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
5947         };
5948         const struct rte_flow_item_gre gre_mask = {
5949                 .c_rsvd0_ver = RTE_BE16(0xB000),
5950                 .protocol = RTE_BE16(UINT16_MAX),
5951         };
5952         const struct rte_flow_item gre_item = {
5953                 .spec = &gre_spec,
5954                 .mask = &gre_mask,
5955                 .last = NULL,
5956         };
5957         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
5958         if (!nvgre_v)
5959                 return;
5960         if (!nvgre_m)
5961                 nvgre_m = &rte_flow_item_nvgre_mask;
5962         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
5963         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
5964         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
5965         memcpy(gre_key_m, tni_flow_id_m, size);
5966         for (i = 0; i < size; ++i)
5967                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
5968 }
5969
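/*
 * Illustrative sketch (hypothetical helper): the fixed GRE header used
 * above for NVGRE. In the host-order c_rsvd0_ver word, C is bit 15, K
 * bit 13, S bit 12 and the version the low 3 bits. Spec 0x2000 sets only
 * K, and mask 0xB000 pins C, K and S, so only key-present,
 * non-checksummed, non-sequenced GRE matches as NVGRE.
 */
static void __rte_unused
example_nvgre_gre_bits(void)
{
	uint16_t spec = 0x2000;
	uint16_t mask = 0xB000;
	int k_present = !!(spec & (1 << 13)); /* NVGRE carries a key */
	int c_masked = !!(mask & (1 << 15)); /* checksum bit is matched */
	int s_masked = !!(mask & (1 << 12)); /* sequence bit is matched */

	(void)k_present;
	(void)c_masked;
	(void)s_masked;
}
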
5970 /**
5971  * Add VXLAN item to matcher and to the value.
5972  *
5973  * @param[in, out] matcher
5974  *   Flow matcher.
5975  * @param[in, out] key
5976  *   Flow matcher value.
5977  * @param[in] item
5978  *   Flow pattern to translate.
5979  * @param[in] inner
5980  *   Item is inner pattern.
5981  */
5982 static void
5983 flow_dv_translate_item_vxlan(void *matcher, void *key,
5984                              const struct rte_flow_item *item,
5985                              int inner)
5986 {
5987         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
5988         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
5989         void *headers_m;
5990         void *headers_v;
5991         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5992         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5993         char *vni_m;
5994         char *vni_v;
5995         uint16_t dport;
5996         int size;
5997         int i;
5998
5999         if (inner) {
6000                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6001                                          inner_headers);
6002                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6003         } else {
6004                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6005                                          outer_headers);
6006                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6007         }
6008         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
6009                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
6010         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6011                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6012                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6013         }
6014         if (!vxlan_v)
6015                 return;
6016         if (!vxlan_m)
6017                 vxlan_m = &rte_flow_item_vxlan_mask;
6018         size = sizeof(vxlan_m->vni);
6019         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
6020         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
6021         memcpy(vni_m, vxlan_m->vni, size);
6022         for (i = 0; i < size; ++i)
6023                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6024 }
6025
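/*
 * Illustrative sketch (hypothetical helper): the byte-wise VNI masking
 * applied above. The 24-bit VNI is copied as three bytes, each value
 * byte ANDed with its mask byte so the key stays within the matcher
 * mask.
 */
static void __rte_unused
example_vxlan_vni_mask(void)
{
	const uint8_t vni_spec[3] = { 0x12, 0x34, 0x56 };
	const uint8_t vni_mask[3] = { 0xff, 0xff, 0xff };
	uint8_t vni_key[3];
	unsigned int i;

	for (i = 0; i < sizeof(vni_key); ++i)
		vni_key[i] = vni_mask[i] & vni_spec[i];
	(void)vni_key;
}
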
6026 /**
6027  * Add VXLAN-GPE item to matcher and to the value.
6028  *
6029  * @param[in, out] matcher
6030  *   Flow matcher.
6031  * @param[in, out] key
6032  *   Flow matcher value.
6033  * @param[in] item
6034  *   Flow pattern to translate.
6035  * @param[in] inner
6036  *   Item is inner pattern.
6037  */
6039 static void
6040 flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
6041                                  const struct rte_flow_item *item, int inner)
6042 {
6043         const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
6044         const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
6045         void *headers_m;
6046         void *headers_v;
6047         void *misc_m =
6048                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
6049         void *misc_v =
6050                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6051         char *vni_m;
6052         char *vni_v;
6053         uint16_t dport;
6054         int size;
6055         int i;
6056         uint8_t flags_m = 0xff;
6057         uint8_t flags_v = 0xc;
6058
6059         if (inner) {
6060                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6061                                          inner_headers);
6062                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6063         } else {
6064                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6065                                          outer_headers);
6066                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6067         }
6068         dport = MLX5_UDP_PORT_VXLAN_GPE;
6070         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6071                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6072                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6073         }
6074         if (!vxlan_v)
6075                 return;
6076         if (!vxlan_m)
6077                 vxlan_m = &rte_flow_item_vxlan_gpe_mask;
6078         size = sizeof(vxlan_m->vni);
6079         vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
6080         vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
6081         memcpy(vni_m, vxlan_m->vni, size);
6082         for (i = 0; i < size; ++i)
6083                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
6084         if (vxlan_m->flags) {
6085                 flags_m = vxlan_m->flags;
6086                 flags_v = vxlan_v->flags;
6087         }
6088         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
6089         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
6090         MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
6091                  vxlan_m->protocol);
6092         MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
6093                  vxlan_v->protocol);
6094 }
6095
6096 /**
6097  * Add Geneve item to matcher and to the value.
6098  *
6099  * @param[in, out] matcher
6100  *   Flow matcher.
6101  * @param[in, out] key
6102  *   Flow matcher value.
6103  * @param[in] item
6104  *   Flow pattern to translate.
6105  * @param[in] inner
6106  *   Item is inner pattern.
6107  */
6109 static void
6110 flow_dv_translate_item_geneve(void *matcher, void *key,
6111                               const struct rte_flow_item *item, int inner)
6112 {
6113         const struct rte_flow_item_geneve *geneve_m = item->mask;
6114         const struct rte_flow_item_geneve *geneve_v = item->spec;
6115         void *headers_m;
6116         void *headers_v;
6117         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6118         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6119         uint16_t dport;
6120         uint16_t gbhdr_m;
6121         uint16_t gbhdr_v;
6122         char *vni_m;
6123         char *vni_v;
6124         size_t size, i;
6125
6126         if (inner) {
6127                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6128                                          inner_headers);
6129                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6130         } else {
6131                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6132                                          outer_headers);
6133                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6134         }
6135         dport = MLX5_UDP_PORT_GENEVE;
6136         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6137                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6138                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6139         }
6140         if (!geneve_v)
6141                 return;
6142         if (!geneve_m)
6143                 geneve_m = &rte_flow_item_geneve_mask;
6144         size = sizeof(geneve_m->vni);
6145         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
6146         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
6147         memcpy(vni_m, geneve_m->vni, size);
6148         for (i = 0; i < size; ++i)
6149                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
6150         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
6151                  rte_be_to_cpu_16(geneve_m->protocol));
6152         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
6153                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
6154         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
6155         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
6156         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
6157                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6158         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
6159                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
6160         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
6161                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6162         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
6163                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
6164                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
6165 }
6166
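/*
 * Editor's note (assumption, not from the sources): the first 16 bits of
 * the Geneve header are Ver(2) | OptLen(6) | O(1) | C(1) | Rsvd(6), and
 * the MLX5_GENEVE_OAMF_VAL()/MLX5_GENEVE_OPTLEN_VAL() macros are assumed
 * to extract the OAM bit and the option length from that word. E.g.:
 *
 *   uint16_t gbhdr = 0x0180; // hypothetical: OptLen = 1 word, O = 1
 *
 * would give MLX5_GENEVE_OPTLEN_VAL(gbhdr) == 1 and
 * MLX5_GENEVE_OAMF_VAL(gbhdr) == 1.
 */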
6167 /**
6168  * Add MPLS item to matcher and to the value.
6169  *
6170  * @param[in, out] matcher
6171  *   Flow matcher.
6172  * @param[in, out] key
6173  *   Flow matcher value.
6174  * @param[in] item
6175  *   Flow pattern to translate.
6176  * @param[in] prev_layer
6177  *   The protocol layer indicated in previous item.
6178  * @param[in] inner
6179  *   Item is inner pattern.
6180  */
6181 static void
6182 flow_dv_translate_item_mpls(void *matcher, void *key,
6183                             const struct rte_flow_item *item,
6184                             uint64_t prev_layer,
6185                             int inner)
6186 {
6187         const uint32_t *in_mpls_m = item->mask;
6188         const uint32_t *in_mpls_v = item->spec;
6189         uint32_t *out_mpls_m = 0;
6190         uint32_t *out_mpls_v = 0;
6191         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6192         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6193         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
6194                                      misc_parameters_2);
6195         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6196         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
6197         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6198
6199         switch (prev_layer) {
6200         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6201                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
6202                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
6203                          MLX5_UDP_PORT_MPLS);
6204                 break;
6205         case MLX5_FLOW_LAYER_GRE:
6206                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
6207                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
6208                          RTE_ETHER_TYPE_MPLS);
6209                 break;
6210         default:
6211                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
6212                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
6213                          IPPROTO_MPLS);
6214                 break;
6215         }
6216         if (!in_mpls_v)
6217                 return;
6218         if (!in_mpls_m)
6219                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
6220         switch (prev_layer) {
6221         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
6222                 out_mpls_m =
6223                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6224                                                  outer_first_mpls_over_udp);
6225                 out_mpls_v =
6226                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6227                                                  outer_first_mpls_over_udp);
6228                 break;
6229         case MLX5_FLOW_LAYER_GRE:
6230                 out_mpls_m =
6231                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
6232                                                  outer_first_mpls_over_gre);
6233                 out_mpls_v =
6234                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
6235                                                  outer_first_mpls_over_gre);
6236                 break;
6237         default:
6238                 /* Inner MPLS not over GRE is not supported. */
6239                 if (!inner) {
6240                         out_mpls_m =
6241                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6242                                                          misc2_m,
6243                                                          outer_first_mpls);
6244                         out_mpls_v =
6245                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
6246                                                          misc2_v,
6247                                                          outer_first_mpls);
6248                 }
6249                 break;
6250         }
6251         if (out_mpls_m && out_mpls_v) {
6252                 *out_mpls_m = *in_mpls_m;
6253                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
6254         }
6255 }
6256
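/*
 * Illustrative sketch (editor's addition): the prev_layer argument
 * selects the header that anchors the MPLS label. For a pattern such as
 *
 *   const struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_MPLS },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 * prev_layer is MLX5_FLOW_LAYER_OUTER_L4_UDP, so the UDP destination
 * port is pinned to MLX5_UDP_PORT_MPLS and the label goes into
 * outer_first_mpls_over_udp; with MLX5_FLOW_LAYER_GRE the GRE protocol
 * is pinned to RTE_ETHER_TYPE_MPLS and outer_first_mpls_over_gre is
 * used instead.
 */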
6257 /**
6258  * Add metadata register item to matcher.
6259  *
6260  * @param[in, out] matcher
6261  *   Flow matcher.
6262  * @param[in, out] key
6263  *   Flow matcher value.
6264  * @param[in] reg_type
6265  *   Type of device metadata register.
6266  * @param[in] data
6267  *   Register value.
6268  * @param[in] mask
6269  *   Register mask.
6270  */
6271 static void
6272 flow_dv_match_meta_reg(void *matcher, void *key,
6273                        enum modify_reg reg_type,
6274                        uint32_t data, uint32_t mask)
6275 {
6276         void *misc2_m =
6277                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
6278         void *misc2_v =
6279                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
6280         uint32_t temp;
6281
6282         data &= mask;
6283         switch (reg_type) {
6284         case REG_A:
6285                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
6286                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
6287                 break;
6288         case REG_B:
6289                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
6290                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
6291                 break;
6292         case REG_C_0:
6293                 /*
6294                  * The metadata register C0 field might be divided into
6295                  * source vport index and META item value; set this
6296                  * field according to the specified mask, not as a whole.
6297                  */
6298                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
6299                 temp |= mask;
6300                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
6301                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
6302                 temp &= ~mask;
6303                 temp |= data;
6304                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
6305                 break;
6306         case REG_C_1:
6307                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
6308                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
6309                 break;
6310         case REG_C_2:
6311                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
6312                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
6313                 break;
6314         case REG_C_3:
6315                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
6316                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
6317                 break;
6318         case REG_C_4:
6319                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
6320                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
6321                 break;
6322         case REG_C_5:
6323                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
6324                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
6325                 break;
6326         case REG_C_6:
6327                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
6328                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
6329                 break;
6330         case REG_C_7:
6331                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
6332                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
6333                 break;
6334         default:
6335                 MLX5_ASSERT(false);
6336                 break;
6337         }
6338 }
6339
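/*
 * Worked example (editor's addition) for the REG_C_0 read-modify-write
 * above. Assume the matcher mask already holds 0x0000ffff for the
 * source vport part and a META/MARK user now claims the upper half:
 *
 *   mask = 0xffff0000; data = 0x12340000;
 *
 * The stored mask becomes 0x0000ffff | 0xffff0000 == 0xffffffff, and
 * the stored value keeps its low 16 bits while taking 0x1234 in the
 * high ones, so both users of C0 coexist in one matcher.
 */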
6340 /**
6341  * Add MARK item to matcher.
6342  *
6343  * @param[in] dev
6344  *   The device to configure through.
6345  * @param[in, out] matcher
6346  *   Flow matcher.
6347  * @param[in, out] key
6348  *   Flow matcher value.
6349  * @param[in] item
6350  *   Flow pattern to translate.
6351  */
6352 static void
6353 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6354                             void *matcher, void *key,
6355                             const struct rte_flow_item *item)
6356 {
6357         struct mlx5_priv *priv = dev->data->dev_private;
6358         const struct rte_flow_item_mark *mark;
6359         uint32_t value;
6360         uint32_t mask;
6361
6362         mark = item->mask ? (const void *)item->mask :
6363                             &rte_flow_item_mark_mask;
6364         mask = mark->id & priv->sh->dv_mark_mask;
6365         mark = (const void *)item->spec;
6366         MLX5_ASSERT(mark);
6367         value = mark->id & priv->sh->dv_mark_mask & mask;
6368         if (mask) {
6369                 enum modify_reg reg;
6370
6371                 /* Get the metadata register index for the mark. */
6372                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6373                 MLX5_ASSERT(reg > 0);
6374                 if (reg == REG_C_0) {
6375                         struct mlx5_priv *priv = dev->data->dev_private;
6376                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6377                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6378
6379                         mask &= msk_c0;
6380                         mask <<= shl_c0;
6381                         value <<= shl_c0;
6382                 }
6383                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6384         }
6385 }
6386
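/*
 * Editor's note: rte_bsf32() returns the index of the least significant
 * set bit, i.e. the offset of the REG_C_0 sub-field that is available
 * for the mark. For a hypothetical msk_c0 of 0x00ffff00:
 *
 *   uint32_t shl_c0 = rte_bsf32(0x00ffff00); // == 8
 *
 * so the mark value and mask are shifted left by 8 to line up with the
 * usable register bits.
 */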
6387 /**
6388  * Add META item to matcher.
6389  *
6390  * @param[in] dev
6391  *   The device to configure through.
6392  * @param[in, out] matcher
6393  *   Flow matcher.
6394  * @param[in, out] key
6395  *   Flow matcher value.
6396  * @param[in] attr
6397  *   Attributes of flow that includes this item.
6398  * @param[in] item
6399  *   Flow pattern to translate.
6400  */
6401 static void
6402 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6403                             void *matcher, void *key,
6404                             const struct rte_flow_attr *attr,
6405                             const struct rte_flow_item *item)
6406 {
6407         const struct rte_flow_item_meta *meta_m;
6408         const struct rte_flow_item_meta *meta_v;
6409
6410         meta_m = (const void *)item->mask;
6411         if (!meta_m)
6412                 meta_m = &rte_flow_item_meta_mask;
6413         meta_v = (const void *)item->spec;
6414         if (meta_v) {
6415                 int reg;
6416                 uint32_t value = meta_v->data;
6417                 uint32_t mask = meta_m->data;
6418
6419                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6420                 if (reg < 0)
6421                         return;
6422                 /*
6423                  * In datapath code there are no endianness
6424                  * conversions for performance reasons; all
6425                  * pattern conversions are done in rte_flow.
6426                  */
6427                 value = rte_cpu_to_be_32(value);
6428                 mask = rte_cpu_to_be_32(mask);
6429                 if (reg == REG_C_0) {
6430                         struct mlx5_priv *priv = dev->data->dev_private;
6431                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6432                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6433 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6434                         uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
6435
6436                         value >>= shr_c0;
6437                         mask >>= shr_c0;
6438 #endif
6439                         value <<= shl_c0;
6440                         mask <<= shl_c0;
6441                         MLX5_ASSERT(msk_c0);
6442                         MLX5_ASSERT(!(~msk_c0 & mask));
6443                 }
6444                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6445         }
6446 }
6447
6448 /**
6449  * Add vport metadata Reg C0 item to matcher.
6450  *
6451  * @param[in, out] matcher
6452  *   Flow matcher.
6453  * @param[in, out] key
6454  *   Flow matcher value.
6455  * @param[in] value
6456  *   Register value to match.
 * @param[in] mask
 *   Register mask.
6457  */
6458 static void
6459 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6460                                   uint32_t value, uint32_t mask)
6461 {
6462         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6463 }
6464
6465 /**
6466  * Add tag item to matcher.
6467  *
6468  * @param[in] dev
6469  *   The device to configure through.
6470  * @param[in, out] matcher
6471  *   Flow matcher.
6472  * @param[in, out] key
6473  *   Flow matcher value.
6474  * @param[in] item
6475  *   Flow pattern to translate.
6476  */
6477 static void
6478 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6479                                 void *matcher, void *key,
6480                                 const struct rte_flow_item *item)
6481 {
6482         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
6483         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
6484         uint32_t mask, value;
6485
6486         MLX5_ASSERT(tag_v);
6487         value = tag_v->data;
6488         mask = tag_m ? tag_m->data : UINT32_MAX;
6489         if (tag_v->id == REG_C_0) {
6490                 struct mlx5_priv *priv = dev->data->dev_private;
6491                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6492                 uint32_t shl_c0 = rte_bsf32(msk_c0);
6493
6494                 mask &= msk_c0;
6495                 mask <<= shl_c0;
6496                 value <<= shl_c0;
6497         }
6498         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
6499 }
6500
6501 /**
6502  * Add TAG item to matcher.
6503  *
6504  * @param[in] dev
6505  *   The device to configure through.
6506  * @param[in, out] matcher
6507  *   Flow matcher.
6508  * @param[in, out] key
6509  *   Flow matcher value.
6510  * @param[in] item
6511  *   Flow pattern to translate.
6512  */
6513 static void
6514 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
6515                            void *matcher, void *key,
6516                            const struct rte_flow_item *item)
6517 {
6518         const struct rte_flow_item_tag *tag_v = item->spec;
6519         const struct rte_flow_item_tag *tag_m = item->mask;
6520         enum modify_reg reg;
6521
6522         MLX5_ASSERT(tag_v);
6523         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
6524         /* Get the metadata register index for the tag. */
6525         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
6526         MLX5_ASSERT(reg > 0);
6527         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
6528 }
6529
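/*
 * Illustrative sketch (editor's addition): a TAG item as an application
 * would provide it. With index 0 the register is resolved through
 * mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, NULL) to one of REG_C_*:
 *
 *   struct rte_flow_item_tag spec = { .data = 0xcafe, .index = 0 };
 *   struct rte_flow_item_tag mask = { .data = UINT32_MAX, .index = 0xff };
 */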
6530 /**
6531  * Add source vport match to the specified matcher.
6532  *
6533  * @param[in, out] matcher
6534  *   Flow matcher.
6535  * @param[in, out] key
6536  *   Flow matcher value.
6537  * @param[in] port
6538  *   Source vport value to match.
6539  * @param[in] mask
6540  *   Mask to apply.
6541  */
6542 static void
6543 flow_dv_translate_item_source_vport(void *matcher, void *key,
6544                                     int16_t port, uint16_t mask)
6545 {
6546         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6547         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6548
6549         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
6550         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
6551 }
6552
6553 /**
6554  * Translate port-id item to eswitch match on port-id.
6555  *
6556  * @param[in] dev
6557  *   The device to configure through.
6558  * @param[in, out] matcher
6559  *   Flow matcher.
6560  * @param[in, out] key
6561  *   Flow matcher value.
6562  * @param[in] item
6563  *   Flow pattern to translate.
6564  *
6565  * @return
6566  *   0 on success, a negative errno value otherwise.
6567  */
6568 static int
6569 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
6570                                void *key, const struct rte_flow_item *item)
6571 {
6572         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
6573         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
6574         struct mlx5_priv *priv;
6575         uint16_t mask, id;
6576
6577         mask = pid_m ? pid_m->id : 0xffff;
6578         id = pid_v ? pid_v->id : dev->data->port_id;
6579         priv = mlx5_port_to_eswitch_info(id, item == NULL);
6580         if (!priv)
6581                 return -rte_errno;
6582         /* Translate to vport field or to metadata, depending on mode. */
6583         if (priv->vport_meta_mask)
6584                 flow_dv_translate_item_meta_vport(matcher, key,
6585                                                   priv->vport_meta_tag,
6586                                                   priv->vport_meta_mask);
6587         else
6588                 flow_dv_translate_item_source_vport(matcher, key,
6589                                                     priv->vport_id, mask);
6590         return 0;
6591 }
6592
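/*
 * Design note (editor's summary): a port-id match is translated in one
 * of two ways depending on how the E-Switch was probed. When the port
 * carries a vport metadata tag (priv->vport_meta_mask != 0) the match
 * goes to REG_C_0 through flow_dv_translate_item_meta_vport(); otherwise
 * the plain vport number is matched in misc_parameters.source_port. The
 * application-side item stays the same in both cases, e.g.:
 *
 *   struct rte_flow_item_port_id pid = { .id = peer_port_id };
 *
 * where peer_port_id is a hypothetical DPDK port id of the peer.
 */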
6593 /**
6594  * Add ICMP6 item to matcher and to the value.
6595  *
6596  * @param[in, out] matcher
6597  *   Flow matcher.
6598  * @param[in, out] key
6599  *   Flow matcher value.
6600  * @param[in] item
6601  *   Flow pattern to translate.
6602  * @param[in] inner
6603  *   Item is inner pattern.
6604  */
6605 static void
6606 flow_dv_translate_item_icmp6(void *matcher, void *key,
6607                               const struct rte_flow_item *item,
6608                               int inner)
6609 {
6610         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
6611         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
6612         void *headers_m;
6613         void *headers_v;
6614         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6615                                      misc_parameters_3);
6616         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6617         if (inner) {
6618                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6619                                          inner_headers);
6620                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6621         } else {
6622                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6623                                          outer_headers);
6624                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6625         }
6626         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6627         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
6628         if (!icmp6_v)
6629                 return;
6630         if (!icmp6_m)
6631                 icmp6_m = &rte_flow_item_icmp6_mask;
6632         /*
6633          * Force flow only to match the non-fragmented IPv6 ICMPv6 packets.
6634          * If only the protocol is specified, no need to match the frag.
6635          */
6636         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6637         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
6638         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
6639         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
6640                  icmp6_v->type & icmp6_m->type);
6641         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
6642         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
6643                  icmp6_v->code & icmp6_m->code);
6644 }
6645
6646 /**
6647  * Add ICMP item to matcher and to the value.
6648  *
6649  * @param[in, out] matcher
6650  *   Flow matcher.
6651  * @param[in, out] key
6652  *   Flow matcher value.
6653  * @param[in] item
6654  *   Flow pattern to translate.
6655  * @param[in] inner
6656  *   Item is inner pattern.
6657  */
6658 static void
6659 flow_dv_translate_item_icmp(void *matcher, void *key,
6660                             const struct rte_flow_item *item,
6661                             int inner)
6662 {
6663         const struct rte_flow_item_icmp *icmp_m = item->mask;
6664         const struct rte_flow_item_icmp *icmp_v = item->spec;
6665         void *headers_m;
6666         void *headers_v;
6667         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6668                                      misc_parameters_3);
6669         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6670         if (inner) {
6671                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6672                                          inner_headers);
6673                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6674         } else {
6675                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6676                                          outer_headers);
6677                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6678         }
6679         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6680         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
6681         if (!icmp_v)
6682                 return;
6683         if (!icmp_m)
6684                 icmp_m = &rte_flow_item_icmp_mask;
6685         /*
6686          * Force flow only to match the non-fragmented IPv4 ICMP packets.
6687          * If only the protocol is specified, no need to match the frag.
6688          */
6689         MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
6690         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
6691         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
6692                  icmp_m->hdr.icmp_type);
6693         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
6694                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
6695         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
6696                  icmp_m->hdr.icmp_code);
6697         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
6698                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
6699 }
6700
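/*
 * Illustrative sketch (editor's addition): matching only ICMP echo
 * requests, using the definitions from rte_icmp.h:
 *
 *   struct rte_flow_item_icmp spec = {
 *           .hdr = { .icmp_type = RTE_IP_ICMP_ECHO_REQUEST },
 *   };
 *   struct rte_flow_item_icmp mask = {
 *           .hdr = { .icmp_type = 0xff, .icmp_code = 0xff },
 *   };
 */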
6701 /**
6702  * Add GTP item to matcher and to the value.
6703  *
6704  * @param[in, out] matcher
6705  *   Flow matcher.
6706  * @param[in, out] key
6707  *   Flow matcher value.
6708  * @param[in] item
6709  *   Flow pattern to translate.
6710  * @param[in] inner
6711  *   Item is inner pattern.
6712  */
6713 static void
6714 flow_dv_translate_item_gtp(void *matcher, void *key,
6715                            const struct rte_flow_item *item, int inner)
6716 {
6717         const struct rte_flow_item_gtp *gtp_m = item->mask;
6718         const struct rte_flow_item_gtp *gtp_v = item->spec;
6719         void *headers_m;
6720         void *headers_v;
6721         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6722                                      misc_parameters_3);
6723         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6724         uint16_t dport = RTE_GTPU_UDP_PORT;
6725
6726         if (inner) {
6727                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6728                                          inner_headers);
6729                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6730         } else {
6731                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6732                                          outer_headers);
6733                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6734         }
6735         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
6736                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
6737                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
6738         }
6739         if (!gtp_v)
6740                 return;
6741         if (!gtp_m)
6742                 gtp_m = &rte_flow_item_gtp_mask;
6743         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
6744         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
6745                  gtp_v->msg_type & gtp_m->msg_type);
6746         MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
6747                  rte_be_to_cpu_32(gtp_m->teid));
6748         MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
6749                  rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
6750 }
6751
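/*
 * Illustrative sketch (editor's addition): the GTP item carries the
 * TEID in network byte order; the translation above converts it back to
 * CPU order since MLX5_SET() takes native-endian values. Matching TEID
 * 0x12345678 exactly:
 *
 *   struct rte_flow_item_gtp spec = { .teid = RTE_BE32(0x12345678) };
 *   struct rte_flow_item_gtp mask = { .teid = RTE_BE32(0xffffffff) };
 */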
6752 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
6753
6754 #define HEADER_IS_ZERO(match_criteria, headers)                              \
6755         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
6756                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
6757
6758 /**
6759  * Calculate flow matcher enable bitmap.
6760  *
6761  * @param match_criteria
6762  *   Pointer to flow matcher criteria.
6763  *
6764  * @return
6765  *   Bitmap of enabled fields.
6766  */
6767 static uint8_t
6768 flow_dv_matcher_enable(uint32_t *match_criteria)
6769 {
6770         uint8_t match_criteria_enable;
6771
6772         match_criteria_enable =
6773                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
6774                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
6775         match_criteria_enable |=
6776                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
6777                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
6778         match_criteria_enable |=
6779                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
6780                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
6781         match_criteria_enable |=
6782                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
6783                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
6784         match_criteria_enable |=
6785                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
6786                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
6787         return match_criteria_enable;
6788 }
6789
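/*
 * Worked example (editor's addition): a matcher that uses only the
 * outer headers and misc_parameters_3, e.g. an outer UDP + GTP TEID
 * match, yields
 *
 *   match_criteria_enable =
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT);
 *
 * that is 0x11, assuming OUTER_BIT == 0 and MISC3_BIT == 4 as defined
 * in the PRM headers.
 */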
6790
6791 /**
6792  * Get a flow table.
6793  *
6794  * @param[in, out] dev
6795  *   Pointer to rte_eth_dev structure.
6796  * @param[in] table_id
6797  *   Table id to use.
6798  * @param[in] egress
6799  *   Direction of the table.
6800  * @param[in] transfer
6801  *   E-Switch or NIC flow.
6802  * @param[out] error
6803  *   pointer to error structure.
6804  *
6805  * @return
6806  *   Returns the table resource based on the key, NULL in case of failure.
6807  */
6808 static struct mlx5_flow_tbl_resource *
6809 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
6810                          uint32_t table_id, uint8_t egress,
6811                          uint8_t transfer,
6812                          struct rte_flow_error *error)
6813 {
6814         struct mlx5_priv *priv = dev->data->dev_private;
6815         struct mlx5_ibv_shared *sh = priv->sh;
6816         struct mlx5_flow_tbl_resource *tbl;
6817         union mlx5_flow_tbl_key table_key = {
6818                 {
6819                         .table_id = table_id,
6820                         .reserved = 0,
6821                         .domain = !!transfer,
6822                         .direction = !!egress,
6823                 }
6824         };
6825         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
6826                                                          table_key.v64);
6827         struct mlx5_flow_tbl_data_entry *tbl_data;
6828         int ret;
6829         void *domain;
6830
6831         if (pos) {
6832                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
6833                                         entry);
6834                 tbl = &tbl_data->tbl;
6835                 rte_atomic32_inc(&tbl->refcnt);
6836                 return tbl;
6837         }
6838         tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
6839         if (!tbl_data) {
6840                 rte_flow_error_set(error, ENOMEM,
6841                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6842                                    NULL,
6843                                    "cannot allocate flow table data entry");
6844                 return NULL;
6845         }
6846         tbl = &tbl_data->tbl;
6847         pos = &tbl_data->entry;
6848         if (transfer)
6849                 domain = sh->fdb_domain;
6850         else if (egress)
6851                 domain = sh->tx_domain;
6852         else
6853                 domain = sh->rx_domain;
6854         tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
6855         if (!tbl->obj) {
6856                 rte_flow_error_set(error, ENOMEM,
6857                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6858                                    NULL, "cannot create flow table object");
6859                 rte_free(tbl_data);
6860                 return NULL;
6861         }
6862         /*
6863          * No multi-threading for now, but it is still better to initialize
6864          * the reference count before inserting it into the hash list.
6865          */
6866         rte_atomic32_init(&tbl->refcnt);
6867         /* Jump action reference count is initialized here. */
6868         rte_atomic32_init(&tbl_data->jump.refcnt);
6869         pos->key = table_key.v64;
6870         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
6871         if (ret < 0) {
6872                 rte_flow_error_set(error, -ret,
6873                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6874                                    "cannot insert flow table data entry");
6875                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6876                 rte_free(tbl_data);
                     return NULL;
6877         }
6878         rte_atomic32_inc(&tbl->refcnt);
6879         return tbl;
6880 }
6881
6882 /**
6883  * Release a flow table.
6884  *
6885  * @param[in] dev
6886  *   Pointer to rte_eth_dev structure.
6887  * @param[in] tbl
6888  *   Table resource to be released.
6889  *
6890  * @return
6891  *   Returns 0 if the table was released, 1 otherwise.
6892  */
6893 static int
6894 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
6895                              struct mlx5_flow_tbl_resource *tbl)
6896 {
6897         struct mlx5_priv *priv = dev->data->dev_private;
6898         struct mlx5_ibv_shared *sh = priv->sh;
6899         struct mlx5_flow_tbl_data_entry *tbl_data =
6900                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6901
6902         if (!tbl)
6903                 return 0;
6904         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
6905                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
6906
6907                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6908                 tbl->obj = NULL;
6909                 /* remove the entry from the hash list and free memory. */
6910                 mlx5_hlist_remove(sh->flow_tbls, pos);
6911                 rte_free(tbl_data);
6912                 return 0;
6913         }
6914         return 1;
6915 }
6916
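/*
 * Usage sketch (editor's addition): the get/release pair is reference
 * counted, so callers need not track table lifetime themselves. Error
 * handling is elided for brevity:
 *
 *   struct mlx5_flow_tbl_resource *tbl;
 *
 *   tbl = flow_dv_tbl_resource_get(dev, table_id, 0, 0, error);
 *   if (tbl) {
 *           // ... use tbl->obj ...
 *           flow_dv_tbl_resource_release(dev, tbl);
 *   }
 *
 * The underlying flow table object is destroyed only when the last
 * reference is dropped.
 */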
6917 /**
6918  * Register the flow matcher.
6919  *
6920  * @param[in, out] dev
6921  *   Pointer to rte_eth_dev structure.
6922  * @param[in, out] matcher
6923  *   Pointer to flow matcher.
6924  * @param[in, out] key
6925  *   Pointer to flow table key.
6926  * @param[in, out] dev_flow
6927  *   Pointer to the dev_flow.
6928  * @param[out] error
6929  *   pointer to error structure.
6930  *
6931  * @return
6932  *   0 on success, a negative errno value otherwise and rte_errno is set.
6933  */
6934 static int
6935 flow_dv_matcher_register(struct rte_eth_dev *dev,
6936                          struct mlx5_flow_dv_matcher *matcher,
6937                          union mlx5_flow_tbl_key *key,
6938                          struct mlx5_flow *dev_flow,
6939                          struct rte_flow_error *error)
6940 {
6941         struct mlx5_priv *priv = dev->data->dev_private;
6942         struct mlx5_ibv_shared *sh = priv->sh;
6943         struct mlx5_flow_dv_matcher *cache_matcher;
6944         struct mlx5dv_flow_matcher_attr dv_attr = {
6945                 .type = IBV_FLOW_ATTR_NORMAL,
6946                 .match_mask = (void *)&matcher->mask,
6947         };
6948         struct mlx5_flow_tbl_resource *tbl;
6949         struct mlx5_flow_tbl_data_entry *tbl_data;
6950
6951         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
6952                                        key->domain, error);
6953         if (!tbl)
6954                 return -rte_errno;      /* No need to refill the error info */
6955         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6956         /* Lookup from cache. */
6957         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
6958                 if (matcher->crc == cache_matcher->crc &&
6959                     matcher->priority == cache_matcher->priority &&
6960                     !memcmp((const void *)matcher->mask.buf,
6961                             (const void *)cache_matcher->mask.buf,
6962                             cache_matcher->mask.size)) {
6963                         DRV_LOG(DEBUG,
6964                                 "%s group %u priority %hd use %s "
6965                                 "matcher %p: refcnt %d++",
6966                                 key->domain ? "FDB" : "NIC", key->table_id,
6967                                 cache_matcher->priority,
6968                                 key->direction ? "tx" : "rx",
6969                                 (void *)cache_matcher,
6970                                 rte_atomic32_read(&cache_matcher->refcnt));
6971                         rte_atomic32_inc(&cache_matcher->refcnt);
6972                         dev_flow->handle->dvh.matcher = cache_matcher;
6973                         /* A reused matcher must not increase the table refcnt. */
6974                         flow_dv_tbl_resource_release(dev, tbl);
6975                         return 0;
6976                 }
6977         }
6978         /* Register new matcher. */
6979         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
6980         if (!cache_matcher) {
6981                 flow_dv_tbl_resource_release(dev, tbl);
6982                 return rte_flow_error_set(error, ENOMEM,
6983                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6984                                           "cannot allocate matcher memory");
6985         }
6986         *cache_matcher = *matcher;
6987         dv_attr.match_criteria_enable =
6988                 flow_dv_matcher_enable(cache_matcher->mask.buf);
6989         dv_attr.priority = matcher->priority;
6990         if (key->direction)
6991                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
6992         cache_matcher->matcher_object =
6993                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
6994         if (!cache_matcher->matcher_object) {
6995                 rte_free(cache_matcher);
6996 #ifdef HAVE_MLX5DV_DR
6997                 flow_dv_tbl_resource_release(dev, tbl);
6998 #endif
6999                 return rte_flow_error_set(error, ENOMEM,
7000                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7001                                           NULL, "cannot create matcher");
7002         }
7003         /* Save the table information */
7004         cache_matcher->tbl = tbl;
7005         rte_atomic32_init(&cache_matcher->refcnt);
7006         /* Only matcher refcnt increases; table refcnt was taken by get API. */
7007         rte_atomic32_inc(&cache_matcher->refcnt);
7008         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
7009         dev_flow->handle->dvh.matcher = cache_matcher;
7010         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
7011                 key->domain ? "FDB" : "NIC", key->table_id,
7012                 cache_matcher->priority,
7013                 key->direction ? "tx" : "rx", (void *)cache_matcher,
7014                 rte_atomic32_read(&cache_matcher->refcnt));
7015         return 0;
7016 }
7017
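/*
 * Design note (editor's summary): the matcher cache key is the triple
 * (crc, priority, mask bytes), so flows sharing a mask and priority in
 * one table reuse a single mlx5dv matcher object and differ only in the
 * rule values. The CRC, computed by the caller over the mask buffer,
 * e.g.
 *
 *   matcher.crc = rte_raw_cksum(matcher.mask.buf, matcher.mask.size);
 *
 * acts as a cheap pre-filter; the memcmp() above is authoritative.
 */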
7018 /**
7019  * Find existing tag resource or create and register a new one.
7020  *
7021  * @param[in, out] dev
7022  *   Pointer to rte_eth_dev structure.
7023  * @param[in, out] tag_be24
7024  *   Tag value in big-endian byte order, right-shifted by 8 bits.
7025  * @param[in, out] dev_flow
7026  *   Pointer to the dev_flow.
7027  * @param[out] error
7028  *   pointer to error structure.
7029  *
7030  * @return
7031  *   0 on success, a negative errno value otherwise and rte_errno is set.
7032  */
7033 static int
7034 flow_dv_tag_resource_register
7035                         (struct rte_eth_dev *dev,
7036                          uint32_t tag_be24,
7037                          struct mlx5_flow *dev_flow,
7038                          struct rte_flow_error *error)
7039 {
7040         struct mlx5_priv *priv = dev->data->dev_private;
7041         struct mlx5_ibv_shared *sh = priv->sh;
7042         struct mlx5_flow_dv_tag_resource *cache_resource;
7043         struct mlx5_hlist_entry *entry;
7044
7045         /* Lookup a matching resource from cache. */
7046         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
7047         if (entry) {
7048                 cache_resource = container_of
7049                         (entry, struct mlx5_flow_dv_tag_resource, entry);
7050                 rte_atomic32_inc(&cache_resource->refcnt);
7051                 dev_flow->handle->dvh.tag_resource = cache_resource;
7052                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
7053                         (void *)cache_resource,
7054                         rte_atomic32_read(&cache_resource->refcnt));
7055                 return 0;
7056         }
7057         /* Register new resource. */
7058         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
7059         if (!cache_resource)
7060                 return rte_flow_error_set(error, ENOMEM,
7061                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7062                                           "cannot allocate resource memory");
7063         cache_resource->entry.key = (uint64_t)tag_be24;
7064         cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
7065         if (!cache_resource->action) {
7066                 rte_free(cache_resource);
7067                 return rte_flow_error_set(error, ENOMEM,
7068                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7069                                           NULL, "cannot create action");
7070         }
7071         rte_atomic32_init(&cache_resource->refcnt);
7072         rte_atomic32_inc(&cache_resource->refcnt);
7073         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
7074                 mlx5_glue->destroy_flow_action(cache_resource->action);
7075                 rte_free(cache_resource);
7076                 return rte_flow_error_set(error, EEXIST,
7077                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7078                                           NULL, "cannot insert tag");
7079         }
7080         dev_flow->handle->dvh.tag_resource = cache_resource;
7081         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
7082                 (void *)cache_resource,
7083                 rte_atomic32_read(&cache_resource->refcnt));
7084         return 0;
7085 }
7086
7087 /**
7088  * Release the tag.
7089  *
7090  * @param dev
7091  *   Pointer to Ethernet device.
7092  * @param flow
7093  *   Pointer to mlx5_flow.
7094  *
7095  * @return
7096  *   1 while a reference on it exists, 0 when freed.
7097  */
7098 static int
7099 flow_dv_tag_release(struct rte_eth_dev *dev,
7100                     struct mlx5_flow_dv_tag_resource *tag)
7101 {
7102         struct mlx5_priv *priv = dev->data->dev_private;
7103         struct mlx5_ibv_shared *sh = priv->sh;
7104
7105         MLX5_ASSERT(tag);
7106         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
7107                 dev->data->port_id, (void *)tag,
7108                 rte_atomic32_read(&tag->refcnt));
7109         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
7110                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
7111                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
7112                 DRV_LOG(DEBUG, "port %u tag %p: removed",
7113                         dev->data->port_id, (void *)tag);
7114                 rte_free(tag);
7115                 return 0;
7116         }
7117         return 1;
7118 }
7119
7120 /**
7121  * Translate port ID action to vport.
7122  *
7123  * @param[in] dev
7124  *   Pointer to rte_eth_dev structure.
7125  * @param[in] action
7126  *   Pointer to the port ID action.
7127  * @param[out] dst_port_id
7128  *   The target port ID.
7129  * @param[out] error
7130  *   Pointer to the error structure.
7131  *
7132  * @return
7133  *   0 on success, a negative errno value otherwise and rte_errno is set.
7134  */
7135 static int
7136 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
7137                                  const struct rte_flow_action *action,
7138                                  uint32_t *dst_port_id,
7139                                  struct rte_flow_error *error)
7140 {
7141         uint32_t port;
7142         struct mlx5_priv *priv;
7143         const struct rte_flow_action_port_id *conf =
7144                         (const struct rte_flow_action_port_id *)action->conf;
7145
7146         port = conf->original ? dev->data->port_id : conf->id;
7147         priv = mlx5_port_to_eswitch_info(port, false);
7148         if (!priv)
7149                 return rte_flow_error_set(error, -rte_errno,
7150                                           RTE_FLOW_ERROR_TYPE_ACTION,
7151                                           NULL,
7152                                           "No eswitch info was found for port");
7153 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
7154         /*
7155          * This parameter is transferred to
7156          * mlx5dv_dr_action_create_dest_ib_port().
7157          */
7158         *dst_port_id = priv->ibv_port;
7159 #else
7160         /*
7161          * Legacy mode, no LAG configuration is supported.
7162          * This parameter is transferred to
7163          * mlx5dv_dr_action_create_dest_vport().
7164          */
7165         *dst_port_id = priv->vport_id;
7166 #endif
7167         return 0;
7168 }
7169
7170 /**
7171  * Add Tx queue matcher.
7172  *
7173  * @param[in] dev
7174  *   Pointer to the dev struct.
7175  * @param[in, out] matcher
7176  *   Flow matcher.
7177  * @param[in, out] key
7178  *   Flow matcher value.
7179  * @param[in] item
7180  *   Flow pattern to translate.
7183  */
7184 static void
7185 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
7186                                 void *matcher, void *key,
7187                                 const struct rte_flow_item *item)
7188 {
7189         const struct mlx5_rte_flow_item_tx_queue *queue_m;
7190         const struct mlx5_rte_flow_item_tx_queue *queue_v;
7191         void *misc_m =
7192                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
7193         void *misc_v =
7194                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
7195         struct mlx5_txq_ctrl *txq;
7196         uint32_t queue;
7197
7199         queue_m = (const void *)item->mask;
7200         if (!queue_m)
7201                 return;
7202         queue_v = (const void *)item->spec;
7203         if (!queue_v)
7204                 return;
7205         txq = mlx5_txq_get(dev, queue_v->queue);
7206         if (!txq)
7207                 return;
7208         queue = txq->obj->sq->id;
7209         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
7210         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
7211                  queue & queue_m->queue);
7212         mlx5_txq_release(dev, queue_v->queue);
7213 }
7214
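/*
 * Editor's note: this internal item lets device-generated rules match
 * traffic by originating Tx queue. The queue index is resolved to the
 * hardware send queue number (SQN) through mlx5_txq_get(), e.g.:
 *
 *   struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 3 };
 *
 * is translated into a match on the SQ id of Tx queue 3 in
 * misc_parameters.source_sqn.
 */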
7215 /**
7216  * Set the hash fields according to the @p flow information.
7217  *
7218  * @param[in] dev_flow
7219  *   Pointer to the mlx5_flow.
7220  */
7221 static void
7222 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
7223 {
7224         struct rte_flow *flow = dev_flow->flow;
7225         uint64_t items = dev_flow->handle->layers;
7226         int rss_inner = 0;
7227         uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
7228
7229         dev_flow->hash_fields = 0;
7230 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
7231         if (flow->rss.level >= 2) {
7232                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
7233                 rss_inner = 1;
7234         }
7235 #endif
7236         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
7237             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
7238                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
7239                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7240                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
7241                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7242                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
7243                         else
7244                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
7245                 }
7246         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
7247                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
7248                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
7249                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
7250                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
7251                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
7252                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
7253                         else
7254                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
7255                 }
7256         }
7257         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
7258             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
7259                 if (rss_types & ETH_RSS_UDP) {
7260                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7261                                 dev_flow->hash_fields |=
7262                                                 IBV_RX_HASH_SRC_PORT_UDP;
7263                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7264                                 dev_flow->hash_fields |=
7265                                                 IBV_RX_HASH_DST_PORT_UDP;
7266                         else
7267                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
7268                 }
7269         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
7270                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
7271                 if (rss_types & ETH_RSS_TCP) {
7272                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
7273                                 dev_flow->hash_fields |=
7274                                                 IBV_RX_HASH_SRC_PORT_TCP;
7275                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
7276                                 dev_flow->hash_fields |=
7277                                                 IBV_RX_HASH_DST_PORT_TCP;
7278                         else
7279                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
7280                 }
7281         }
7282 }
7283
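/*
 * Worked example (editor's addition): for an RSS action with level 2
 * (inner-most) over a tunneled flow whose inner layers are IPv4/UDP and
 * rss.types == ETH_RSS_IP | ETH_RSS_UDP, the resulting hash fields are
 *
 *   IBV_RX_HASH_INNER | IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *   IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP
 *
 * assuming the MLX5_IPV4_IBV_RX_HASH and MLX5_UDP_IBV_RX_HASH macros
 * expand to the src+dst pairs shown.
 */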
7284 /**
7285  * Fill the flow with DV spec, lock free
7286  * (mutex should be acquired by caller).
7287  *
7288  * @param[in] dev
7289  *   Pointer to rte_eth_dev structure.
7290  * @param[in, out] dev_flow
7291  *   Pointer to the sub flow.
7292  * @param[in] attr
7293  *   Pointer to the flow attributes.
7294  * @param[in] items
7295  *   Pointer to the list of items.
7296  * @param[in] actions
7297  *   Pointer to the list of actions.
7298  * @param[out] error
7299  *   Pointer to the error structure.
7300  *
7301  * @return
7302  *   0 on success, a negative errno value otherwise and rte_errno is set.
7303  */
7304 static int
7305 __flow_dv_translate(struct rte_eth_dev *dev,
7306                     struct mlx5_flow *dev_flow,
7307                     const struct rte_flow_attr *attr,
7308                     const struct rte_flow_item items[],
7309                     const struct rte_flow_action actions[],
7310                     struct rte_flow_error *error)
7311 {
7312         struct mlx5_priv *priv = dev->data->dev_private;
7313         struct mlx5_dev_config *dev_conf = &priv->config;
7314         struct rte_flow *flow = dev_flow->flow;
7315         struct mlx5_flow_handle *handle = dev_flow->handle;
7316         uint64_t item_flags = 0;
7317         uint64_t last_item = 0;
7318         uint64_t action_flags = 0;
7319         uint64_t priority = attr->priority;
7320         struct mlx5_flow_dv_matcher matcher = {
7321                 .mask = {
7322                         .size = sizeof(matcher.mask.buf),
7323                 },
7324         };
7325         int actions_n = 0;
7326         bool actions_end = false;
7327         union {
7328                 struct mlx5_flow_dv_modify_hdr_resource res;
7329                 uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
7330                             sizeof(struct mlx5_modification_cmd) *
7331                             (MLX5_MAX_MODIFY_NUM + 1)];
7332         } mhdr_dummy;
7333         struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
7334         union flow_dv_attr flow_attr = { .attr = 0 };
7335         uint32_t tag_be;
7336         union mlx5_flow_tbl_key tbl_key;
7337         uint32_t modify_action_position = UINT32_MAX;
7338         void *match_mask = matcher.mask.buf;
7339         void *match_value = dev_flow->dv.value.buf;
7340         uint8_t next_protocol = 0xff;
7341         struct rte_vlan_hdr vlan = { 0 };
7342         uint32_t table;
7343         int ret = 0;
7344
7345         mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
7346                                            MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
7347         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
7348                                        !!priv->fdb_def_rule, &table, error);
7349         if (ret)
7350                 return ret;
7351         dev_flow->dv.group = table;
7352         if (attr->transfer)
7353                 mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
7354         if (priority == MLX5_FLOW_PRIO_RSVD)
7355                 priority = dev_conf->flow_prio - 1;
7356         /* The number of actions must be set to 0 in case of a dirty stack. */
7357         mhdr_res->actions_num = 0;
7358         for (; !actions_end ; actions++) {
7359                 const struct rte_flow_action_queue *queue;
7360                 const struct rte_flow_action_rss *rss;
7361                 const struct rte_flow_action *action = actions;
7362                 const struct rte_flow_action_count *count = action->conf;
7363                 const uint8_t *rss_key;
7364                 const struct rte_flow_action_jump *jump_data;
7365                 const struct rte_flow_action_meter *mtr;
7366                 struct mlx5_flow_tbl_resource *tbl;
7367                 uint32_t port_id = 0;
7368                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
7369                 int action_type = actions->type;
7370                 const struct rte_flow_action *found_action = NULL;
7371
7372                 switch (action_type) {
7373                 case RTE_FLOW_ACTION_TYPE_VOID:
7374                         break;
7375                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7376                         if (flow_dv_translate_action_port_id(dev, action,
7377                                                              &port_id, error))
7378                                 return -rte_errno;
7379                         port_id_resource.port_id = port_id;
7380                         if (flow_dv_port_id_action_resource_register
7381                             (dev, &port_id_resource, dev_flow, error))
7382                                 return -rte_errno;
7383                         dev_flow->dv.actions[actions_n++] =
7384                                         handle->dvh.port_id_action->action;
7385                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
7386                         break;
7387                 case RTE_FLOW_ACTION_TYPE_FLAG:
7388                         action_flags |= MLX5_FLOW_ACTION_FLAG;
7389                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7390                                 struct rte_flow_action_mark mark = {
7391                                         .id = MLX5_FLOW_MARK_DEFAULT,
7392                                 };
7393
7394                                 if (flow_dv_convert_action_mark(dev, &mark,
7395                                                                 mhdr_res,
7396                                                                 error))
7397                                         return -rte_errno;
7398                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7399                                 break;
7400                         }
7401                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
7402                         /*
7403                          * Only one FLAG or MARK is supported per device flow
7404                          * right now, so the pointer to the tag resource must be
7405                          * NULL before the registration.
7406                          */
7407                         MLX5_ASSERT(!handle->dvh.tag_resource);
7408                         if (flow_dv_tag_resource_register(dev, tag_be,
7409                                                           dev_flow, error))
7410                                 return -rte_errno;
7411                         dev_flow->dv.actions[actions_n++] =
7412                                         handle->dvh.tag_resource->action;
7413                         break;
7414                 case RTE_FLOW_ACTION_TYPE_MARK:
7415                         action_flags |= MLX5_FLOW_ACTION_MARK;
7416                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
7417                                 const struct rte_flow_action_mark *mark =
7418                                         (const struct rte_flow_action_mark *)
7419                                                 actions->conf;
7420
7421                                 if (flow_dv_convert_action_mark(dev, mark,
7422                                                                 mhdr_res,
7423                                                                 error))
7424                                         return -rte_errno;
7425                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
7426                                 break;
7427                         }
7428                         /* Fall-through */
7429                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7430                         /* Legacy (non-extensive) MARK action. */
7431                         tag_be = mlx5_flow_mark_set
7432                               (((const struct rte_flow_action_mark *)
7433                                (actions->conf))->id);
7434                         MLX5_ASSERT(!handle->dvh.tag_resource);
7435                         if (flow_dv_tag_resource_register(dev, tag_be,
7436                                                           dev_flow, error))
7437                                 return -rte_errno;
7438                         dev_flow->dv.actions[actions_n++] =
7439                                         handle->dvh.tag_resource->action;
7440                         break;
7441                 case RTE_FLOW_ACTION_TYPE_SET_META:
7442                         if (flow_dv_convert_action_set_meta
7443                                 (dev, mhdr_res, attr,
7444                                  (const struct rte_flow_action_set_meta *)
7445                                   actions->conf, error))
7446                                 return -rte_errno;
7447                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7448                         break;
7449                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7450                         if (flow_dv_convert_action_set_tag
7451                                 (dev, mhdr_res,
7452                                  (const struct rte_flow_action_set_tag *)
7453                                   actions->conf, error))
7454                                 return -rte_errno;
7455                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7456                         break;
7457                 case RTE_FLOW_ACTION_TYPE_DROP:
7458                         action_flags |= MLX5_FLOW_ACTION_DROP;
7459                         break;
7460                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7461                         MLX5_ASSERT(flow->rss.queue);
7462                         queue = actions->conf;
7463                         flow->rss.queue_num = 1;
7464                         (*flow->rss.queue)[0] = queue->index;
7465                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7466                         break;
7467                 case RTE_FLOW_ACTION_TYPE_RSS:
7468                         MLX5_ASSERT(flow->rss.queue);
7469                         rss = actions->conf;
7470                         if (flow->rss.queue)
7471                                 memcpy((*flow->rss.queue), rss->queue,
7472                                        rss->queue_num * sizeof(uint16_t));
7473                         flow->rss.queue_num = rss->queue_num;
7474                         /* NULL RSS key indicates default RSS key. */
7475                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
7476                         memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
7477                         /*
7478                          * rss->level and rss->types should be set in advance
7479                          * when expanding the items for RSS.
7480                          */
7481                         action_flags |= MLX5_FLOW_ACTION_RSS;
7482                         break;
7483                 case RTE_FLOW_ACTION_TYPE_COUNT:
7484                         if (!dev_conf->devx) {
7485                                 rte_errno = ENOTSUP;
7486                                 goto cnt_err;
7487                         }
7488                         flow->counter = flow_dv_counter_alloc(dev,
7489                                                         count->shared,
7490                                                         count->id,
7491                                                         dev_flow->dv.group);
7492                         if (!flow->counter)
7493                                 goto cnt_err;
7494                         dev_flow->dv.actions[actions_n++] =
7495                                   (flow_dv_counter_get_by_idx(dev,
7496                                   flow->counter, NULL))->action;
7497                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7498                         break;
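                /* Error path for the COUNT action, reached only via goto. */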
7499 cnt_err:
7500                         if (rte_errno == ENOTSUP)
7501                                 return rte_flow_error_set
7502                                               (error, ENOTSUP,
7503                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7504                                                NULL,
7505                                                "count action not supported");
7506                         else
7507                                 return rte_flow_error_set
7508                                                 (error, rte_errno,
7509                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7510                                                  action,
7511                                                  "cannot create counter"
7512                                                   " object.");
7513                         break;
7514                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7515                         dev_flow->dv.actions[actions_n++] =
7516                                                 priv->sh->pop_vlan_action;
7517                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7518                         break;
7519                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
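                        /*
                         * Take the VLAN VID/PCP from the matched VLAN item
                         * unless a preceding SET_VLAN_VID action already set
                         * them; SET_VLAN_VID/PCP actions that follow the push
                         * override the item values below.
                         */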
7520                         if (!(action_flags &
7521                               MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
7522                                 flow_dev_get_vlan_info_from_items(items, &vlan);
7523                         vlan.eth_proto = rte_be_to_cpu_16
7524                              ((((const struct rte_flow_action_of_push_vlan *)
7525                                                    actions->conf)->ethertype));
7526                         found_action = mlx5_flow_find_action
7527                                         (actions + 1,
7528                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
7529                         if (found_action)
7530                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7531                         found_action = mlx5_flow_find_action
7532                                         (actions + 1,
7533                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
7534                         if (found_action)
7535                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7536                         if (flow_dv_create_action_push_vlan
7537                                             (dev, attr, &vlan, dev_flow, error))
7538                                 return -rte_errno;
7539                         dev_flow->dv.actions[actions_n++] =
7540                                         handle->dvh.push_vlan_res->action;
7541                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7542                         break;
7543                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7544                         /* Already handled by the OF_PUSH_VLAN action. */
7545                         MLX5_ASSERT(action_flags &
7546                                     MLX5_FLOW_ACTION_OF_PUSH_VLAN);
7547                         break;
7548                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7549                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7550                                 break;
7551                         flow_dev_get_vlan_info_from_items(items, &vlan);
7552                         mlx5_update_vlan_vid_pcp(actions, &vlan);
7553                         /* If there is no VLAN push, this is a modify-header action. */
7554                         if (flow_dv_convert_action_modify_vlan_vid
7555                                                 (mhdr_res, actions, error))
7556                                 return -rte_errno;
7557                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7558                         break;
7559                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7560                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7561                         if (flow_dv_create_action_l2_encap(dev, actions,
7562                                                            dev_flow,
7563                                                            attr->transfer,
7564                                                            error))
7565                                 return -rte_errno;
7566                         dev_flow->dv.actions[actions_n++] =
7567                                         handle->dvh.encap_decap->verbs_action;
7568                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7569                         break;
7570                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7571                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7572                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
7573                                                            attr->transfer,
7574                                                            error))
7575                                 return -rte_errno;
7576                         dev_flow->dv.actions[actions_n++] =
7577                                         handle->dvh.encap_decap->verbs_action;
7578                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7579                         break;
7580                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7581                         /* Handle encap with preceding decap. */
7582                         if (action_flags & MLX5_FLOW_ACTION_DECAP) {
7583                                 if (flow_dv_create_action_raw_encap
7584                                         (dev, actions, dev_flow, attr, error))
7585                                         return -rte_errno;
7586                                 dev_flow->dv.actions[actions_n++] =
7587                                         handle->dvh.encap_decap->verbs_action;
7588                         } else {
7589                                 /* Handle encap without preceding decap. */
7590                                 if (flow_dv_create_action_l2_encap
7591                                     (dev, actions, dev_flow, attr->transfer,
7592                                      error))
7593                                         return -rte_errno;
7594                                 dev_flow->dv.actions[actions_n++] =
7595                                         handle->dvh.encap_decap->verbs_action;
7596                         }
7597                         action_flags |= MLX5_FLOW_ACTION_ENCAP;
7598                         break;
7599                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
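                        /* Skip VOID actions to see whether a RAW_ENCAP follows. */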
7600                         while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
7601                                 ;
7602                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7603                                 if (flow_dv_create_action_l2_decap
7604                                     (dev, dev_flow, attr->transfer, error))
7605                                         return -rte_errno;
7606                                 dev_flow->dv.actions[actions_n++] =
7607                                         handle->dvh.encap_decap->verbs_action;
7608                         }
7609                         /* If decap is followed by encap, handle it at encap. */
7610                         action_flags |= MLX5_FLOW_ACTION_DECAP;
7611                         break;
7612                 case RTE_FLOW_ACTION_TYPE_JUMP:
7613                         jump_data = action->conf;
7614                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
7615                                                        jump_data->group,
7616                                                        !!priv->fdb_def_rule,
7617                                                        &table, error);
7618                         if (ret)
7619                                 return ret;
7620                         tbl = flow_dv_tbl_resource_get(dev, table,
7621                                                        attr->egress,
7622                                                        attr->transfer, error);
7623                         if (!tbl)
7624                                 return rte_flow_error_set
7625                                                 (error, errno,
7626                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7627                                                  NULL,
7628                                                  "cannot create jump action.");
7629                         if (flow_dv_jump_tbl_resource_register
7630                             (dev, tbl, dev_flow, error)) {
7631                                 flow_dv_tbl_resource_release(dev, tbl);
7632                                 return rte_flow_error_set
7633                                                 (error, errno,
7634                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7635                                                  NULL,
7636                                                  "cannot create jump action.");
7637                         }
7638                         dev_flow->dv.actions[actions_n++] =
7639                                         handle->dvh.jump->action;
7640                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7641                         break;
7642                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7643                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7644                         if (flow_dv_convert_action_modify_mac
7645                                         (mhdr_res, actions, error))
7646                                 return -rte_errno;
7647                         action_flags |= actions->type ==
7648                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7649                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
7650                                         MLX5_FLOW_ACTION_SET_MAC_DST;
7651                         break;
7652                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7653                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7654                         if (flow_dv_convert_action_modify_ipv4
7655                                         (mhdr_res, actions, error))
7656                                 return -rte_errno;
7657                         action_flags |= actions->type ==
7658                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7659                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
7660                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
7661                         break;
7662                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7663                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7664                         if (flow_dv_convert_action_modify_ipv6
7665                                         (mhdr_res, actions, error))
7666                                 return -rte_errno;
7667                         action_flags |= actions->type ==
7668                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7669                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
7670                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
7671                         break;
7672                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7673                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7674                         if (flow_dv_convert_action_modify_tp
7675                                         (mhdr_res, actions, items,
7676                                          &flow_attr, dev_flow, !!(action_flags &
7677                                          MLX5_FLOW_ACTION_DECAP), error))
7678                                 return -rte_errno;
7679                         action_flags |= actions->type ==
7680                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7681                                         MLX5_FLOW_ACTION_SET_TP_SRC :
7682                                         MLX5_FLOW_ACTION_SET_TP_DST;
7683                         break;
7684                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7685                         if (flow_dv_convert_action_modify_dec_ttl
7686                                         (mhdr_res, items, &flow_attr, dev_flow,
7687                                          !!(action_flags &
7688                                          MLX5_FLOW_ACTION_DECAP), error))
7689                                 return -rte_errno;
7690                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
7691                         break;
7692                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7693                         if (flow_dv_convert_action_modify_ttl
7694                                         (mhdr_res, actions, items, &flow_attr,
7695                                          dev_flow, !!(action_flags &
7696                                          MLX5_FLOW_ACTION_DECAP), error))
7697                                 return -rte_errno;
7698                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
7699                         break;
7700                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7701                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7702                         if (flow_dv_convert_action_modify_tcp_seq
7703                                         (mhdr_res, actions, error))
7704                                 return -rte_errno;
7705                         action_flags |= actions->type ==
7706                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7707                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
7708                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7709                         break;
7710
7711                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7712                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7713                         if (flow_dv_convert_action_modify_tcp_ack
7714                                         (mhdr_res, actions, error))
7715                                 return -rte_errno;
7716                         action_flags |= actions->type ==
7717                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7718                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
7719                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
7720                         break;
7721                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7722                         if (flow_dv_convert_action_set_reg
7723                                         (mhdr_res, actions, error))
7724                                 return -rte_errno;
7725                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7726                         break;
7727                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7728                         if (flow_dv_convert_action_copy_mreg
7729                                         (dev, mhdr_res, actions, error))
7730                                 return -rte_errno;
7731                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7732                         break;
7733                 case RTE_FLOW_ACTION_TYPE_METER:
7734                         mtr = actions->conf;
7735                         if (!flow->meter) {
7736                                 flow->meter = mlx5_flow_meter_attach(priv,
7737                                                         mtr->mtr_id, attr,
7738                                                         error);
7739                                 if (!flow->meter)
7740                                         return rte_flow_error_set(error,
7741                                                 rte_errno,
7742                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7743                                                 NULL,
7744                                                 "meter not found "
7745                                                 "or invalid parameters");
7746                         }
7747                         /* Set the meter action. */
7748                         dev_flow->dv.actions[actions_n++] =
7749                                 flow->meter->mfts->meter_action;
7750                         action_flags |= MLX5_FLOW_ACTION_METER;
7751                         break;
7752                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7753                         if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
7754                                                               actions, error))
7755                                 return -rte_errno;
7756                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7757                         break;
7758                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7759                         if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
7760                                                               actions, error))
7761                                 return -rte_errno;
7762                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7763                         break;
7764                 case RTE_FLOW_ACTION_TYPE_END:
7765                         actions_end = true;
7766                         if (mhdr_res->actions_num) {
7767                                 /* Create the modify-header action if needed. */
7768                                 if (flow_dv_modify_hdr_resource_register
7769                                         (dev, mhdr_res, dev_flow, error))
7770                                         return -rte_errno;
7771                                 dev_flow->dv.actions[modify_action_position] =
7772                                         handle->dvh.modify_hdr->verbs_action;
7773                         }
7774                         break;
7775                 default:
7776                         break;
7777                 }
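                /*
                 * Reserve one slot in the actions array for the
                 * modify-header action on its first use; the action itself
                 * is created once, when the END action is reached.
                 */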
7778                 if (mhdr_res->actions_num &&
7779                     modify_action_position == UINT32_MAX)
7780                         modify_action_position = actions_n++;
7781         }
7782         dev_flow->dv.actions_n = actions_n;
7783         handle->act_flags = action_flags;
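        /* Translate the pattern items into the matcher mask and value. */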
7784         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7785                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7786                 int item_type = items->type;
7787
7788                 switch (item_type) {
7789                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7790                         flow_dv_translate_item_port_id(dev, match_mask,
7791                                                        match_value, items);
7792                         last_item = MLX5_FLOW_ITEM_PORT_ID;
7793                         break;
7794                 case RTE_FLOW_ITEM_TYPE_ETH:
7795                         flow_dv_translate_item_eth(match_mask, match_value,
7796                                                    items, tunnel);
7797                         matcher.priority = MLX5_PRIORITY_MAP_L2;
7798                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7799                                              MLX5_FLOW_LAYER_OUTER_L2;
7800                         break;
7801                 case RTE_FLOW_ITEM_TYPE_VLAN:
7802                         flow_dv_translate_item_vlan(dev_flow,
7803                                                     match_mask, match_value,
7804                                                     items, tunnel);
7805                         matcher.priority = MLX5_PRIORITY_MAP_L2;
7806                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
7807                                               MLX5_FLOW_LAYER_INNER_VLAN) :
7808                                              (MLX5_FLOW_LAYER_OUTER_L2 |
7809                                               MLX5_FLOW_LAYER_OUTER_VLAN);
7810                         break;
7811                 case RTE_FLOW_ITEM_TYPE_IPV4:
7812                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7813                                                   &item_flags, &tunnel);
7814                         flow_dv_translate_item_ipv4(match_mask, match_value,
7815                                                     items, item_flags, tunnel,
7816                                                     dev_flow->dv.group);
7817                         matcher.priority = MLX5_PRIORITY_MAP_L3;
7818                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7819                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7820                         if (items->mask != NULL &&
7821                             ((const struct rte_flow_item_ipv4 *)
7822                              items->mask)->hdr.next_proto_id) {
7823                                 next_protocol =
7824                                         ((const struct rte_flow_item_ipv4 *)
7825                                          (items->spec))->hdr.next_proto_id;
7826                                 next_protocol &=
7827                                         ((const struct rte_flow_item_ipv4 *)
7828                                          (items->mask))->hdr.next_proto_id;
7829                         } else {
7830                                 /* Reset for inner layer. */
7831                                 next_protocol = 0xff;
7832                         }
7833                         break;
7834                 case RTE_FLOW_ITEM_TYPE_IPV6:
7835                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7836                                                   &item_flags, &tunnel);
7837                         flow_dv_translate_item_ipv6(match_mask, match_value,
7838                                                     items, item_flags, tunnel,
7839                                                     dev_flow->dv.group);
7840                         matcher.priority = MLX5_PRIORITY_MAP_L3;
7841                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7842                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7843                         if (items->mask != NULL &&
7844                             ((const struct rte_flow_item_ipv6 *)
7845                              items->mask)->hdr.proto) {
7846                                 next_protocol =
7847                                         ((const struct rte_flow_item_ipv6 *)
7848                                          items->spec)->hdr.proto;
7849                                 next_protocol &=
7850                                         ((const struct rte_flow_item_ipv6 *)
7851                                          items->mask)->hdr.proto;
7852                         } else {
7853                                 /* Reset for inner layer. */
7854                                 next_protocol = 0xff;
7855                         }
7856                         break;
7857                 case RTE_FLOW_ITEM_TYPE_TCP:
7858                         flow_dv_translate_item_tcp(match_mask, match_value,
7859                                                    items, tunnel);
7860                         matcher.priority = MLX5_PRIORITY_MAP_L4;
7861                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7862                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7863                         break;
7864                 case RTE_FLOW_ITEM_TYPE_UDP:
7865                         flow_dv_translate_item_udp(match_mask, match_value,
7866                                                    items, tunnel);
7867                         matcher.priority = MLX5_PRIORITY_MAP_L4;
7868                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7869                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7870                         break;
7871                 case RTE_FLOW_ITEM_TYPE_GRE:
7872                         flow_dv_translate_item_gre(match_mask, match_value,
7873                                                    items, tunnel);
7874                         matcher.priority = flow->rss.level >= 2 ?
7875                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7876                         last_item = MLX5_FLOW_LAYER_GRE;
7877                         break;
7878                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7879                         flow_dv_translate_item_gre_key(match_mask,
7880                                                        match_value, items);
7881                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7882                         break;
7883                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7884                         flow_dv_translate_item_nvgre(match_mask, match_value,
7885                                                      items, tunnel);
7886                         matcher.priority = flow->rss.level >= 2 ?
7887                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7888                         last_item = MLX5_FLOW_LAYER_GRE;
7889                         break;
7890                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7891                         flow_dv_translate_item_vxlan(match_mask, match_value,
7892                                                      items, tunnel);
7893                         matcher.priority = flow->rss.level >= 2 ?
7894                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7895                         last_item = MLX5_FLOW_LAYER_VXLAN;
7896                         break;
7897                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7898                         flow_dv_translate_item_vxlan_gpe(match_mask,
7899                                                          match_value, items,
7900                                                          tunnel);
7901                         matcher.priority = flow->rss.level >= 2 ?
7902                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7903                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7904                         break;
7905                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7906                         flow_dv_translate_item_geneve(match_mask, match_value,
7907                                                       items, tunnel);
7908                         matcher.priority = flow->rss.level >= 2 ?
7909                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7910                         last_item = MLX5_FLOW_LAYER_GENEVE;
7911                         break;
7912                 case RTE_FLOW_ITEM_TYPE_MPLS:
7913                         flow_dv_translate_item_mpls(match_mask, match_value,
7914                                                     items, last_item, tunnel);
7915                         matcher.priority = flow->rss.level >= 2 ?
7916                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7917                         last_item = MLX5_FLOW_LAYER_MPLS;
7918                         break;
7919                 case RTE_FLOW_ITEM_TYPE_MARK:
7920                         flow_dv_translate_item_mark(dev, match_mask,
7921                                                     match_value, items);
7922                         last_item = MLX5_FLOW_ITEM_MARK;
7923                         break;
7924                 case RTE_FLOW_ITEM_TYPE_META:
7925                         flow_dv_translate_item_meta(dev, match_mask,
7926                                                     match_value, attr, items);
7927                         last_item = MLX5_FLOW_ITEM_METADATA;
7928                         break;
7929                 case RTE_FLOW_ITEM_TYPE_ICMP:
7930                         flow_dv_translate_item_icmp(match_mask, match_value,
7931                                                     items, tunnel);
7932                         last_item = MLX5_FLOW_LAYER_ICMP;
7933                         break;
7934                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7935                         flow_dv_translate_item_icmp6(match_mask, match_value,
7936                                                       items, tunnel);
7937                         last_item = MLX5_FLOW_LAYER_ICMP6;
7938                         break;
7939                 case RTE_FLOW_ITEM_TYPE_TAG:
7940                         flow_dv_translate_item_tag(dev, match_mask,
7941                                                    match_value, items);
7942                         last_item = MLX5_FLOW_ITEM_TAG;
7943                         break;
7944                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7945                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
7946                                                         match_value, items);
7947                         last_item = MLX5_FLOW_ITEM_TAG;
7948                         break;
7949                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7950                         flow_dv_translate_item_tx_queue(dev, match_mask,
7951                                                         match_value,
7952                                                         items);
7953                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7954                         break;
7955                 case RTE_FLOW_ITEM_TYPE_GTP:
7956                         flow_dv_translate_item_gtp(match_mask, match_value,
7957                                                    items, tunnel);
7958                         matcher.priority = flow->rss.level >= 2 ?
7959                                     MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
7960                         last_item = MLX5_FLOW_LAYER_GTP;
7961                         break;
7962                 default:
7963                         break;
7964                 }
7965                 item_flags |= last_item;
7966         }
7967         /*
7968          * When E-Switch mode is enabled, we have two cases where we need to
7969          * set the source port manually.
7970          * The first is a NIC steering rule, and the second is an E-Switch
7971          * rule where no port_id item was found. In both cases the source
7972          * port is set according to the current port in use.
7973          */
7974         if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) &&
7975             (priv->representor || priv->master)) {
7976                 if (flow_dv_translate_item_port_id(dev, match_mask,
7977                                                    match_value, NULL))
7978                         return -rte_errno;
7979         }
7980 #ifdef RTE_LIBRTE_MLX5_DEBUG
7981         MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
7982                                               dev_flow->dv.value.buf));
7983 #endif
7984         /*
7985          * Layers may be already initialized from prefix flow if this dev_flow
7986          * is the suffix flow.
7987          */
7988         handle->layers |= item_flags;
7989         if (action_flags & MLX5_FLOW_ACTION_RSS)
7990                 flow_dv_hashfields_set(dev_flow);
7991         /* Register matcher. */
7992         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
7993                                     matcher.mask.size);
7994         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
7995                                                      matcher.priority);
7996         /* The reserved field does not need to be set to 0 here. */
7997         tbl_key.domain = attr->transfer;
7998         tbl_key.direction = attr->egress;
7999         tbl_key.table_id = dev_flow->dv.group;
8000         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
8001                 return -rte_errno;
8002         return 0;
8003 }
8004
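/*
 * Illustration only -- a hedged sketch, not driver code, of an
 * application-level rule exercising the OF_PUSH_VLAN translation above:
 * the pushed VID/PCP default to the matched VLAN item, and a following
 * SET_VLAN_VID action overrides the VID. The port ids and header values
 * below are assumptions.
 *
 * @code
 * struct rte_flow_attr attr = { .transfer = 1 };
 * struct rte_flow_item_vlan vlan_spec = {
 *         .tci = RTE_BE16(0x2123), // assumed: PCP 1, VID 0x123
 * };
 * struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_spec },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_of_push_vlan push = {
 *         .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 * };
 * struct rte_flow_action_of_set_vlan_vid vid = {
 *         .vlan_vid = RTE_BE16(100), // overrides the VID from the item
 * };
 * struct rte_flow_action_port_id dst = { .id = 1 }; // assumed peer port
 * struct rte_flow_action actions[] = {
 *         { .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *         { .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *         { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &dst },
 *         { .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 * @endcode
 */
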
8005 /**
8006  * Apply the flow to the NIC, lock free
8007  * (the mutex should be acquired by the caller).
8008  *
8009  * @param[in] dev
8010  *   Pointer to the Ethernet device structure.
8011  * @param[in, out] flow
8012  *   Pointer to flow structure.
8013  * @param[out] error
8014  *   Pointer to error structure.
8015  *
8016  * @return
8017  *   0 on success, a negative errno value otherwise and rte_errno is set.
8018  */
8019 static int
8020 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
8021                 struct rte_flow_error *error)
8022 {
8023         struct mlx5_flow_dv_workspace *dv;
8024         struct mlx5_flow_handle *dh;
8025         struct mlx5_flow_handle_dv *dv_h;
8026         struct mlx5_flow *dev_flow;
8027         struct mlx5_priv *priv = dev->data->dev_private;
8028         int n;
8029         int err;
8030         int idx;
8031
8032         for (idx = priv->flow_idx - 1; idx >= priv->flow_nested_idx; idx--) {
8033                 dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
8034                 dv = &dev_flow->dv;
8035                 dh = dev_flow->handle;
8036                 dv_h = &dh->dvh;
8037                 n = dv->actions_n;
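                /* Append the fate action (drop or queue/RSS) last. */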
8038                 if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
8039                         if (dv->transfer) {
8040                                 dv->actions[n++] = priv->sh->esw_drop_action;
8041                         } else {
8042                                 dh->hrxq = mlx5_hrxq_drop_new(dev);
8043                                 if (!dh->hrxq) {
8044                                         rte_flow_error_set
8045                                                 (error, errno,
8046                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8047                                                  NULL,
8048                                                  "cannot get drop hash queue");
8049                                         goto error;
8050                                 }
8051                                 dv->actions[n++] = dh->hrxq->action;
8052                         }
8053                 } else if (dh->act_flags &
8054                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
8055                         struct mlx5_hrxq *hrxq;
8056
8057                         MLX5_ASSERT(flow->rss.queue);
8058                         hrxq = mlx5_hrxq_get(dev, flow->rss.key,
8059                                              MLX5_RSS_HASH_KEY_LEN,
8060                                              dev_flow->hash_fields,
8061                                              (*flow->rss.queue),
8062                                              flow->rss.queue_num);
8063                         if (!hrxq) {
8064                                 hrxq = mlx5_hrxq_new
8065                                         (dev, flow->rss.key,
8066                                          MLX5_RSS_HASH_KEY_LEN,
8067                                          dev_flow->hash_fields,
8068                                          (*flow->rss.queue),
8069                                          flow->rss.queue_num,
8070                                          !!(dh->layers &
8071                                             MLX5_FLOW_LAYER_TUNNEL));
8072                         }
8073                         if (!hrxq) {
8074                                 rte_flow_error_set
8075                                         (error, rte_errno,
8076                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
8077                                          "cannot get hash queue");
8078                                 goto error;
8079                         }
8080                         dh->hrxq = hrxq;
8081                         dv->actions[n++] = dh->hrxq->action;
8082                 }
8083                 dh->ib_flow =
8084                         mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
8085                                                   (void *)&dv->value, n,
8086                                                   dv->actions);
8087                 if (!dh->ib_flow) {
8088                         rte_flow_error_set(error, errno,
8089                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8090                                            NULL,
8091                                            "hardware refuses to create flow");
8092                         goto error;
8093                 }
8094                 if (priv->vmwa_context &&
8095                     dh->vf_vlan.tag && !dh->vf_vlan.created) {
8096                         /*
8097                          * The rule contains the VLAN pattern.
8098                          * For a VF, we create a VLAN interface to
8099                          * make the hypervisor set the correct
8100                          * e-Switch vport context.
8101                          */
8102                         mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
8103                 }
8104         }
8105         return 0;
8106 error:
8107         err = rte_errno; /* Save rte_errno before cleanup. */
8108         LIST_FOREACH(dh, &flow->dev_handles, next) {
8109                 if (dh->hrxq) {
8110                         if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
8111                                 mlx5_hrxq_drop_release(dev);
8112                         else
8113                                 mlx5_hrxq_release(dev, dh->hrxq);
8114                         dh->hrxq = NULL;
8115                 }
8116                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
8117                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
8118         }
8119         rte_errno = err; /* Restore rte_errno. */
8120         return -rte_errno;
8121 }
8122
8123 /**
8124  * Release the flow matcher.
8125  *
8126  * @param dev
8127  *   Pointer to Ethernet device.
8128  * @param handle
8129  *   Pointer to mlx5_flow_handle.
8130  *
8131  * @return
8132  *   1 while a reference on it exists, 0 when freed.
8133  */
8134 static int
8135 flow_dv_matcher_release(struct rte_eth_dev *dev,
8136                         struct mlx5_flow_handle *handle)
8137 {
8138         struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
8139
8140         MLX5_ASSERT(matcher->matcher_object);
8141         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
8142                 dev->data->port_id, (void *)matcher,
8143                 rte_atomic32_read(&matcher->refcnt));
8144         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
8145                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8146                            (matcher->matcher_object));
8147                 LIST_REMOVE(matcher, next);
8148                 /* table ref-- in release interface. */
8149                 flow_dv_tbl_resource_release(dev, matcher->tbl);
8150                 rte_free(matcher);
8151                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
8152                         dev->data->port_id, (void *)matcher);
8153                 return 0;
8154         }
8155         return 1;
8156 }
8157
8158 /**
8159  * Release an encap/decap resource.
8160  *
8161  * @param handle
8162  *   Pointer to mlx5_flow_handle.
8163  *
8164  * @return
8165  *   1 while a reference on it exists, 0 when freed.
8166  */
8167 static int
8168 flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
8169 {
8170         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
8171                                                 handle->dvh.encap_decap;
8172
8173         MLX5_ASSERT(cache_resource->verbs_action);
8174         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
8175                 (void *)cache_resource,
8176                 rte_atomic32_read(&cache_resource->refcnt));
8177         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8178                 claim_zero(mlx5_glue->destroy_flow_action
8179                                 (cache_resource->verbs_action));
8180                 LIST_REMOVE(cache_resource, next);
8181                 rte_free(cache_resource);
8182                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
8183                         (void *)cache_resource);
8184                 return 0;
8185         }
8186         return 1;
8187 }
8188
8189 /**
8190  * Release a jump-to-table action resource.
8191  *
8192  * @param dev
8193  *   Pointer to Ethernet device.
8194  * @param handle
8195  *   Pointer to mlx5_flow_handle.
8196  *
8197  * @return
8198  *   1 while a reference on it exists, 0 when freed.
8199  */
8200 static int
8201 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
8202                                   struct mlx5_flow_handle *handle)
8203 {
8204         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
8205                                                         handle->dvh.jump;
8206         struct mlx5_flow_tbl_data_entry *tbl_data =
8207                         container_of(cache_resource,
8208                                      struct mlx5_flow_tbl_data_entry, jump);
8209
8210         MLX5_ASSERT(cache_resource->action);
8211         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
8212                 (void *)cache_resource,
8213                 rte_atomic32_read(&cache_resource->refcnt));
8214         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8215                 claim_zero(mlx5_glue->destroy_flow_action
8216                                 (cache_resource->action));
8217                 /* The jump action memory is freed inside the table release. */
8218                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
8219                 DRV_LOG(DEBUG, "jump table resource %p: removed",
8220                         (void *)cache_resource);
8221                 return 0;
8222         }
8223         return 1;
8224 }
8225
8226 /**
8227  * Release a modify-header resource.
8228  *
8229  * @param handle
8230  *   Pointer to mlx5_flow_handle.
8231  *
8232  * @return
8233  *   1 while a reference on it exists, 0 when freed.
8234  */
8235 static int
8236 flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
8237 {
8238         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
8239                                                         handle->dvh.modify_hdr;
8240
8241         MLX5_ASSERT(cache_resource->verbs_action);
8242         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
8243                 (void *)cache_resource,
8244                 rte_atomic32_read(&cache_resource->refcnt));
8245         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8246                 claim_zero(mlx5_glue->destroy_flow_action
8247                                 (cache_resource->verbs_action));
8248                 LIST_REMOVE(cache_resource, next);
8249                 rte_free(cache_resource);
8250                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
8251                         (void *)cache_resource);
8252                 return 0;
8253         }
8254         return 1;
8255 }
8256
8257 /**
8258  * Release port ID action resource.
8259  *
8260  * @param handle
8261  *   Pointer to mlx5_flow_handle.
8262  *
8263  * @return
8264  *   1 while a reference on it exists, 0 when freed.
8265  */
8266 static int
8267 flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
8268 {
8269         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
8270                                                 handle->dvh.port_id_action;
8271
8272         MLX5_ASSERT(cache_resource->action);
8273         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
8274                 (void *)cache_resource,
8275                 rte_atomic32_read(&cache_resource->refcnt));
8276         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8277                 claim_zero(mlx5_glue->destroy_flow_action
8278                                 (cache_resource->action));
8279                 LIST_REMOVE(cache_resource, next);
8280                 rte_free(cache_resource);
8281                 DRV_LOG(DEBUG, "port id action resource %p: removed",
8282                         (void *)cache_resource);
8283                 return 0;
8284         }
8285         return 1;
8286 }
8287
8288 /**
8289  * Release push vlan action resource.
8290  *
8291  * @param handle
8292  *   Pointer to mlx5_flow_handle.
8293  *
8294  * @return
8295  *   1 while a reference on it exists, 0 when freed.
8296  */
8297 static int
8298 flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
8299 {
8300         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
8301                                                 handle->dvh.push_vlan_res;
8302
8303         MLX5_ASSERT(cache_resource->action);
8304         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
8305                 (void *)cache_resource,
8306                 rte_atomic32_read(&cache_resource->refcnt));
8307         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
8308                 claim_zero(mlx5_glue->destroy_flow_action
8309                                 (cache_resource->action));
8310                 LIST_REMOVE(cache_resource, next);
8311                 rte_free(cache_resource);
8312                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
8313                         (void *)cache_resource);
8314                 return 0;
8315         }
8316         return 1;
8317 }
8318
8319 /**
8320  * Remove the flow from the NIC but keep it in memory.
8321  * Lock free (the mutex should be acquired by the caller).
8322  *
8323  * @param[in] dev
8324  *   Pointer to Ethernet device.
8325  * @param[in, out] flow
8326  *   Pointer to flow structure.
8327  */
8328 static void
8329 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
8330 {
8331         struct mlx5_flow_handle *dh;
8332
8333         if (!flow)
8334                 return;
8335         LIST_FOREACH(dh, &flow->dev_handles, next) {
8336                 if (dh->ib_flow) {
8337                         claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
8338                         dh->ib_flow = NULL;
8339                 }
8340                 if (dh->hrxq) {
8341                         if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
8342                                 mlx5_hrxq_drop_release(dev);
8343                         else
8344                                 mlx5_hrxq_release(dev, dh->hrxq);
8345                         dh->hrxq = NULL;
8346                 }
8347                 if (dh->vf_vlan.tag && dh->vf_vlan.created)
8348                         mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
8349         }
8350 }
8351
8352 /**
8353  * Remove the flow from the NIC and from memory.
8354  * Lock free (the mutex should be acquired by the caller).
8355  *
8356  * @param[in] dev
8357  *   Pointer to the Ethernet device structure.
8358  * @param[in, out] flow
8359  *   Pointer to flow structure.
8360  */
8361 static void
8362 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
8363 {
8364         struct mlx5_flow_handle *dev_handle;
8365
8366         if (!flow)
8367                 return;
8368         __flow_dv_remove(dev, flow);
8369         if (flow->counter) {
8370                 flow_dv_counter_release(dev, flow->counter);
8371                 flow->counter = 0;
8372         }
8373         if (flow->meter) {
8374                 mlx5_flow_meter_detach(flow->meter);
8375                 flow->meter = NULL;
8376         }
8377         while (!LIST_EMPTY(&flow->dev_handles)) {
8378                 dev_handle = LIST_FIRST(&flow->dev_handles);
8379                 LIST_REMOVE(dev_handle, next);
8380                 if (dev_handle->dvh.matcher)
8381                         flow_dv_matcher_release(dev, dev_handle);
8382                 if (dev_handle->dvh.encap_decap)
8383                         flow_dv_encap_decap_resource_release(dev_handle);
8384                 if (dev_handle->dvh.modify_hdr)
8385                         flow_dv_modify_hdr_resource_release(dev_handle);
8386                 if (dev_handle->dvh.jump)
8387                         flow_dv_jump_tbl_resource_release(dev, dev_handle);
8388                 if (dev_handle->dvh.port_id_action)
8389                         flow_dv_port_id_action_resource_release(dev_handle);
8390                 if (dev_handle->dvh.push_vlan_res)
8391                         flow_dv_push_vlan_action_resource_release(dev_handle);
8392                 if (dev_handle->dvh.tag_resource)
8393                         flow_dv_tag_release(dev,
8394                                             dev_handle->dvh.tag_resource);
8395                 rte_free(dev_handle);
8396         }
8397 }
8398
8399 /**
8400  * Query a DV flow rule for its statistics via DevX.
8401  *
8402  * @param[in] dev
8403  *   Pointer to Ethernet device.
8404  * @param[in] flow
8405  *   Pointer to the sub flow.
8406  * @param[out] data
8407  *   Data retrieved by the query.
8408  * @param[out] error
8409  *   Perform verbose error reporting if not NULL.
8410  *
8411  * @return
8412  *   0 on success, a negative errno value otherwise and rte_errno is set.
8413  */
8414 static int
8415 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
8416                     void *data, struct rte_flow_error *error)
8417 {
8418         struct mlx5_priv *priv = dev->data->dev_private;
8419         struct rte_flow_query_count *qc = data;
8420
8421         if (!priv->config.devx)
8422                 return rte_flow_error_set(error, ENOTSUP,
8423                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8424                                           NULL,
8425                                           "counters are not supported");
8426         if (flow->counter) {
8427                 uint64_t pkts, bytes;
8428                 struct mlx5_flow_counter *cnt;
8429                 int err;
8430
8431                 cnt = flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
8432                 err = _flow_dv_query_count(dev, flow->counter, &pkts,
8433                                            &bytes);
8434
8435                 if (err)
8436                         return rte_flow_error_set(error, -err,
8437                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8438                                         NULL, "cannot read counters");
8439                 qc->hits_set = 1;
8440                 qc->bytes_set = 1;
8441                 qc->hits = pkts - cnt->hits;
8442                 qc->bytes = bytes - cnt->bytes;
8443                 if (qc->reset) {
8444                         cnt->hits = pkts;
8445                         cnt->bytes = bytes;
8446                 }
8447                 return 0;
8448         }
8449         return rte_flow_error_set(error, EINVAL,
8450                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
8451                                   NULL,
8452                                   "counters are not available");
8453 }
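
/*
 * Usage sketch (illustrative only, not code from this driver): the handler
 * above is reached through the generic rte_flow query API. Assuming a flow
 * created with a COUNT action, an application could read it roughly as:
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &action, &qc, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 *
 * port_id and flow are application-side handles (assumed here); the values
 * returned are deltas since the last reset, as computed above.
 */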
8454
8455 /**
8456  * Query a flow.
8457  *
8458  * @see rte_flow_query()
8459  * @see rte_flow_ops
8460  */
8461 static int
8462 flow_dv_query(struct rte_eth_dev *dev,
8463               struct rte_flow *flow,
8464               const struct rte_flow_action *actions,
8465               void *data,
8466               struct rte_flow_error *error)
8467 {
8468         int ret = -EINVAL;
8469
8470         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
8471                 switch (actions->type) {
8472                 case RTE_FLOW_ACTION_TYPE_VOID:
8473                         break;
8474                 case RTE_FLOW_ACTION_TYPE_COUNT:
8475                         ret = flow_dv_query_count(dev, flow, data, error);
8476                         break;
8477                 default:
8478                         return rte_flow_error_set(error, ENOTSUP,
8479                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8480                                                   actions,
8481                                                   "action not supported");
8482                 }
8483         }
8484         return ret;
8485 }
8486
8487 /**
8488  * Destroy the meter table set.
8489  * Lock free (mutex should be acquired by the caller).
8490  *
8491  * @param[in] dev
8492  *   Pointer to Ethernet device.
8493  * @param[in] tbl
8494  *   Pointer to the meter table set.
8495  *
8496  * @return
8497  *   Always 0.
8498  */
8499 static int
8500 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
8501                         struct mlx5_meter_domains_infos *tbl)
8502 {
8503         struct mlx5_priv *priv = dev->data->dev_private;
8504         struct mlx5_meter_domains_infos *mtd = tbl;
8506
8507         if (!mtd || !priv->config.dv_flow_en)
8508                 return 0;
8509         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
8510                 claim_zero(mlx5_glue->dv_destroy_flow
8511                           (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
8512         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
8513                 claim_zero(mlx5_glue->dv_destroy_flow
8514                           (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
8515         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
8516                 claim_zero(mlx5_glue->dv_destroy_flow
8517                           (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
8518         if (mtd->egress.color_matcher)
8519                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8520                           (mtd->egress.color_matcher));
8521         if (mtd->egress.any_matcher)
8522                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8523                           (mtd->egress.any_matcher));
8524         if (mtd->egress.tbl)
8525                 claim_zero(flow_dv_tbl_resource_release(dev,
8526                                                         mtd->egress.tbl));
8527         if (mtd->egress.sfx_tbl)
8528                 claim_zero(flow_dv_tbl_resource_release(dev,
8529                                                         mtd->egress.sfx_tbl));
8530         if (mtd->ingress.color_matcher)
8531                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8532                           (mtd->ingress.color_matcher));
8533         if (mtd->ingress.any_matcher)
8534                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8535                           (mtd->ingress.any_matcher));
8536         if (mtd->ingress.tbl)
8537                 claim_zero(flow_dv_tbl_resource_release(dev,
8538                                                         mtd->ingress.tbl));
8539         if (mtd->ingress.sfx_tbl)
8540                 claim_zero(flow_dv_tbl_resource_release(dev,
8541                                                         mtd->ingress.sfx_tbl));
8542         if (mtd->transfer.color_matcher)
8543                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8544                           (mtd->transfer.color_matcher));
8545         if (mtd->transfer.any_matcher)
8546                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8547                           (mtd->transfer.any_matcher));
8548         if (mtd->transfer.tbl)
8549                 claim_zero(flow_dv_tbl_resource_release(dev,
8550                                                         mtd->transfer.tbl));
8551         if (mtd->transfer.sfx_tbl)
8552                 claim_zero(flow_dv_tbl_resource_release(dev,
8553                                                         mtd->transfer.sfx_tbl));
8554         if (mtd->drop_actn)
8555                 claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
8556         rte_free(mtd);
8557         return 0;
8558 }
8559
8560 /* Number of meter flow actions, count and jump or count and drop. */
8561 #define METER_ACTIONS 2
8562
8563 /**
8564  * Create the specified domain's meter table and suffix table.
8565  *
8566  * @param[in] dev
8567  *   Pointer to Ethernet device.
8568  * @param[in,out] mtb
8569  *   Pointer to DV meter table set.
8570  * @param[in] egress
8571  *   Table attribute: nonzero to create the egress domain tables.
8572  * @param[in] transfer
8573  *   Table attribute: nonzero to create the FDB (transfer) domain tables.
8574  * @param[in] color_reg_c_idx
8575  *   Reg C index for color match.
8576  *
8577  * @return
8578  *   0 on success, -1 otherwise and rte_errno is set.
8579  */
8580 static int
8581 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
8582                            struct mlx5_meter_domains_infos *mtb,
8583                            uint8_t egress, uint8_t transfer,
8584                            uint32_t color_reg_c_idx)
8585 {
8586         struct mlx5_priv *priv = dev->data->dev_private;
8587         struct mlx5_ibv_shared *sh = priv->sh;
8588         struct mlx5_flow_dv_match_params mask = {
8589                 .size = sizeof(mask.buf),
8590         };
8591         struct mlx5_flow_dv_match_params value = {
8592                 .size = sizeof(value.buf),
8593         };
8594         struct mlx5dv_flow_matcher_attr dv_attr = {
8595                 .type = IBV_FLOW_ATTR_NORMAL,
8596                 .priority = 0,
8597                 .match_criteria_enable = 0,
8598                 .match_mask = (void *)&mask,
8599         };
8600         void *actions[METER_ACTIONS];
8601         struct mlx5_meter_domain_info *dtb;
8602         struct rte_flow_error error;
8603         int i = 0;
8604
8605         if (transfer)
8606                 dtb = &mtb->transfer;
8607         else if (egress)
8608                 dtb = &mtb->egress;
8609         else
8610                 dtb = &mtb->ingress;
8611         /* Create the meter table with METER level. */
8612         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
8613                                             egress, transfer, &error);
8614         if (!dtb->tbl) {
8615                 DRV_LOG(ERR, "Failed to create meter policer table.");
8616                 return -1;
8617         }
8618         /* Create the meter suffix table with SUFFIX level. */
8619         dtb->sfx_tbl = flow_dv_tbl_resource_get(dev,
8620                                             MLX5_FLOW_TABLE_LEVEL_SUFFIX,
8621                                             egress, transfer, &error);
8622         if (!dtb->sfx_tbl) {
8623                 DRV_LOG(ERR, "Failed to create meter suffix table.");
8624                 return -1;
8625         }
8626         /* Create matchers, Any and Color. */
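        /*
         * Matcher scheme: the "any" matcher uses the lowest priority (3)
         * with no match criteria and backs the default drop rule below;
         * the color matcher uses the highest priority (0) and matches on
         * the meter color written to the REG_C register, so colored
         * packets hit their policer rules before the default rule.
         */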
8627         dv_attr.priority = 3;
8628         dv_attr.match_criteria_enable = 0;
8629         dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8630                                                              &dv_attr,
8631                                                              dtb->tbl->obj);
8632         if (!dtb->any_matcher) {
8633                 DRV_LOG(ERR, "Failed to create meter"
8634                              " policer default matcher.");
8635                 goto error_exit;
8636         }
8637         dv_attr.priority = 0;
8638         dv_attr.match_criteria_enable =
8639                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8640         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
8641                                rte_col_2_mlx5_col(RTE_COLORS), UINT8_MAX);
8642         dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8643                                                                &dv_attr,
8644                                                                dtb->tbl->obj);
8645         if (!dtb->color_matcher) {
8646                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
8647                 goto error_exit;
8648         }
8649         if (mtb->count_actns[RTE_MTR_DROPPED])
8650                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
8651         actions[i++] = mtb->drop_actn;
8652         /* Default rule: lowest priority, match any, actions: drop. */
8653         dtb->policer_rules[RTE_MTR_DROPPED] =
8654                         mlx5_glue->dv_create_flow(dtb->any_matcher,
8655                                                  (void *)&value, i, actions);
8656         if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
8657                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
8658                 goto error_exit;
8659         }
8660         return 0;
8661 error_exit:
        rte_errno = errno;
8662         return -1;
8663 }
8664
8665 /**
8666  * Create the needed meter and suffix tables.
8667  * Lock free (mutex should be acquired by the caller).
8668  *
8669  * @param[in] dev
8670  *   Pointer to Ethernet device.
8671  * @param[in] fm
8672  *   Pointer to the flow meter.
8673  *
8674  * @return
8675  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
8676  */
8677 static struct mlx5_meter_domains_infos *
8678 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
8679                        const struct mlx5_flow_meter *fm)
8680 {
8681         struct mlx5_priv *priv = dev->data->dev_private;
8682         struct mlx5_meter_domains_infos *mtb;
8683         int ret;
8684         int i;
8685
8686         if (!priv->mtr_en) {
8687                 rte_errno = ENOTSUP;
8688                 return NULL;
8689         }
8690         mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
8691         if (!mtb) {
8692                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
8693                 return NULL;
8694         }
8695         /* Create meter count actions. */
8696         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
8697                 struct mlx5_flow_counter *cnt;
8698                 if (!fm->policer_stats.cnt[i])
8699                         continue;
8700                 cnt = flow_dv_counter_get_by_idx(dev,
8701                       fm->policer_stats.cnt[i], NULL);
8702                 mtb->count_actns[i] = cnt->action;
8703         }
8704         /* Create drop action. */
8705         mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
8706         if (!mtb->drop_actn) {
8707                 DRV_LOG(ERR, "Failed to create drop action.");
8708                 goto error_exit;
8709         }
8710         /* Egress meter table. */
8711         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
8712         if (ret) {
8713                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
8714                 goto error_exit;
8715         }
8716         /* Ingress meter table. */
8717         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
8718         if (ret) {
8719                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
8720                 goto error_exit;
8721         }
8722         /* FDB meter table. */
8723         if (priv->config.dv_esw_en) {
8724                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
8725                                                  priv->mtr_color_reg);
8726                 if (ret) {
8727                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
8728                         goto error_exit;
8729                 }
8730         }
8731         return mtb;
8732 error_exit:
8733         flow_dv_destroy_mtr_tbl(dev, mtb);
8734         return NULL;
8735 }
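
/*
 * Application-side sketch (an assumption for illustration, not code from
 * this file): the table set above is built when a meter is instantiated
 * through the public rte_mtr API, e.g.:
 *
 *	struct rte_mtr_params params = {
 *		.meter_profile_id = profile_id, // previously created profile
 *	};
 *	struct rte_mtr_error mtr_err;
 *
 *	if (rte_mtr_create(port_id, mtr_id, &params, 0, &mtr_err))
 *		printf("meter creation failed: %s\n",
 *		       mtr_err.message ? mtr_err.message : "unknown");
 *
 * port_id, mtr_id and profile_id are application-chosen identifiers.
 */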
8736
8737 /**
8738  * Destroy domain policer rule.
8739  *
8740  * @param[in] dt
8741  *   Pointer to domain table.
8742  */
8743 static void
8744 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
8745 {
8746         int i;
8747
8748         for (i = 0; i < RTE_MTR_DROPPED; i++) {
8749                 if (dt->policer_rules[i]) {
8750                         claim_zero(mlx5_glue->dv_destroy_flow
8751                                   (dt->policer_rules[i]));
8752                         dt->policer_rules[i] = NULL;
8753                 }
8754         }
8755         if (dt->jump_actn) {
8756                 claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
8757                 dt->jump_actn = NULL;
8758         }
8759 }
8760
8761 /**
8762  * Destroy policer rules.
8763  *
8764  * @param[in] dev
8765  *   Pointer to Ethernet device.
8766  * @param[in] fm
8767  *   Pointer to flow meter structure.
8768  * @param[in] attr
8769  *   Pointer to flow attributes.
8770  *
8771  * @return
8772  *   Always 0.
8773  */
8774 static int
8775 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
8776                               const struct mlx5_flow_meter *fm,
8777                               const struct rte_flow_attr *attr)
8778 {
8779         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
8780
8781         if (!mtb)
8782                 return 0;
8783         if (attr->egress)
8784                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
8785         if (attr->ingress)
8786                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
8787         if (attr->transfer)
8788                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
8789         return 0;
8790 }
8791
8792 /**
8793  * Create the specified domain's meter policer rules.
8794  *
8795  * @param[in] fm
8796  *   Pointer to flow meter structure.
8797  * @param[in] dtb
8798  *   Pointer to the meter domain table.
8799  * @param[in] mtr_reg_c
8800  *   Color match REG_C.
8801  *
8802  * @return
8803  *   0 on success, -1 otherwise.
8804  */
8805 static int
8806 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
8807                                     struct mlx5_meter_domain_info *dtb,
8808                                     uint8_t mtr_reg_c)
8809 {
8810         struct mlx5_flow_dv_match_params matcher = {
8811                 .size = sizeof(matcher.buf),
8812         };
8813         struct mlx5_flow_dv_match_params value = {
8814                 .size = sizeof(value.buf),
8815         };
8816         struct mlx5_meter_domains_infos *mtb = fm->mfts;
8817         void *actions[METER_ACTIONS];
8818         int i;
8819
8820         /* Create jump action. */
8821         if (!dtb->jump_actn)
8822                 dtb->jump_actn =
8823                         mlx5_glue->dr_create_flow_action_dest_flow_tbl
8824                                                         (dtb->sfx_tbl->obj);
8825         if (!dtb->jump_actn) {
8826                 DRV_LOG(ERR, "Failed to create policer jump action.");
8827                 goto error;
8828         }
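        /*
         * One rule per color (indexes below RTE_MTR_DROPPED): match the
         * color value in the REG_C register, optionally count, then either
         * drop or jump to the suffix table, per the configured policer
         * action.
         */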
8829         for (i = 0; i < RTE_MTR_DROPPED; i++) {
8830                 int j = 0;
8831
8832                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
8833                                        rte_col_2_mlx5_col(i), UINT8_MAX);
8834                 if (mtb->count_actns[i])
8835                         actions[j++] = mtb->count_actns[i];
8836                 if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
8837                         actions[j++] = mtb->drop_actn;
8838                 else
8839                         actions[j++] = dtb->jump_actn;
8840                 dtb->policer_rules[i] =
8841                         mlx5_glue->dv_create_flow(dtb->color_matcher,
8842                                                  (void *)&value,
8843                                                   j, actions);
8844                 if (!dtb->policer_rules[i]) {
8845                         DRV_LOG(ERR, "Failed to create policer rule.");
8846                         goto error;
8847                 }
8848         }
8849         return 0;
8850 error:
8851         rte_errno = errno;
8852         return -1;
8853 }
8854
8855 /**
8856  * Create policer rules.
8857  *
8858  * @param[in] dev
8859  *   Pointer to Ethernet device.
8860  * @param[in] fm
8861  *   Pointer to flow meter structure.
8862  * @param[in] attr
8863  *   Pointer to flow attributes.
8864  *
8865  * @return
8866  *   0 on success, -1 otherwise.
8867  */
8868 static int
8869 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
8870                              struct mlx5_flow_meter *fm,
8871                              const struct rte_flow_attr *attr)
8872 {
8873         struct mlx5_priv *priv = dev->data->dev_private;
8874         struct mlx5_meter_domains_infos *mtb = fm->mfts;
8875         int ret;
8876
8877         if (attr->egress) {
8878                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
8879                                                 priv->mtr_color_reg);
8880                 if (ret) {
8881                         DRV_LOG(ERR, "Failed to create egress policer.");
8882                         goto error;
8883                 }
8884         }
8885         if (attr->ingress) {
8886                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
8887                                                 priv->mtr_color_reg);
8888                 if (ret) {
8889                         DRV_LOG(ERR, "Failed to create ingress policer.");
8890                         goto error;
8891                 }
8892         }
8893         if (attr->transfer) {
8894                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
8895                                                 priv->mtr_color_reg);
8896                 if (ret) {
8897                         DRV_LOG(ERR, "Failed to create transfer policer.");
8898                         goto error;
8899                 }
8900         }
8901         return 0;
8902 error:
8903         flow_dv_destroy_policer_rules(dev, fm, attr);
8904         return -1;
8905 }
8906
8907 /**
8908  * Query a DevX counter.
8909  *
8910  * @param[in] dev
8911  *   Pointer to the Ethernet device structure.
8912  * @param[in] counter
8913  *   Index of the flow counter.
8914  * @param[in] clear
8915  *   Set to clear the counter statistics.
8916  * @param[out] pkts
8917  *   The statistics value of packets.
8918  * @param[out] bytes
8919  *   The statistics value of bytes.
8920  *
8921  * @return
8922  *   0 on success, otherwise return -1.
8923  */
8924 static int
8925 flow_dv_counter_query(struct rte_eth_dev *dev, uint32_t counter, bool clear,
8926                       uint64_t *pkts, uint64_t *bytes)
8927 {
8928         struct mlx5_priv *priv = dev->data->dev_private;
8929         struct mlx5_flow_counter *cnt;
8930         uint64_t inn_pkts, inn_bytes;
8931         int ret;
8932
8933         if (!priv->config.devx)
8934                 return -1;
8935
8936         ret = _flow_dv_query_count(dev, counter, &inn_pkts, &inn_bytes);
8937         if (ret)
8938                 return -1;
8939         cnt = flow_dv_counter_get_by_idx(dev, counter, NULL);
8940         *pkts = inn_pkts - cnt->hits;
8941         *bytes = inn_bytes - cnt->bytes;
8942         if (clear) {
8943                 cnt->hits = inn_pkts;
8944                 cnt->bytes = inn_bytes;
8945         }
8946         return 0;
8947 }
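
/*
 * Note: the pkts/bytes values returned above are deltas against the
 * snapshot taken at the last clear; the hardware counters themselves only
 * accumulate. (Assumption for readers of this file alone: the generic
 * layer reaches this handler through the counter_query driver op, e.g.
 * for meter statistics reads.)
 */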
8948
8949 /*
8950  * Mutex-protected thunk to lock-free __flow_dv_translate().
8951  */
8952 static int
8953 flow_dv_translate(struct rte_eth_dev *dev,
8954                   struct mlx5_flow *dev_flow,
8955                   const struct rte_flow_attr *attr,
8956                   const struct rte_flow_item items[],
8957                   const struct rte_flow_action actions[],
8958                   struct rte_flow_error *error)
8959 {
8960         int ret;
8961
8962         flow_dv_shared_lock(dev);
8963         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
8964         flow_dv_shared_unlock(dev);
8965         return ret;
8966 }
8967
8968 /*
8969  * Mutex-protected thunk to lock-free __flow_dv_apply().
8970  */
8971 static int
8972 flow_dv_apply(struct rte_eth_dev *dev,
8973               struct rte_flow *flow,
8974               struct rte_flow_error *error)
8975 {
8976         int ret;
8977
8978         flow_dv_shared_lock(dev);
8979         ret = __flow_dv_apply(dev, flow, error);
8980         flow_dv_shared_unlock(dev);
8981         return ret;
8982 }
8983
8984 /*
8985  * Mutex-protected thunk to lock-free __flow_dv_remove().
8986  */
8987 static void
8988 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
8989 {
8990         flow_dv_shared_lock(dev);
8991         __flow_dv_remove(dev, flow);
8992         flow_dv_shared_unlock(dev);
8993 }
8994
8995 /*
8996  * Mutex-protected thunk to lock-free __flow_dv_destroy().
8997  */
8998 static void
8999 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
9000 {
9001         flow_dv_shared_lock(dev);
9002         __flow_dv_destroy(dev, flow);
9003         flow_dv_shared_unlock(dev);
9004 }
9005
9006 /*
9007  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
9008  */
9009 static uint32_t
9010 flow_dv_counter_allocate(struct rte_eth_dev *dev)
9011 {
9012         uint32_t cnt;
9013
9014         flow_dv_shared_lock(dev);
9015         cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
9016         flow_dv_shared_unlock(dev);
9017         return cnt;
9018 }
9019
9020 /*
9021  * Mutex-protected thunk to lock-free flow_dv_counter_release().
9022  */
9023 static void
9024 flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
9025 {
9026         flow_dv_shared_lock(dev);
9027         flow_dv_counter_release(dev, cnt);
9028         flow_dv_shared_unlock(dev);
9029 }
9030
9031 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
9032         .validate = flow_dv_validate,
9033         .prepare = flow_dv_prepare,
9034         .translate = flow_dv_translate,
9035         .apply = flow_dv_apply,
9036         .remove = flow_dv_remove,
9037         .destroy = flow_dv_destroy,
9038         .query = flow_dv_query,
9039         .create_mtr_tbls = flow_dv_create_mtr_tbl,
9040         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
9041         .create_policer_rules = flow_dv_create_policer_rules,
9042         .destroy_policer_rules = flow_dv_destroy_policer_rules,
9043         .counter_alloc = flow_dv_counter_allocate,
9044         .counter_free = flow_dv_counter_free,
9045         .counter_query = flow_dv_counter_query,
9046 };
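
/*
 * Wiring note (an assumption, the selection logic lives in mlx5_flow.c
 * rather than in this file): the generic mlx5 flow layer dispatches to
 * this ops table for the DV path when dv_flow_en is enabled.
 */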
9047
9048 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */