/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
                                          sizeof(struct rte_flow_item_ipv4))
/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * flow_dv_validate() disallows multiple L3/L4 layers except in tunnel
 * mode. In tunnel mode, the items to be modified are the outermost ones.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        if (!attr->ipv6)
                                attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        if (!attr->ipv4)
                                attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        if (!attr->tcp)
                                attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        if (!attr->udp)
                                attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
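
/*
 * Illustrative usage sketch (editor's addition, not part of the driver).
 * For a VXLAN-tunneled pattern the outermost L3/L4 types win:
 *
 *   const struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   union flow_dv_attr attr = { .attr = 0 };
 *
 *   flow_dv_attr_init(items, &attr);
 *   // attr.ipv4 == 1 and attr.udp == 1; the inner IPv6/TCP items are
 *   // ignored because the outer L3/L4 types were latched first.
 */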

/**
 * Convert rte_color to mlx5 color.
 *
 * @param[in] rcol
 *   rte_color value.
 *
 * @return
 *   mlx5 color.
 */
static int
rte_col_2_mlx5_col(enum rte_color rcol)
{
        switch (rcol) {
        case RTE_COLOR_GREEN:
                return MLX5_FLOW_COLOR_GREEN;
        case RTE_COLOR_YELLOW:
                return MLX5_FLOW_COLOR_YELLOW;
        case RTE_COLOR_RED:
                return MLX5_FLOW_COLOR_RED;
        default:
                break;
        }
        return MLX5_FLOW_COLOR_UNDEFINED;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  1, MLX5_MODI_OUT_IP_DSCP},
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  0, MLX5_MODI_OUT_IP_DSCP},
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};
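
/*
 * Reading these tables (editor's note): each entry maps a protocol header
 * field to the matching firmware modification id. For instance, in
 * modify_ipv4 the entry {4, 12, MLX5_MODI_OUT_SIPV4} says the source
 * address is a 4-byte field starting at byte offset 12 of the IPv4 header;
 * a zero-size entry terminates the table. modify_vlan_out_first_vid is the
 * one exception, keeping its size in bits as its comment warns.
 */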

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
               item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared dv context. The lock occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and representors
 * are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_dv_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_dv_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
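
/*
 * Worked example (editor's sketch): with vlan->vlan_tci == 0x2064 (PCP 1,
 * VID 0x064) and an OF_SET_VLAN_PCP action carrying vlan_pcp == 5, the PCP
 * bits are cleared and rewritten:
 *
 *   vlan_tci  = 5 << 13;                       // 0xA000
 *   tci      &= ~MLX5DV_FLOW_VLAN_PCP_MASK;    // 0x2064 -> 0x0064
 *   tci      |= vlan_tci;                      // 0x0064 -> 0xA064
 *
 * leaving the 12-bit VID untouched.
 */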

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                assert(false);
                ret = 0;
                break;
        }
        return ret;
}
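
/*
 * Worked example (editor's sketch): the 3-byte case stitches a 16-bit
 * big-endian load together with the trailing byte:
 *
 *   const uint8_t data[] = { 0x01, 0x02, 0x03 };
 *
 *   uint32_t v = flow_dv_fetch_field(data, 3);
 *   // rte_be_to_cpu_16() yields 0x0102, then (0x0102 << 8) | 0x03,
 *   // so v == 0x010203 regardless of host endianness.
 */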

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field
 * description and the item mask. Data bit offset and width of each action
 * are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * The mask must always be present, it defines the actual field width.
         */
        assert(item->mask);
        assert(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                assert(size_b);
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i].action_type = type;
                actions[i].field = field->id;
                actions[i].offset = off_b;
                actions[i].length = size_b;
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        assert(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        assert(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        resource->actions_num = i;
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}
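
/*
 * Worked example (editor's sketch) of the width deduction above: for a
 * mask whose host-endian value is 0x00fff000,
 *
 *   off_b  = rte_bsf32(0x00fff000);              // 12, first set bit
 *   size_b = 32 - off_b - __builtin_clz(mask);   // 32 - 12 - 8 = 12
 *
 * so the firmware command rewrites a 12-bit field starting at bit 12,
 * and the spec data is aligned with (data & mask) >> off_b. A full
 * 32-bit mask yields size_b == 32, which is encoded as 0 per the
 * hardware convention.
 */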

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
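
/*
 * Illustrative usage sketch (editor's addition): the rte_flow action this
 * converter consumes is built by the application roughly as follows; the
 * address value is an arbitrary example.
 *
 *   struct rte_flow_action_set_ipv4 conf = {
 *           .ipv4_addr = RTE_BE32(0xC0A80001), // 192.168.0.1
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *           .conf = &conf,
 *   };
 *   // flow_dv_convert_action_modify_ipv4(resource, &action, error)
 *   // then emits a single SET command targeting MLX5_MODI_OUT_SIPV4.
 */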

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = field->id;
        actions[i].length = field->size;
        actions[i].offset = field->offset;
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
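
/*
 * Worked example (editor's sketch) of the decrement trick above: all
 * arithmetic is modulo 2^32, and UINT32_MAX == 2^32 - 1 == -1 (mod 2^32),
 * so for a requested decrement of X:
 *
 *   value = X * UINT32_MAX;  // == -X (mod 2^32) after truncation
 *   // e.g. X = 5: 5 * 0xFFFFFFFF == 0x4FFFFFFFB, truncated to 0xFFFFFFFB,
 *   // and seq + 0xFFFFFFFB == seq - 5 (mod 2^32).
 */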

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_NONE] = MLX5_MODI_OUT_NONE,
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        assert(conf->id != REG_NONE);
        assert(conf->id < RTE_DIM(reg_to_field));
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = reg_to_field[conf->id];
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = rte_cpu_to_be_32(conf->data);
        ++i;
        resource->actions_num = i;
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Convert SET_TAG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_tag
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action_set_tag *conf,
                         struct rte_flow_error *error)
{
        rte_be32_t data = rte_cpu_to_be_32(conf->data);
        rte_be32_t mask = rte_cpu_to_be_32(conf->mask);
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum mlx5_modification_field reg_type;
        int ret;

        ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
        if (ret < 0)
                return ret;
        assert(ret != REG_NONE);
        assert((unsigned int)ret < RTE_DIM(reg_to_field));
        reg_type = reg_to_field[ret];
        assert(reg_type > 0);
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_type};
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        rte_be32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = 0,
                .id = reg_to_field[conf->dst],
        };
        /* Adjust reg_c[0] usage according to reported mask. */
        if (conf->dst == REG_C_0 || conf->src == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t reg_c0 = priv->sh->dv_regc0_mask;

                assert(reg_c0);
                assert(priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY);
                if (conf->dst == REG_C_0) {
                        /* Copy to reg_c[0], within mask only. */
                        reg_dst.offset = rte_bsf32(reg_c0);
                        /*
                         * The mask ignores endianness because there is
                         * no conversion in the datapath.
                         */
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from destination lower bits to reg_c[0]. */
                        mask = reg_c0 >> reg_dst.offset;
#else
                        /* Copy from destination upper bits to reg_c[0]. */
                        mask = reg_c0 << (sizeof(reg_c0) * CHAR_BIT -
                                          rte_fls_u32(reg_c0));
#endif
                } else {
                        mask = rte_cpu_to_be_32(reg_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                        /* Copy from reg_c[0] to destination lower bits. */
                        reg_dst.offset = 0;
#else
                        /* Copy from reg_c[0] to destination upper bits. */
                        reg_dst.offset = sizeof(reg_c0) * CHAR_BIT -
                                         (rte_fls_u32(reg_c0) -
                                          rte_bsf32(reg_c0));
#endif
                }
        }
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
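
/*
 * Worked example (editor's sketch) for the reg_c[0] adjustment above on a
 * little-endian host: assume the reported mask is reg_c0 == 0x00ff0000.
 * When copying into reg_c[0]:
 *
 *   reg_dst.offset = rte_bsf32(0x00ff0000);        // 16
 *   mask = reg_c0 << (32 - rte_fls_u32(reg_c0));   // << 8 -> 0xff000000
 *
 * so only the 8 usable bits of reg_c[0] are written, and the source field
 * is taken from its upper bits as the datapath stores it without byte
 * swapping.
 */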

/**
 * Convert MARK action to DV specification. This routine is used
 * in extensive metadata only and requires metadata register to be
 * handled. In legacy mode hardware tag resource is engaged.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] conf
 *   Pointer to MARK action specification.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_mark(struct rte_eth_dev *dev,
                            const struct rte_flow_action_mark *conf,
                            struct mlx5_flow_dv_modify_hdr_resource *resource,
                            struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        rte_be32_t mask = rte_cpu_to_be_32(MLX5_FLOW_MARK_MASK &
                                           priv->sh->dv_mark_mask);
        rte_be32_t data = rte_cpu_to_be_32(conf->id) & mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                {4, 0, 0}, /* dynamic instead of MLX5_MODI_META_REG_C_1. */
                {0, 0, 0},
        };
        enum modify_reg reg;

        if (!mask)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "zero mark action mask");
        reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (reg < 0)
                return reg;
        assert(reg > 0);
        if (reg == REG_C_0) {
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0 = rte_bsf32(msk_c0);

                data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
                mask = rte_cpu_to_be_32(mask) & msk_c0;
                mask = rte_cpu_to_be_32(mask << shl_c0);
        }
        reg_c_x[0].id = reg_to_field[reg];
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Get metadata register index for specified steering domain.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Attributes of flow to determine steering domain.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   positive index on success, a negative errno value otherwise
 *   and rte_errno is set.
 */
static enum modify_reg
flow_dv_get_metadata_reg(struct rte_eth_dev *dev,
                         const struct rte_flow_attr *attr,
                         struct rte_flow_error *error)
{
        enum modify_reg reg =
                mlx5_flow_get_reg_id(dev, attr->transfer ?
                                          MLX5_METADATA_FDB :
                                            attr->egress ?
                                            MLX5_METADATA_TX :
                                            MLX5_METADATA_RX, 0, error);
        if (reg < 0)
                return rte_flow_error_set(error,
                                          ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL, "unavailable "
                                          "metadata register");
        return reg;
}

/**
 * Convert SET_META action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] conf
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_meta
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_attr *attr,
                         const struct rte_flow_action_set_meta *conf,
                         struct rte_flow_error *error)
{
        uint32_t data = conf->data;
        uint32_t mask = conf->mask;
        struct rte_flow_item item = {
                .spec = &data,
                .mask = &mask,
        };
        struct field_modify_info reg_c_x[] = {
                [1] = {0, 0, 0},
        };
        enum modify_reg reg = flow_dv_get_metadata_reg(dev, attr, error);

        if (reg < 0)
                return reg;
        /*
         * In datapath code there are no endianness conversions, for
         * performance reasons; all pattern conversions are done in
         * rte_flow.
         */
        if (reg == REG_C_0) {
                struct mlx5_priv *priv = dev->data->dev_private;
                uint32_t msk_c0 = priv->sh->dv_regc0_mask;
                uint32_t shl_c0;

                assert(msk_c0);
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
                shl_c0 = rte_bsf32(msk_c0);
#else
                shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
#endif
                mask <<= shl_c0;
                data <<= shl_c0;
                assert(!(~msk_c0 & rte_cpu_to_be_32(mask)));
        }
        reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
        /* The routine expects parameters in memory as big-endian ones. */
        return flow_dv_convert_modify_action(&item, reg_c_x, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
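
/*
 * Worked example (editor's sketch) for the REG_C_0 shift above on a
 * little-endian host: rte_fls_u32() returns the one-based index of the
 * highest set bit, so
 *
 *   msk_c0 == 0xffff0000  ->  shl_c0 = 32 - 32 = 0;
 *   msk_c0 == 0x0000ffff  ->  shl_c0 = 32 - 16 = 16;
 *
 * i.e. data and mask are shifted just enough that, once stored in memory
 * and reread as big-endian by flow_dv_convert_modify_action(), they line
 * up with the usable bits reported in dv_regc0_mask.
 */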

/**
 * Convert modify-header set IPv4 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        ipv4.hdr.type_of_service = conf->dscp;
        ipv4_mask.hdr.type_of_service = RTE_IPV4_HDR_DSCP_MASK >> 2;
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 DSCP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6_dscp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_dscp *conf =
                (const struct rte_flow_action_set_dscp *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        /*
         * Even though the DSCP bits offset of IPv6 is not byte aligned,
         * rdma-core only accepts the DSCP bits byte aligned, starting
         * from bit 0 to 5, to be compatible with IPv4. There is no need
         * to shift the bits in the IPv6 case as rdma-core requires a
         * byte-aligned value.
         */
        ipv6.hdr.vtc_flow = conf->dscp;
        ipv6_mask.hdr.vtc_flow = RTE_IPV6_HDR_DSCP_MASK >> 22;
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}
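
/*
 * Worked example (editor's sketch) of the mask arithmetic above:
 * RTE_IPV6_HDR_DSCP_MASK is 0x0FC00000 within the big-endian vtc_flow
 * word (traffic-class bits 22-27), and 0x0FC00000 >> 22 == 0x3F, the
 * 6-bit byte-aligned mask rdma-core expects. The IPv4 case is analogous:
 * RTE_IPV4_HDR_DSCP_MASK (0xFC) >> 2 == 0x3F in type_of_service.
 */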

/**
 * Validate MARK item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_mark(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr __rte_unused,
                           struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        const struct rte_flow_item_mark *spec = item->spec;
        const struct rte_flow_item_mark *mask = item->mask;
        const struct rte_flow_item_mark nic_mask = {
                .id = priv->sh->dv_mark_mask,
        };
        int ret;

        if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata feature"
                                          " isn't enabled");
        if (!mlx5_flow_ext_mreg_supported(dev))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't supported");
        if (!nic_mask.id)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "extended metadata register"
                                          " isn't available");
        ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
        if (ret < 0)
                return ret;
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (spec->id >= (MLX5_FLOW_MARK_MAX & nic_mask.id))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &spec->id,
                                          "mark id exceeds the limit");
        if (!mask)
                mask = &nic_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_mark),
                                        error);
        if (ret < 0)
                return ret;
        return 0;
}
1347
1348 /**
1349  * Validate META item.
1350  *
1351  * @param[in] dev
1352  *   Pointer to the rte_eth_dev structure.
1353  * @param[in] item
1354  *   Item specification.
1355  * @param[in] attr
1356  *   Attributes of flow that includes this item.
1357  * @param[out] error
1358  *   Pointer to error structure.
1359  *
1360  * @return
1361  *   0 on success, a negative errno value otherwise and rte_errno is set.
1362  */
1363 static int
1364 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
1365                            const struct rte_flow_item *item,
1366                            const struct rte_flow_attr *attr,
1367                            struct rte_flow_error *error)
1368 {
1369         struct mlx5_priv *priv = dev->data->dev_private;
1370         struct mlx5_dev_config *config = &priv->config;
1371         const struct rte_flow_item_meta *spec = item->spec;
1372         const struct rte_flow_item_meta *mask = item->mask;
1373         struct rte_flow_item_meta nic_mask = {
1374                 .data = UINT32_MAX
1375         };
1376         enum modify_reg reg;
1377         int ret;
1378
1379         if (!spec)
1380                 return rte_flow_error_set(error, EINVAL,
1381                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1382                                           item->spec,
1383                                           "data cannot be empty");
1384         if (!spec->data)
1385                 return rte_flow_error_set(error, EINVAL,
1386                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1387                                           "data cannot be zero");
1388         if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
1389                 if (!mlx5_flow_ext_mreg_supported(dev))
1390                         return rte_flow_error_set(error, ENOTSUP,
1391                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1392                                           "extended metadata register"
1393                                           " isn't supported");
1394                 reg = flow_dv_get_metadata_reg(dev, attr, error);
1395                 if (reg < 0)
1396                         return reg;
1397                 if (reg == REG_B)
1398                         return rte_flow_error_set(error, ENOTSUP,
1399                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1400                                           "match on reg_b "
1401                                           "isn't supported");
1402                 if (reg != REG_A)
1403                         nic_mask.data = priv->sh->dv_meta_mask;
1404         }
1405         if (!mask)
1406                 mask = &rte_flow_item_meta_mask;
1407         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1408                                         (const uint8_t *)&nic_mask,
1409                                         sizeof(struct rte_flow_item_meta),
1410                                         error);
1411         return ret;
1412 }
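
/*
 * Illustrative sketch (not compiled into the driver): a META item that
 * would pass the validation above; the data value is hypothetical, and
 * the full mask matches the nic_mask default.
 *
 *     struct rte_flow_item_meta spec = { .data = 0xcafe };
 *     struct rte_flow_item_meta mask = { .data = UINT32_MAX };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_META,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 */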
1413
1414 /**
1415  * Validate TAG item.
1416  *
1417  * @param[in] dev
1418  *   Pointer to the rte_eth_dev structure.
1419  * @param[in] item
1420  *   Item specification.
1421  * @param[in] attr
1422  *   Attributes of flow that includes this item.
1423  * @param[out] error
1424  *   Pointer to error structure.
1425  *
1426  * @return
1427  *   0 on success, a negative errno value otherwise and rte_errno is set.
1428  */
1429 static int
1430 flow_dv_validate_item_tag(struct rte_eth_dev *dev,
1431                           const struct rte_flow_item *item,
1432                           const struct rte_flow_attr *attr __rte_unused,
1433                           struct rte_flow_error *error)
1434 {
1435         const struct rte_flow_item_tag *spec = item->spec;
1436         const struct rte_flow_item_tag *mask = item->mask;
1437         const struct rte_flow_item_tag nic_mask = {
1438                 .data = RTE_BE32(UINT32_MAX),
1439                 .index = 0xff,
1440         };
1441         int ret;
1442
1443         if (!mlx5_flow_ext_mreg_supported(dev))
1444                 return rte_flow_error_set(error, ENOTSUP,
1445                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1446                                           "extensive metadata register"
1447                                           " isn't supported");
1448         if (!spec)
1449                 return rte_flow_error_set(error, EINVAL,
1450                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1451                                           item->spec,
1452                                           "data cannot be empty");
1453         if (!mask)
1454                 mask = &rte_flow_item_tag_mask;
1455         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1456                                         (const uint8_t *)&nic_mask,
1457                                         sizeof(struct rte_flow_item_tag),
1458                                         error);
1459         if (ret < 0)
1460                 return ret;
1461         if (mask->index != 0xff)
1462                 return rte_flow_error_set(error, EINVAL,
1463                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
1464                                           "partial mask for tag index"
1465                                           " is not supported");
1466         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, spec->index, error);
1467         if (ret < 0)
1468                 return ret;
1469         assert(ret != REG_NONE);
1470         return 0;
1471 }
1472
1473 /**
1474  * Validate the port_id (vport) item.
1475  *
1476  * @param[in] dev
1477  *   Pointer to the rte_eth_dev structure.
1478  * @param[in] item
1479  *   Item specification.
1480  * @param[in] attr
1481  *   Attributes of flow that includes this item.
1482  * @param[in] item_flags
1483  *   Bit-fields that holds the items detected until now.
1484  * @param[out] error
1485  *   Pointer to error structure.
1486  *
1487  * @return
1488  *   0 on success, a negative errno value otherwise and rte_errno is set.
1489  */
1490 static int
1491 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
1492                               const struct rte_flow_item *item,
1493                               const struct rte_flow_attr *attr,
1494                               uint64_t item_flags,
1495                               struct rte_flow_error *error)
1496 {
1497         const struct rte_flow_item_port_id *spec = item->spec;
1498         const struct rte_flow_item_port_id *mask = item->mask;
1499         const struct rte_flow_item_port_id switch_mask = {
1500                         .id = 0xffffffff,
1501         };
1502         struct mlx5_priv *esw_priv;
1503         struct mlx5_priv *dev_priv;
1504         int ret;
1505
1506         if (!attr->transfer)
1507                 return rte_flow_error_set(error, EINVAL,
1508                                           RTE_FLOW_ERROR_TYPE_ITEM,
1509                                           NULL,
1510                                           "match on port id is valid only"
1511                                           " when transfer flag is enabled");
1512         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
1513                 return rte_flow_error_set(error, ENOTSUP,
1514                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1515                                           "multiple source ports are not"
1516                                           " supported");
1517         if (!mask)
1518                 mask = &switch_mask;
1519         if (mask->id != 0xffffffff)
1520                 return rte_flow_error_set(error, ENOTSUP,
1521                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1522                                            mask,
1523                                            "no support for partial mask on"
1524                                            " \"id\" field");
1525         ret = mlx5_flow_item_acceptable
1526                                 (item, (const uint8_t *)mask,
1527                                  (const uint8_t *)&rte_flow_item_port_id_mask,
1528                                  sizeof(struct rte_flow_item_port_id),
1529                                  error);
1530         if (ret)
1531                 return ret;
1532         if (!spec)
1533                 return 0;
1534         esw_priv = mlx5_port_to_eswitch_info(spec->id, false);
1535         if (!esw_priv)
1536                 return rte_flow_error_set(error, rte_errno,
1537                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1538                                           "failed to obtain E-Switch info for"
1539                                           " port");
1540         dev_priv = mlx5_dev_to_eswitch_info(dev);
1541         if (!dev_priv)
1542                 return rte_flow_error_set(error, rte_errno,
1543                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1544                                           NULL,
1545                                           "failed to obtain E-Switch info");
1546         if (esw_priv->domain_id != dev_priv->domain_id)
1547                 return rte_flow_error_set(error, EINVAL,
1548                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
1549                                           "cannot match on a port from a"
1550                                           " different E-Switch");
1551         return 0;
1552 }
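
/*
 * Illustrative sketch (not compiled into the driver): a port_id match
 * accepted by the validation above needs the transfer attribute and a
 * full mask on the "id" field; the port value is hypothetical.
 *
 *     struct rte_flow_attr attr = { .transfer = 1 };
 *     struct rte_flow_item_port_id spec = { .id = 1 };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *             .spec = &spec,
 *             .mask = &rte_flow_item_port_id_mask,
 *     };
 */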
1553
1554 /**
1555  * Validate the pop VLAN action.
1556  *
1557  * @param[in] dev
1558  *   Pointer to the rte_eth_dev structure.
1559  * @param[in] action_flags
1560  *   Holds the actions detected until now.
1561  * @param[in] action
1562  *   Pointer to the pop vlan action.
1563  * @param[in] item_flags
1564  *   The items found in this flow rule.
1565  * @param[in] attr
1566  *   Pointer to flow attributes.
1567  * @param[out] error
1568  *   Pointer to error structure.
1569  *
1570  * @return
1571  *   0 on success, a negative errno value otherwise and rte_errno is set.
1572  */
1573 static int
1574 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
1575                                  uint64_t action_flags,
1576                                  const struct rte_flow_action *action,
1577                                  uint64_t item_flags,
1578                                  const struct rte_flow_attr *attr,
1579                                  struct rte_flow_error *error)
1580 {
1581         struct mlx5_priv *priv = dev->data->dev_private;
1582
1583         (void)action;
1584         (void)attr;
1585         if (!priv->sh->pop_vlan_action)
1586                 return rte_flow_error_set(error, ENOTSUP,
1587                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1588                                           NULL,
1589                                           "pop vlan action is not supported");
1590         /*
1591          * Check for inconsistencies:
1592          *  fail strip_vlan in a flow that matches packets without VLAN
1593          *  tags, i.e. in a flow that does not explicitly match on the
1594          *  (outer) VLAN tag.
1595          */
1596         if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
1597                 return rte_flow_error_set(error, ENOTSUP,
1598                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1599                                           NULL,
1600                                           "no support for multiple vlan pop "
1601                                           "actions");
1602         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1603                 return rte_flow_error_set(error, ENOTSUP,
1604                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1605                                           NULL,
1606                                           "cannot pop vlan without a "
1607                                           "match on (outer) vlan in the flow");
1608         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1609                 return rte_flow_error_set(error, EINVAL,
1610                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1611                                           "wrong action order, port_id should "
1612                                           "be after pop VLAN action");
1613         return 0;
1614 }
1615
1616 /**
1617  * Get default VLAN info from the VLAN match info in the item list.
1618  *
1619  * @param[in] items
1620  *   The list of item specifications.
1621  * @param[out] vlan
1622  *   Pointer to the VLAN info to fill.
1623  *
1624  * Only fully masked VLAN fields are copied: a full PCP mask fills the
1625  * PCP bits of the TCI, a full VID mask fills the VID bits, and a full
1626  * inner-type mask fills the EtherType.
1627  *
1628  * @return
1629  *   Nothing; the function cannot fail.
1630  */
1631 static void
1632 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1633                                   struct rte_vlan_hdr *vlan)
1634 {
1635         const struct rte_flow_item_vlan nic_mask = {
1636                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1637                                 MLX5DV_FLOW_VLAN_VID_MASK),
1638                 .inner_type = RTE_BE16(0xffff),
1639         };
1640
1641         if (items == NULL)
1642                 return;
1643         for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
1644                items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
1645                 ;
1646         if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1647                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1648                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1649
1650                 if (!vlan_m)
1651                         vlan_m = &nic_mask;
1652                 /* Only full match values are accepted. */
1653                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1654                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1655                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1656                         vlan->vlan_tci |=
1657                                 rte_be_to_cpu_16(vlan_v->tci &
1658                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1659                 }
1660                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1661                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1662                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1663                         vlan->vlan_tci |=
1664                                 rte_be_to_cpu_16(vlan_v->tci &
1665                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1666                 }
1667                 if (vlan_m->inner_type == nic_mask.inner_type)
1668                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1669                                                            vlan_m->inner_type);
1670         }
1671 }
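
/*
 * Illustrative sketch (not compiled into the driver): with the VLAN
 * masks defined at the top of this file, PCP and VID are carved out of
 * a host-order TCI as below; the sample value is arbitrary.
 *
 *     uint16_t tci = 0xa005;  // PCP 5, DEI 0, VID 5
 *     unsigned int pcp = (tci & MLX5DV_FLOW_VLAN_PCP_MASK) >>
 *                        MLX5DV_FLOW_VLAN_PCP_SHIFT;  // 5
 *     unsigned int vid = tci & MLX5DV_FLOW_VLAN_VID_MASK;  // 5
 */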
1672
1673 /**
1674  * Validate the push VLAN action.
1675  *
1676  * @param[in] action_flags
1677  *   Holds the actions detected until now.
1678  * @param[in] item_flags
1679  *   Holds the items detected in this rule.
1680  * @param[in] action
1681  *   Pointer to the push VLAN action.
1682  * @param[in] attr
1683  *   Pointer to flow attributes.
1682  * @param[out] error
1683  *   Pointer to error structure.
1684  *
1685  * @return
1686  *   0 on success, a negative errno value otherwise and rte_errno is set.
1687  */
1688 static int
1689 flow_dv_validate_action_push_vlan(uint64_t action_flags,
1690                                   uint64_t item_flags,
1691                                   const struct rte_flow_action *action,
1692                                   const struct rte_flow_attr *attr,
1693                                   struct rte_flow_error *error)
1694 {
1695         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1696
1697         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1698             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1699                 return rte_flow_error_set(error, EINVAL,
1700                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1701                                           "invalid vlan ethertype");
1702         if (action_flags &
1703                 (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1704                 return rte_flow_error_set(error, ENOTSUP,
1705                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1706                                           "no support for multiple VLAN "
1707                                           "actions");
1708         if (!mlx5_flow_find_action
1709                         (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
1710             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1711                 return rte_flow_error_set(error, ENOTSUP,
1712                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
1713                                 "push VLAN needs to match on VLAN in order to "
1714                                 "get VLAN VID information because there is "
1715                                 "no followed set VLAN VID action");
1716         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1717                 return rte_flow_error_set(error, EINVAL,
1718                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1719                                           "wrong action order, port_id should "
1720                                           "be after push VLAN");
1721         (void)attr;
1722         return 0;
1723 }
1724
1725 /**
1726  * Validate the set VLAN PCP.
1727  *
1728  * @param[in] action_flags
1729  *   Holds the actions detected until now.
1730  * @param[in] actions
1731  *   Pointer to the list of actions remaining in the flow rule.
1734  * @param[out] error
1735  *   Pointer to error structure.
1736  *
1737  * @return
1738  *   0 on success, a negative errno value otherwise and rte_errno is set.
1739  */
1740 static int
1741 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1742                                      const struct rte_flow_action actions[],
1743                                      struct rte_flow_error *error)
1744 {
1745         const struct rte_flow_action *action = actions;
1746         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
1747
1748         if (conf->vlan_pcp > 7)
1749                 return rte_flow_error_set(error, EINVAL,
1750                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1751                                           "VLAN PCP value is too big");
1752         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1753                 return rte_flow_error_set(error, ENOTSUP,
1754                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1755                                           "set VLAN PCP action must follow "
1756                                           "the push VLAN action");
1757         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
1758                 return rte_flow_error_set(error, ENOTSUP,
1759                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1760                                           "Multiple VLAN PCP modification are "
1761                                           "not supported");
1762         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1763                 return rte_flow_error_set(error, EINVAL,
1764                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1765                                           "wrong action order, port_id should "
1766                                           "be after set VLAN PCP");
1767         return 0;
1768 }
1769
1770 /**
1771  * Validate the set VLAN VID.
1772  *
1773  * @param[in] item_flags
1774  *   Holds the items detected in this rule.
1775  * @param[in] action_flags
1776  *   Holds the actions detected until now.
1777  * @param[in] actions
1778  *   Pointer to the list of actions remaining in the flow rule.
1779  * @param[out] error
1780  *   Pointer to error structure.
1781  *
1782  * @return
1783  *   0 on success, a negative errno value otherwise and rte_errno is set.
1784  */
1785 static int
1786 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1787                                      uint64_t action_flags,
1788                                      const struct rte_flow_action actions[],
1789                                      struct rte_flow_error *error)
1790 {
1791         const struct rte_flow_action *action = actions;
1792         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1793
1794         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
1795                 return rte_flow_error_set(error, EINVAL,
1796                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1797                                           "VLAN VID value is too big");
1798         /* there is an of_push_vlan action before us */
1799         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
1800                 if (mlx5_flow_find_action(actions + 1,
1801                                           RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
1802                         return rte_flow_error_set(error, ENOTSUP,
1803                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
1804                                         "Multiple VLAN VID modifications are "
1805                                         "not supported");
1806                 else
1807                         return 0;
1808         }
1809
1810         /*
1811          * Action is on an existing VLAN header:
1812          *   Need to verify this is a single set VLAN VID action.
1813          *   Rule must include a match on the outer VLAN.
1814          */
1815         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1816                 return rte_flow_error_set(error, ENOTSUP,
1817                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1818                                           "Multiple VLAN VID modifications are "
1819                                           "not supported");
1820         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1821                 return rte_flow_error_set(error, EINVAL,
1822                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1823                                           "match on VLAN is required in order "
1824                                           "to set VLAN VID");
1825         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1826                 return rte_flow_error_set(error, EINVAL,
1827                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1828                                           "wrong action order, port_id should "
1829                                           "be after set VLAN VID");
1830         return 0;
1831 }
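
/*
 * Byte-order note (illustrative, values hypothetical): vlan_vid is
 * carried in network order, hence the conversion before the range
 * check above; comparing raw big-endian values misorders on
 * little-endian hosts.
 *
 *     rte_be16_t vid_be = RTE_BE16(0x00ff);  // VID 255, valid
 *     // On x86 vid_be is stored as 0xff00, and 0xff00 > 0xfe0f
 *     // (RTE_BE16(0xFFE)), so a raw comparison would reject it.
 *     uint16_t vid = rte_be_to_cpu_16(vid_be);  // 0x00ff, accepted
 */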
1832
1833 /**
1834  * Validate the FLAG action.
1835  *
1836  * @param[in] dev
1837  *   Pointer to the rte_eth_dev structure.
1838  * @param[in] action_flags
1839  *   Holds the actions detected until now.
1840  * @param[in] attr
1841  *   Pointer to flow attributes
1842  * @param[out] error
1843  *   Pointer to error structure.
1844  *
1845  * @return
1846  *   0 on success, a negative errno value otherwise and rte_errno is set.
1847  */
1848 static int
1849 flow_dv_validate_action_flag(struct rte_eth_dev *dev,
1850                              uint64_t action_flags,
1851                              const struct rte_flow_attr *attr,
1852                              struct rte_flow_error *error)
1853 {
1854         struct mlx5_priv *priv = dev->data->dev_private;
1855         struct mlx5_dev_config *config = &priv->config;
1856         int ret;
1857
1858         /* Fall back if no extended metadata register support. */
1859         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1860                 return mlx5_flow_validate_action_flag(action_flags, attr,
1861                                                       error);
1862         /* Extensive metadata mode requires registers. */
1863         if (!mlx5_flow_ext_mreg_supported(dev))
1864                 return rte_flow_error_set(error, ENOTSUP,
1865                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1866                                           "no metadata registers "
1867                                           "to support flag action");
1868         if (!(priv->sh->dv_mark_mask & MLX5_FLOW_MARK_DEFAULT))
1869                 return rte_flow_error_set(error, ENOTSUP,
1870                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1871                                           "extended metadata register"
1872                                           " isn't available");
1873         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1874         if (ret < 0)
1875                 return ret;
1876         assert(ret > 0);
1877         if (action_flags & MLX5_FLOW_ACTION_DROP)
1878                 return rte_flow_error_set(error, EINVAL,
1879                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1880                                           "can't drop and flag in same flow");
1881         if (action_flags & MLX5_FLOW_ACTION_MARK)
1882                 return rte_flow_error_set(error, EINVAL,
1883                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1884                                           "can't mark and flag in same flow");
1885         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1886                 return rte_flow_error_set(error, EINVAL,
1887                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1888                                           "can't have 2 flag"
1889                                           " actions in same flow");
1890         return 0;
1891 }
1892
1893 /**
1894  * Validate MARK action.
1895  *
1896  * @param[in] dev
1897  *   Pointer to the rte_eth_dev structure.
1898  * @param[in] action
1899  *   Pointer to action.
1900  * @param[in] action_flags
1901  *   Holds the actions detected until now.
1902  * @param[in] attr
1903  *   Pointer to flow attributes
1904  * @param[out] error
1905  *   Pointer to error structure.
1906  *
1907  * @return
1908  *   0 on success, a negative errno value otherwise and rte_errno is set.
1909  */
1910 static int
1911 flow_dv_validate_action_mark(struct rte_eth_dev *dev,
1912                              const struct rte_flow_action *action,
1913                              uint64_t action_flags,
1914                              const struct rte_flow_attr *attr,
1915                              struct rte_flow_error *error)
1916 {
1917         struct mlx5_priv *priv = dev->data->dev_private;
1918         struct mlx5_dev_config *config = &priv->config;
1919         const struct rte_flow_action_mark *mark = action->conf;
1920         int ret;
1921
1922         /* Fall back if no extended metadata register support. */
1923         if (config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY)
1924                 return mlx5_flow_validate_action_mark(action, action_flags,
1925                                                       attr, error);
1926         /* Extensive metadata mode requires registers. */
1927         if (!mlx5_flow_ext_mreg_supported(dev))
1928                 return rte_flow_error_set(error, ENOTSUP,
1929                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1930                                           "no metadata registers "
1931                                           "to support mark action");
1932         if (!priv->sh->dv_mark_mask)
1933                 return rte_flow_error_set(error, ENOTSUP,
1934                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1935                                           "extended metadata register"
1936                                           " isn't available");
1937         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
1938         if (ret < 0)
1939                 return ret;
1940         assert(ret > 0);
1941         if (!mark)
1942                 return rte_flow_error_set(error, EINVAL,
1943                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1944                                           "configuration cannot be null");
1945         if (mark->id >= (MLX5_FLOW_MARK_MAX & priv->sh->dv_mark_mask))
1946                 return rte_flow_error_set(error, EINVAL,
1947                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1948                                           &mark->id,
1949                                           "mark id exceeds the limit");
1950         if (action_flags & MLX5_FLOW_ACTION_DROP)
1951                 return rte_flow_error_set(error, EINVAL,
1952                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1953                                           "can't drop and mark in same flow");
1954         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1955                 return rte_flow_error_set(error, EINVAL,
1956                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1957                                           "can't flag and mark in same flow");
1958         if (action_flags & MLX5_FLOW_ACTION_MARK)
1959                 return rte_flow_error_set(error, EINVAL,
1960                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1961                                           "can't have 2 mark actions in same"
1962                                           " flow");
1963         return 0;
1964 }
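
/*
 * Illustrative sketch (not compiled into the driver): a MARK action
 * accepted by the validation above; the id is hypothetical and must
 * stay below MLX5_FLOW_MARK_MAX masked by the available register bits.
 *
 *     struct rte_flow_action_mark mark = { .id = 42 };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_MARK,
 *             .conf = &mark,
 *     };
 */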
1965
1966 /**
1967  * Validate SET_META action.
1968  *
1969  * @param[in] dev
1970  *   Pointer to the rte_eth_dev structure.
1971  * @param[in] action
1972  *   Pointer to the set meta action.
1973  * @param[in] action_flags
1974  *   Holds the actions detected until now.
1975  * @param[in] attr
1976  *   Pointer to flow attributes
1977  * @param[out] error
1978  *   Pointer to error structure.
1979  *
1980  * @return
1981  *   0 on success, a negative errno value otherwise and rte_errno is set.
1982  */
1983 static int
1984 flow_dv_validate_action_set_meta(struct rte_eth_dev *dev,
1985                                  const struct rte_flow_action *action,
1986                                  uint64_t action_flags __rte_unused,
1987                                  const struct rte_flow_attr *attr,
1988                                  struct rte_flow_error *error)
1989 {
1990         const struct rte_flow_action_set_meta *conf;
1991         uint32_t nic_mask = UINT32_MAX;
1992         enum modify_reg reg;
1993
1994         if (!mlx5_flow_ext_mreg_supported(dev))
1995                 return rte_flow_error_set(error, ENOTSUP,
1996                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1997                                           "extended metadata register"
1998                                           " isn't supported");
1999         reg = flow_dv_get_metadata_reg(dev, attr, error);
2000         if (reg < 0)
2001                 return reg;
2002         if (reg != REG_A && reg != REG_B) {
2003                 struct mlx5_priv *priv = dev->data->dev_private;
2004
2005                 nic_mask = priv->sh->dv_meta_mask;
2006         }
2007         if (!(action->conf))
2008                 return rte_flow_error_set(error, EINVAL,
2009                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2010                                           "configuration cannot be null");
2011         conf = (const struct rte_flow_action_set_meta *)action->conf;
2012         if (!conf->mask)
2013                 return rte_flow_error_set(error, EINVAL,
2014                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2015                                           "zero mask doesn't have any effect");
2016         if (conf->mask & ~nic_mask)
2017                 return rte_flow_error_set(error, EINVAL,
2018                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2019                                           "metadata must be within reg C0");
2020         if (!(conf->data & conf->mask))
2021                 return rte_flow_error_set(error, EINVAL,
2022                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2023                                           "zero value has no effect");
2024         return 0;
2025 }
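
/*
 * Illustrative sketch (not compiled into the driver): a SET_META
 * configuration that passes the checks above; data and mask are
 * hypothetical, the mask must be non-zero and fit the register mask.
 *
 *     struct rte_flow_action_set_meta conf = {
 *             .data = 0x1234,
 *             .mask = 0xffff,
 *     };
 *     struct rte_flow_action action = {
 *             .type = RTE_FLOW_ACTION_TYPE_SET_META,
 *             .conf = &conf,
 *     };
 */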
2026
2027 /**
2028  * Validate SET_TAG action.
2029  *
2030  * @param[in] dev
2031  *   Pointer to the rte_eth_dev structure.
2032  * @param[in] action
2033  *   Pointer to the set tag action.
2034  * @param[in] action_flags
2035  *   Holds the actions detected until now.
2036  * @param[in] attr
2037  *   Pointer to flow attributes
2038  * @param[out] error
2039  *   Pointer to error structure.
2040  *
2041  * @return
2042  *   0 on success, a negative errno value otherwise and rte_errno is set.
2043  */
2044 static int
2045 flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
2046                                 const struct rte_flow_action *action,
2047                                 uint64_t action_flags,
2048                                 const struct rte_flow_attr *attr,
2049                                 struct rte_flow_error *error)
2050 {
2051         const struct rte_flow_action_set_tag *conf;
2052         const uint64_t terminal_action_flags =
2053                 MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE |
2054                 MLX5_FLOW_ACTION_RSS;
2055         int ret;
2056
2057         if (!mlx5_flow_ext_mreg_supported(dev))
2058                 return rte_flow_error_set(error, ENOTSUP,
2059                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2060                                           "extensive metadata register"
2061                                           " isn't supported");
2062         if (!(action->conf))
2063                 return rte_flow_error_set(error, EINVAL,
2064                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2065                                           "configuration cannot be null");
2066         conf = (const struct rte_flow_action_set_tag *)action->conf;
2067         if (!conf->mask)
2068                 return rte_flow_error_set(error, EINVAL,
2069                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2070                                           "zero mask doesn't have any effect");
2071         ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, conf->index, error);
2072         if (ret < 0)
2073                 return ret;
2074         if (!attr->transfer && attr->ingress &&
2075             (action_flags & terminal_action_flags))
2076                 return rte_flow_error_set(error, EINVAL,
2077                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2078                                           "set_tag has no effect"
2079                                           " with terminal actions");
2080         return 0;
2081 }
2082
2083 /**
2084  * Validate count action.
2085  *
2086  * @param[in] dev
2087  *   Pointer to the rte_eth_dev structure.
2088  * @param[out] error
2089  *   Pointer to error structure.
2090  *
2091  * @return
2092  *   0 on success, a negative errno value otherwise and rte_errno is set.
2093  */
2094 static int
2095 flow_dv_validate_action_count(struct rte_eth_dev *dev,
2096                               struct rte_flow_error *error)
2097 {
2098         struct mlx5_priv *priv = dev->data->dev_private;
2099
2100         if (!priv->config.devx)
2101                 goto notsup_err;
2102 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
2103         return 0;
2104 #endif
2105 notsup_err:
2106         return rte_flow_error_set
2107                       (error, ENOTSUP,
2108                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2109                        NULL,
2110                        "count action not supported");
2111 }
2112
2113 /**
2114  * Validate the L2 encap action.
2115  *
2116  * @param[in] action_flags
2117  *   Holds the actions detected until now.
2118  * @param[in] action
2119  *   Pointer to the encap action.
2120  * @param[in] attr
2121  *   Pointer to flow attributes
2122  * @param[out] error
2123  *   Pointer to error structure.
2124  *
2125  * @return
2126  *   0 on success, a negative errno value otherwise and rte_errno is set.
2127  */
2128 static int
2129 flow_dv_validate_action_l2_encap(uint64_t action_flags,
2130                                  const struct rte_flow_action *action,
2131                                  const struct rte_flow_attr *attr,
2132                                  struct rte_flow_error *error)
2133 {
2134         if (!(action->conf))
2135                 return rte_flow_error_set(error, EINVAL,
2136                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2137                                           "configuration cannot be null");
2138         if (action_flags & MLX5_FLOW_ACTION_DROP)
2139                 return rte_flow_error_set(error, EINVAL,
2140                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2141                                           "can't drop and encap in same flow");
2142         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
2143                 return rte_flow_error_set(error, EINVAL,
2144                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2145                                           "can only have a single encap or"
2146                                           " decap action in a flow");
2147         if (!attr->transfer && attr->ingress)
2148                 return rte_flow_error_set(error, ENOTSUP,
2149                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2150                                           NULL,
2151                                           "encap action not supported for "
2152                                           "ingress");
2153         return 0;
2154 }
2155
2156 /**
2157  * Validate the L2 decap action.
2158  *
2159  * @param[in] action_flags
2160  *   Holds the actions detected until now.
2161  * @param[in] attr
2162  *   Pointer to flow attributes
2163  * @param[out] error
2164  *   Pointer to error structure.
2165  *
2166  * @return
2167  *   0 on success, a negative errno value otherwise and rte_errno is set.
2168  */
2169 static int
2170 flow_dv_validate_action_l2_decap(uint64_t action_flags,
2171                                  const struct rte_flow_attr *attr,
2172                                  struct rte_flow_error *error)
2173 {
2174         if (action_flags & MLX5_FLOW_ACTION_DROP)
2175                 return rte_flow_error_set(error, EINVAL,
2176                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2177                                           "can't drop and decap in same flow");
2178         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
2179                 return rte_flow_error_set(error, EINVAL,
2180                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2181                                           "can only have a single encap or"
2182                                           " decap action in a flow");
2183         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
2184                 return rte_flow_error_set(error, EINVAL,
2185                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2186                                           "can't have decap action after"
2187                                           " modify action");
2188         if (attr->egress)
2189                 return rte_flow_error_set(error, ENOTSUP,
2190                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2191                                           NULL,
2192                                           "decap action not supported for "
2193                                           "egress");
2194         return 0;
2195 }
2196
2197 /**
2198  * Validate the raw encap action.
2199  *
2200  * @param[in] action_flags
2201  *   Holds the actions detected until now.
2202  * @param[in] action
2203  *   Pointer to the encap action.
2204  * @param[in] attr
2205  *   Pointer to flow attributes
2206  * @param[out] error
2207  *   Pointer to error structure.
2208  *
2209  * @return
2210  *   0 on success, a negative errno value otherwise and rte_errno is set.
2211  */
2212 static int
2213 flow_dv_validate_action_raw_encap(uint64_t action_flags,
2214                                   const struct rte_flow_action *action,
2215                                   const struct rte_flow_attr *attr,
2216                                   struct rte_flow_error *error)
2217 {
2218         const struct rte_flow_action_raw_encap *raw_encap =
2219                 (const struct rte_flow_action_raw_encap *)action->conf;
2220         if (!(action->conf))
2221                 return rte_flow_error_set(error, EINVAL,
2222                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2223                                           "configuration cannot be null");
2224         if (action_flags & MLX5_FLOW_ACTION_DROP)
2225                 return rte_flow_error_set(error, EINVAL,
2226                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2227                                           "can't drop and encap in same flow");
2228         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2229                 return rte_flow_error_set(error, EINVAL,
2230                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2231                                           "can only have a single encap"
2232                                           " action in a flow");
2233         /* encap without preceding decap is not supported for ingress */
2234         if (!attr->transfer && attr->ingress &&
2235             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
2236                 return rte_flow_error_set(error, ENOTSUP,
2237                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2238                                           NULL,
2239                                           "encap action not supported for "
2240                                           "ingress");
2241         if (!raw_encap->size || !raw_encap->data)
2242                 return rte_flow_error_set(error, EINVAL,
2243                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
2244                                           "raw encap data cannot be empty");
2245         return 0;
2246 }
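
/*
 * Illustrative sketch (not compiled into the driver): a RAW_ENCAP
 * action with a hypothetical prebuilt header buffer; both data and
 * size must be non-zero, and on ingress a raw decap must precede it.
 *
 *     uint8_t hdr[64];  // prebuilt outer headers, filled elsewhere
 *     struct rte_flow_action_raw_encap conf = {
 *             .data = hdr,
 *             .size = sizeof(hdr),
 *     };
 */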
2247
2248 /**
2249  * Validate the raw decap action.
2250  *
2251  * @param[in] action_flags
2252  *   Holds the actions detected until now.
2253  * @param[in] action
2254  *   Pointer to the decap action.
2255  * @param[in] attr
2256  *   Pointer to flow attributes
2257  * @param[out] error
2258  *   Pointer to error structure.
2259  *
2260  * @return
2261  *   0 on success, a negative errno value otherwise and rte_errno is set.
2262  */
2263 static int
2264 flow_dv_validate_action_raw_decap(uint64_t action_flags,
2265                                   const struct rte_flow_action *action,
2266                                   const struct rte_flow_attr *attr,
2267                                   struct rte_flow_error *error)
2268 {
2269         const struct rte_flow_action_raw_decap *decap = action->conf;
2270
2271         if (action_flags & MLX5_FLOW_ACTION_DROP)
2272                 return rte_flow_error_set(error, EINVAL,
2273                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2274                                           "can't drop and decap in same flow");
2275         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2276                 return rte_flow_error_set(error, EINVAL,
2277                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2278                                           "can't have encap action before"
2279                                           " decap action");
2280         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
2281                 return rte_flow_error_set(error, EINVAL,
2282                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2283                                           "can only have a single decap"
2284                                           " action in a flow");
2285         /* decap action is valid on egress only if it is followed by encap */
2286         if (attr->egress && decap &&
2287             decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
2288                 return rte_flow_error_set(error, ENOTSUP,
2289                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2290                                           NULL, "decap action not supported"
2291                                           " for egress");
2292         } else if (decap && decap->size > MLX5_ENCAPSULATION_DECISION_SIZE &&
2293                    (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) {
2294                 return rte_flow_error_set(error, EINVAL,
2295                                           RTE_FLOW_ERROR_TYPE_ACTION,
2296                                           NULL,
2297                                           "can't have decap action "
2298                                           "after modify action");
2299         }
2300         return 0;
2301 }
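
/*
 * Note on the threshold above: MLX5_ENCAPSULATION_DECISION_SIZE is an
 * L2 item plus an L3 item, i.e. on common builds
 *
 *     sizeof(struct rte_flow_item_eth)      // 14 bytes
 *     + sizeof(struct rte_flow_item_ipv4)   // 20 bytes
 *     == 34 bytes
 *
 * A decap of at most this size is assumed to be part of a
 * decap-then-encap rewrite and therefore stays legal on egress; larger
 * decaps strip more than the tunnel headers and are rejected there and
 * after header-modify actions (interpretation inferred from the checks
 * above).
 */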
2302
2303 /**
2304  * Find existing encap/decap resource or create and register a new one.
2305  *
2306  * @param[in, out] dev
2307  *   Pointer to rte_eth_dev structure.
2308  * @param[in, out] resource
2309  *   Pointer to encap/decap resource.
2310  * @param[in, out] dev_flow
2311  *   Pointer to the dev_flow.
2312  * @param[out] error
2313  *   Pointer to error structure.
2314  *
2315  * @return
2316  *   0 on success, otherwise -errno and errno is set.
2317  */
2318 static int
2319 flow_dv_encap_decap_resource_register
2320                         (struct rte_eth_dev *dev,
2321                          struct mlx5_flow_dv_encap_decap_resource *resource,
2322                          struct mlx5_flow *dev_flow,
2323                          struct rte_flow_error *error)
2324 {
2325         struct mlx5_priv *priv = dev->data->dev_private;
2326         struct mlx5_ibv_shared *sh = priv->sh;
2327         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
2328         struct mlx5dv_dr_domain *domain;
2329
2330         resource->flags = dev_flow->group ? 0 : 1;
2331         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2332                 domain = sh->fdb_domain;
2333         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2334                 domain = sh->rx_domain;
2335         else
2336                 domain = sh->tx_domain;
2337
2338         /* Lookup a matching resource from cache. */
2339         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
2340                 if (resource->reformat_type == cache_resource->reformat_type &&
2341                     resource->ft_type == cache_resource->ft_type &&
2342                     resource->flags == cache_resource->flags &&
2343                     resource->size == cache_resource->size &&
2344                     !memcmp((const void *)resource->buf,
2345                             (const void *)cache_resource->buf,
2346                             resource->size)) {
2347                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
2348                                 (void *)cache_resource,
2349                                 rte_atomic32_read(&cache_resource->refcnt));
2350                         rte_atomic32_inc(&cache_resource->refcnt);
2351                         dev_flow->dv.encap_decap = cache_resource;
2352                         return 0;
2353                 }
2354         }
2355         /* Register new encap/decap resource. */
2356         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2357         if (!cache_resource)
2358                 return rte_flow_error_set(error, ENOMEM,
2359                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2360                                           "cannot allocate resource memory");
2361         *cache_resource = *resource;
2362         cache_resource->verbs_action =
2363                 mlx5_glue->dv_create_flow_action_packet_reformat
2364                         (sh->ctx, cache_resource->reformat_type,
2365                          cache_resource->ft_type, domain, cache_resource->flags,
2366                          cache_resource->size,
2367                          (cache_resource->size ? cache_resource->buf : NULL));
2368         if (!cache_resource->verbs_action) {
2369                 rte_free(cache_resource);
2370                 return rte_flow_error_set(error, ENOMEM,
2371                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2372                                           NULL, "cannot create action");
2373         }
2374         rte_atomic32_init(&cache_resource->refcnt);
2375         rte_atomic32_inc(&cache_resource->refcnt);
2376         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
2377         dev_flow->dv.encap_decap = cache_resource;
2378         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
2379                 (void *)cache_resource,
2380                 rte_atomic32_read(&cache_resource->refcnt));
2381         return 0;
2382 }
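
/*
 * The function above is the lookup-or-create pattern shared by the
 * encap/decap, port ID and push VLAN registration helpers: scan the
 * cache list on the shared context, take a reference on an exact
 * match, otherwise allocate, create the action once and insert at the
 * list head. A minimal sketch of the shape (generic placeholder names,
 * not real driver symbols):
 *
 *     LIST_FOREACH(res, &sh->cache_list, next) {
 *             if (match(res, key)) {
 *                     rte_atomic32_inc(&res->refcnt);
 *                     return res;
 *             }
 *     }
 *     res = rte_calloc(__func__, 1, sizeof(*res), 0);
 *     res->action = create_action(key);
 *     rte_atomic32_init(&res->refcnt);
 *     rte_atomic32_inc(&res->refcnt);
 *     LIST_INSERT_HEAD(&sh->cache_list, res, next);
 *     return res;
 */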
2383
2384 /**
2385  * Find existing table jump resource or create and register a new one.
2386  *
2387  * @param[in, out] dev
2388  *   Pointer to rte_eth_dev structure.
2389  * @param[in, out] tbl
2390  *   Pointer to flow table resource.
2391  * @param[in, out] dev_flow
2392  *   Pointer to the dev_flow.
2393  * @param[out] error
2394  *   Pointer to error structure.
2395  *
2396  * @return
2397  *   0 on success, otherwise -errno and errno is set.
2398  */
2399 static int
2400 flow_dv_jump_tbl_resource_register
2401                         (struct rte_eth_dev *dev __rte_unused,
2402                          struct mlx5_flow_tbl_resource *tbl,
2403                          struct mlx5_flow *dev_flow,
2404                          struct rte_flow_error *error)
2405 {
2406         struct mlx5_flow_tbl_data_entry *tbl_data =
2407                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
2408         int cnt;
2409
2410         assert(tbl);
2411         cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
2412         if (!cnt) {
2413                 tbl_data->jump.action =
2414                         mlx5_glue->dr_create_flow_action_dest_flow_tbl
2415                         (tbl->obj);
2416                 if (!tbl_data->jump.action)
2417                         return rte_flow_error_set(error, ENOMEM,
2418                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2419                                         NULL, "cannot create jump action");
2420                 DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
2421                         (void *)&tbl_data->jump, cnt);
2422         } else {
2423                 assert(tbl_data->jump.action);
2424                 DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
2425                         (void *)&tbl_data->jump, cnt);
2426         }
2427         rte_atomic32_inc(&tbl_data->jump.refcnt);
2428         dev_flow->dv.jump = &tbl_data->jump;
2429         return 0;
2430 }
2431
2432 /**
2433  * Find existing table port ID resource or create and register a new one.
2434  *
2435  * @param[in, out] dev
2436  *   Pointer to rte_eth_dev structure.
2437  * @param[in, out] resource
2438  *   Pointer to port ID action resource.
2439  * @param[in, out] dev_flow
2440  *   Pointer to the dev_flow.
2441  * @param[out] error
2442  *   Pointer to error structure.
2443  *
2444  * @return
2445  *   0 on success, otherwise -errno and errno is set.
2446  */
2447 static int
2448 flow_dv_port_id_action_resource_register
2449                         (struct rte_eth_dev *dev,
2450                          struct mlx5_flow_dv_port_id_action_resource *resource,
2451                          struct mlx5_flow *dev_flow,
2452                          struct rte_flow_error *error)
2453 {
2454         struct mlx5_priv *priv = dev->data->dev_private;
2455         struct mlx5_ibv_shared *sh = priv->sh;
2456         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
2457
2458         /* Lookup a matching resource from cache. */
2459         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
2460                 if (resource->port_id == cache_resource->port_id) {
2461                         DRV_LOG(DEBUG, "port id action resource resource %p: "
2462                                 "refcnt %d++",
2463                                 (void *)cache_resource,
2464                                 rte_atomic32_read(&cache_resource->refcnt));
2465                         rte_atomic32_inc(&cache_resource->refcnt);
2466                         dev_flow->dv.port_id_action = cache_resource;
2467                         return 0;
2468                 }
2469         }
2470         /* Register new port id action resource. */
2471         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2472         if (!cache_resource)
2473                 return rte_flow_error_set(error, ENOMEM,
2474                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2475                                           "cannot allocate resource memory");
2476         *cache_resource = *resource;
2477         /*
2478          * Depending on rdma_core version the glue routine calls
2479          * either mlx5dv_dr_action_create_dest_ib_port(domain, ibv_port)
2480          * or mlx5dv_dr_action_create_dest_vport(domain, vport_id).
2481          */
2482         cache_resource->action =
2483                 mlx5_glue->dr_create_flow_action_dest_port
2484                         (priv->sh->fdb_domain, resource->port_id);
2485         if (!cache_resource->action) {
2486                 rte_free(cache_resource);
2487                 return rte_flow_error_set(error, ENOMEM,
2488                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2489                                           NULL, "cannot create action");
2490         }
2491         rte_atomic32_init(&cache_resource->refcnt);
2492         rte_atomic32_inc(&cache_resource->refcnt);
2493         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
2494         dev_flow->dv.port_id_action = cache_resource;
2495         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
2496                 (void *)cache_resource,
2497                 rte_atomic32_read(&cache_resource->refcnt));
2498         return 0;
2499 }
2500
2501 /**
2502  * Find existing push vlan resource or create and register a new one.
2503  *
2504  * @param[in, out] dev
2505  *   Pointer to rte_eth_dev structure.
2506  * @param[in, out] resource
2507  *   Pointer to the push VLAN action resource.
2508  * @param[in, out] dev_flow
2509  *   Pointer to the dev_flow.
2510  * @param[out] error
2511  *   Pointer to error structure.
2512  *
2513  * @return
2514  *   0 on success, otherwise -errno and errno is set.
2515  */
2516 static int
2517 flow_dv_push_vlan_action_resource_register
2518                        (struct rte_eth_dev *dev,
2519                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
2520                         struct mlx5_flow *dev_flow,
2521                         struct rte_flow_error *error)
2522 {
2523         struct mlx5_priv *priv = dev->data->dev_private;
2524         struct mlx5_ibv_shared *sh = priv->sh;
2525         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
2526         struct mlx5dv_dr_domain *domain;
2527
2528         /* Lookup a matching resource from cache. */
2529         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
2530                 if (resource->vlan_tag == cache_resource->vlan_tag &&
2531                     resource->ft_type == cache_resource->ft_type) {
2532                         DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
2533                                 "refcnt %d++",
2534                                 (void *)cache_resource,
2535                                 rte_atomic32_read(&cache_resource->refcnt));
2536                         rte_atomic32_inc(&cache_resource->refcnt);
2537                         dev_flow->dv.push_vlan_res = cache_resource;
2538                         return 0;
2539                 }
2540         }
2541         /* Register new push_vlan action resource. */
2542         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2543         if (!cache_resource)
2544                 return rte_flow_error_set(error, ENOMEM,
2545                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2546                                           "cannot allocate resource memory");
2547         *cache_resource = *resource;
2548         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2549                 domain = sh->fdb_domain;
2550         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
2551                 domain = sh->rx_domain;
2552         else
2553                 domain = sh->tx_domain;
2554         cache_resource->action =
2555                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
2556                                                            resource->vlan_tag);
2557         if (!cache_resource->action) {
2558                 rte_free(cache_resource);
2559                 return rte_flow_error_set(error, ENOMEM,
2560                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2561                                           NULL, "cannot create action");
2562         }
2563         rte_atomic32_init(&cache_resource->refcnt);
2564         rte_atomic32_inc(&cache_resource->refcnt);
2565         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
2566         dev_flow->dv.push_vlan_res = cache_resource;
2567         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
2568                 (void *)cache_resource,
2569                 rte_atomic32_read(&cache_resource->refcnt));
2570         return 0;
2571 }
2572 /**
2573  * Get the size of a specific rte_flow_item_type.
2574  *
2575  * @param[in] item_type
2576  *   Tested rte_flow_item_type.
2577  *
2578  * @return
2579  *   Size of the item_type structure, 0 if void or irrelevant.
2580  */
2581 static size_t
2582 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
2583 {
2584         size_t retval;
2585
2586         switch (item_type) {
2587         case RTE_FLOW_ITEM_TYPE_ETH:
2588                 retval = sizeof(struct rte_flow_item_eth);
2589                 break;
2590         case RTE_FLOW_ITEM_TYPE_VLAN:
2591                 retval = sizeof(struct rte_flow_item_vlan);
2592                 break;
2593         case RTE_FLOW_ITEM_TYPE_IPV4:
2594                 retval = sizeof(struct rte_flow_item_ipv4);
2595                 break;
2596         case RTE_FLOW_ITEM_TYPE_IPV6:
2597                 retval = sizeof(struct rte_flow_item_ipv6);
2598                 break;
2599         case RTE_FLOW_ITEM_TYPE_UDP:
2600                 retval = sizeof(struct rte_flow_item_udp);
2601                 break;
2602         case RTE_FLOW_ITEM_TYPE_TCP:
2603                 retval = sizeof(struct rte_flow_item_tcp);
2604                 break;
2605         case RTE_FLOW_ITEM_TYPE_VXLAN:
2606                 retval = sizeof(struct rte_flow_item_vxlan);
2607                 break;
2608         case RTE_FLOW_ITEM_TYPE_GRE:
2609                 retval = sizeof(struct rte_flow_item_gre);
2610                 break;
2611         case RTE_FLOW_ITEM_TYPE_NVGRE:
2612                 retval = sizeof(struct rte_flow_item_nvgre);
2613                 break;
2614         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2615                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
2616                 break;
2617         case RTE_FLOW_ITEM_TYPE_MPLS:
2618                 retval = sizeof(struct rte_flow_item_mpls);
2619                 break;
2620         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
2621         default:
2622                 retval = 0;
2623                 break;
2624         }
2625         return retval;
2626 }
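
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): flow_dv_get_item_len() can be
 * used to pre-compute the raw buffer size an encapsulation definition
 * needs, the same accumulation flow_dv_convert_encap_data() performs below.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static size_t
example_encap_data_len(const struct rte_flow_item *items)
{
	size_t len = 0;

	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
		len += flow_dv_get_item_len(items->type);
	return len;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */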
2627
2628 #define MLX5_ENCAP_IPV4_VERSION         0x40
2629 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
2630 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
2631 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
2632 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
2633 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
2634 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
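
/*
 * The values above are the defaults filled in by
 * flow_dv_convert_encap_data() when the user leaves the corresponding
 * header fields at zero: IPv4 version 4 with minimal IHL (5 words) and
 * TTL 64, IPv6 version 6 with hop limit 255, the VXLAN I-flag (valid VNI)
 * and the VXLAN-GPE P-flag (next protocol present).
 */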
2635
2636 /**
2637  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
2638  *
2639  * @param[in] items
2640  *   Pointer to rte_flow_item objects list.
2641  * @param[out] buf
2642  *   Pointer to the output buffer.
2643  * @param[out] size
2644  *   Pointer to the output buffer size.
2645  * @param[out] error
2646  *   Pointer to the error structure.
2647  *
2648  * @return
2649  *   0 on success, a negative errno value otherwise and rte_errno is set.
2650  */
2651 static int
2652 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2653                            size_t *size, struct rte_flow_error *error)
2654 {
2655         struct rte_ether_hdr *eth = NULL;
2656         struct rte_vlan_hdr *vlan = NULL;
2657         struct rte_ipv4_hdr *ipv4 = NULL;
2658         struct rte_ipv6_hdr *ipv6 = NULL;
2659         struct rte_udp_hdr *udp = NULL;
2660         struct rte_vxlan_hdr *vxlan = NULL;
2661         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
2662         struct rte_gre_hdr *gre = NULL;
2663         size_t len;
2664         size_t temp_size = 0;
2665
2666         if (!items)
2667                 return rte_flow_error_set(error, EINVAL,
2668                                           RTE_FLOW_ERROR_TYPE_ACTION,
2669                                           NULL, "invalid empty data");
2670         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2671                 len = flow_dv_get_item_len(items->type);
2672                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
2673                         return rte_flow_error_set(error, EINVAL,
2674                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2675                                                   (void *)items->type,
2676                                                   "items total size is too big"
2677                                                   " for encap action");
2678                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
2679                 switch (items->type) {
2680                 case RTE_FLOW_ITEM_TYPE_ETH:
2681                         eth = (struct rte_ether_hdr *)&buf[temp_size];
2682                         break;
2683                 case RTE_FLOW_ITEM_TYPE_VLAN:
2684                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
2685                         if (!eth)
2686                                 return rte_flow_error_set(error, EINVAL,
2687                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2688                                                 (void *)items->type,
2689                                                 "eth header not found");
2690                         if (!eth->ether_type)
2691                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
2692                         break;
2693                 case RTE_FLOW_ITEM_TYPE_IPV4:
2694                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
2695                         if (!vlan && !eth)
2696                                 return rte_flow_error_set(error, EINVAL,
2697                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2698                                                 (void *)items->type,
2699                                                 "neither eth nor vlan"
2700                                                 " header found");
2701                         if (vlan && !vlan->eth_proto)
2702                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2703                         else if (eth && !eth->ether_type)
2704                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
2705                         if (!ipv4->version_ihl)
2706                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
2707                                                     MLX5_ENCAP_IPV4_IHL_MIN;
2708                         if (!ipv4->time_to_live)
2709                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
2710                         break;
2711                 case RTE_FLOW_ITEM_TYPE_IPV6:
2712                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
2713                         if (!vlan && !eth)
2714                                 return rte_flow_error_set(error, EINVAL,
2715                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2716                                                 (void *)items->type,
2717                                                 "neither eth nor vlan"
2718                                                 " header found");
2719                         if (vlan && !vlan->eth_proto)
2720                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2721                         else if (eth && !eth->ether_type)
2722                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
2723                         if (!ipv6->vtc_flow)
2724                                 ipv6->vtc_flow =
2725                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
2726                         if (!ipv6->hop_limits)
2727                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
2728                         break;
2729                 case RTE_FLOW_ITEM_TYPE_UDP:
2730                         udp = (struct rte_udp_hdr *)&buf[temp_size];
2731                         if (!ipv4 && !ipv6)
2732                                 return rte_flow_error_set(error, EINVAL,
2733                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2734                                                 (void *)items->type,
2735                                                 "ip header not found");
2736                         if (ipv4 && !ipv4->next_proto_id)
2737                                 ipv4->next_proto_id = IPPROTO_UDP;
2738                         else if (ipv6 && !ipv6->proto)
2739                                 ipv6->proto = IPPROTO_UDP;
2740                         break;
2741                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2742                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
2743                         if (!udp)
2744                                 return rte_flow_error_set(error, EINVAL,
2745                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2746                                                 (void *)items->type,
2747                                                 "udp header not found");
2748                         if (!udp->dst_port)
2749                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
2750                         if (!vxlan->vx_flags)
2751                                 vxlan->vx_flags =
2752                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
2753                         break;
2754                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2755                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
2756                         if (!udp)
2757                                 return rte_flow_error_set(error, EINVAL,
2758                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2759                                                 (void *)items->type,
2760                                                 "udp header not found");
2761                         if (!vxlan_gpe->proto)
2762                                 return rte_flow_error_set(error, EINVAL,
2763                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2764                                                 (void *)items->type,
2765                                                 "next protocol not found");
2766                         if (!udp->dst_port)
2767                                 udp->dst_port =
2768                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
2769                         if (!vxlan_gpe->vx_flags)
2770                                 vxlan_gpe->vx_flags =
2771                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
2772                         break;
2773                 case RTE_FLOW_ITEM_TYPE_GRE:
2774                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2775                         gre = (struct rte_gre_hdr *)&buf[temp_size];
2776                         if (!gre->proto)
2777                                 return rte_flow_error_set(error, EINVAL,
2778                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2779                                                 (void *)items->type,
2780                                                 "next protocol not found");
2781                         if (!ipv4 && !ipv6)
2782                                 return rte_flow_error_set(error, EINVAL,
2783                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2784                                                 (void *)items->type,
2785                                                 "ip header not found");
2786                         if (ipv4 && !ipv4->next_proto_id)
2787                                 ipv4->next_proto_id = IPPROTO_GRE;
2788                         else if (ipv6 && !ipv6->proto)
2789                                 ipv6->proto = IPPROTO_GRE;
2790                         break;
2791                 case RTE_FLOW_ITEM_TYPE_VOID:
2792                         break;
2793                 default:
2794                         return rte_flow_error_set(error, EINVAL,
2795                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2796                                                   (void *)items->type,
2797                                                   "unsupported item type");
2798                         break;
2799                 }
2800                 temp_size += len;
2801         }
2802         *size = temp_size;
2803         return 0;
2804 }
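
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): a minimal VXLAN encapsulation
 * definition converted to a raw header buffer. Fields left at zero
 * (ether_type, next_proto_id, UDP dst_port, VXLAN flags, ...) are filled
 * in by flow_dv_convert_encap_data() with the defaults above. The buffer
 * is assumed to hold at least MLX5_ENCAP_MAX_LEN bytes.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
example_convert_vxlan_encap(uint8_t *buf, size_t *size,
			    struct rte_flow_error *error)
{
	static const struct rte_flow_item_eth eth = {
		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
	};
	static const struct rte_flow_item_ipv4 ipv4 = {
		.hdr = {
			.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
			.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
		},
	};
	static const struct rte_flow_item_udp udp = {
		.hdr = { .src_port = 0 },
	};
	static const struct rte_flow_item_vxlan vxlan = {
		.vni = { 0, 0, 42 },
	};
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return flow_dv_convert_encap_data(items, buf, size, error);
}
#endif /* MLX5_FLOW_DV_EXAMPLES */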
2805
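/**
 * Zero the UDP checksum of a raw IPv6/UDP encapsulation header.
 *
 * The HW computes the IPv4 header checksum of the encapsulation data,
 * but not the UDP checksum over IPv6, so the latter is cleared (a zero
 * UDP checksum is permitted for tunnel protocols).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting at the L2 header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */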
2806 static int
2807 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2808 {
2809         struct rte_ether_hdr *eth = NULL;
2810         struct rte_vlan_hdr *vlan = NULL;
2811         struct rte_ipv6_hdr *ipv6 = NULL;
2812         struct rte_udp_hdr *udp = NULL;
2813         char *next_hdr;
2814         uint16_t proto;
2815
2816         eth = (struct rte_ether_hdr *)data;
2817         next_hdr = (char *)(eth + 1);
2818         proto = rte_be_to_cpu_16(eth->ether_type);
2819
2820         /* VLAN skipping */
2821         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2822                 vlan = (struct rte_vlan_hdr *)next_hdr;
2823                 proto = rte_be_to_cpu_16(vlan->eth_proto);
2824                 next_hdr += sizeof(struct rte_vlan_hdr);
2825         }
2826
2827         /* HW calculates the IPv4 csum, no need to proceed. */
2828         if (proto == RTE_ETHER_TYPE_IPV4)
2829                 return 0;
2830
2831         /* Non-IPv4/IPv6 header, not supported. */
2832         if (proto != RTE_ETHER_TYPE_IPV6) {
2833                 return rte_flow_error_set(error, ENOTSUP,
2834                                           RTE_FLOW_ERROR_TYPE_ACTION,
2835                                           NULL, "Cannot offload non IPv4/IPv6");
2836         }
2837
2838         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2839
2840         /* Ignore non-UDP packets. */
2841         if (ipv6->proto != IPPROTO_UDP)
2842                 return 0;
2843
2844         udp = (struct rte_udp_hdr *)(ipv6 + 1);
2845         udp->dgram_cksum = 0;
2846
2847         return 0;
2848 }
2849
2850 /**
2851  * Convert L2 encap action to DV specification.
2852  *
2853  * @param[in] dev
2854  *   Pointer to rte_eth_dev structure.
2855  * @param[in] action
2856  *   Pointer to action structure.
2857  * @param[in, out] dev_flow
2858  *   Pointer to the mlx5_flow.
2859  * @param[in] transfer
2860  *   Mark if the flow is E-Switch flow.
2861  * @param[out] error
2862  *   Pointer to the error structure.
2863  *
2864  * @return
2865  *   0 on success, a negative errno value otherwise and rte_errno is set.
2866  */
2867 static int
2868 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2869                                const struct rte_flow_action *action,
2870                                struct mlx5_flow *dev_flow,
2871                                uint8_t transfer,
2872                                struct rte_flow_error *error)
2873 {
2874         const struct rte_flow_item *encap_data;
2875         const struct rte_flow_action_raw_encap *raw_encap_data;
2876         struct mlx5_flow_dv_encap_decap_resource res = {
2877                 .reformat_type =
2878                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2879                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2880                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2881         };
2882
2883         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2884                 raw_encap_data =
2885                         (const struct rte_flow_action_raw_encap *)action->conf;
2886                 res.size = raw_encap_data->size;
2887                 memcpy(res.buf, raw_encap_data->data, res.size);
2888                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2889                         return -rte_errno;
2890         } else {
2891                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2892                         encap_data =
2893                                 ((const struct rte_flow_action_vxlan_encap *)
2894                                                 action->conf)->definition;
2895                 else
2896                         encap_data =
2897                                 ((const struct rte_flow_action_nvgre_encap *)
2898                                                 action->conf)->definition;
2899                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2900                                                &res.size, error))
2901                         return -rte_errno;
2902         }
2903         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2904                 return rte_flow_error_set(error, EINVAL,
2905                                           RTE_FLOW_ERROR_TYPE_ACTION,
2906                                           NULL, "can't create L2 encap action");
2907         return 0;
2908 }
2909
2910 /**
2911  * Convert L2 decap action to DV specification.
2912  *
2913  * @param[in] dev
2914  *   Pointer to rte_eth_dev structure.
2915  * @param[in, out] dev_flow
2916  *   Pointer to the mlx5_flow.
2917  * @param[in] transfer
2918  *   Mark if the flow is E-Switch flow.
2919  * @param[out] error
2920  *   Pointer to the error structure.
2921  *
2922  * @return
2923  *   0 on success, a negative errno value otherwise and rte_errno is set.
2924  */
2925 static int
2926 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2927                                struct mlx5_flow *dev_flow,
2928                                uint8_t transfer,
2929                                struct rte_flow_error *error)
2930 {
2931         struct mlx5_flow_dv_encap_decap_resource res = {
2932                 .size = 0,
2933                 .reformat_type =
2934                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2935                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2936                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2937         };
2938
2939         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2940                 return rte_flow_error_set(error, EINVAL,
2941                                           RTE_FLOW_ERROR_TYPE_ACTION,
2942                                           NULL, "can't create L2 decap action");
2943         return 0;
2944 }
2945
2946 /**
2947  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2948  *
2949  * @param[in] dev
2950  *   Pointer to rte_eth_dev structure.
2951  * @param[in] action
2952  *   Pointer to action structure.
2953  * @param[in, out] dev_flow
2954  *   Pointer to the mlx5_flow.
2955  * @param[in] attr
2956  *   Pointer to the flow attributes.
2957  * @param[out] error
2958  *   Pointer to the error structure.
2959  *
2960  * @return
2961  *   0 on success, a negative errno value otherwise and rte_errno is set.
2962  */
2963 static int
2964 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2965                                 const struct rte_flow_action *action,
2966                                 struct mlx5_flow *dev_flow,
2967                                 const struct rte_flow_attr *attr,
2968                                 struct rte_flow_error *error)
2969 {
2970         const struct rte_flow_action_raw_encap *encap_data;
2971         struct mlx5_flow_dv_encap_decap_resource res;
2972
2973         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2974         res.size = encap_data->size;
2975         memcpy(res.buf, encap_data->data, res.size);
2976         res.reformat_type = res.size < MLX5_ENCAPSULATION_DECISION_SIZE ?
2977                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
2978                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
2979         if (attr->transfer)
2980                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2981         else
2982                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2983                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2984         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2985                 return rte_flow_error_set(error, EINVAL,
2986                                           RTE_FLOW_ERROR_TYPE_ACTION,
2987                                           NULL, "can't create encap action");
2988         return 0;
2989 }
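
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): the size heuristic used
 * above. A raw buffer shorter than an Ethernet plus IPv4 header cannot
 * hold a complete L3 tunnel header, so it is taken as the L2 header to
 * restore after an L3 decapsulation; anything at least that large is
 * treated as an L3 tunnel header to prepend.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static uint32_t
example_raw_reformat_type(size_t raw_size)
{
	return raw_size < MLX5_ENCAPSULATION_DECISION_SIZE ?
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 :
		MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */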
2990
2991 /**
2992  * Create action push VLAN.
2993  *
2994  * @param[in] dev
2995  *   Pointer to rte_eth_dev structure.
2996  * @param[in] attr
2997  *   Pointer to the flow attributes.
2998  * @param[in] vlan
2999  *   Pointer to the VLAN header to push to the Ethernet header.
3000  * @param[in, out] dev_flow
3001  *   Pointer to the mlx5_flow.
3002  * @param[out] error
3003  *   Pointer to the error structure.
3004  *
3005  * @return
3006  *   0 on success, a negative errno value otherwise and rte_errno is set.
3007  */
3008 static int
3009 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
3010                                 const struct rte_flow_attr *attr,
3011                                 const struct rte_vlan_hdr *vlan,
3012                                 struct mlx5_flow *dev_flow,
3013                                 struct rte_flow_error *error)
3014 {
3015         struct mlx5_flow_dv_push_vlan_action_resource res;
3016
3017         res.vlan_tag =
3018                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
3019                                  vlan->vlan_tci);
3020         if (attr->transfer)
3021                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3022         else
3023                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3024                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
3025         return flow_dv_push_vlan_action_resource_register
3026                                             (dev, &res, dev_flow, error);
3027 }
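
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): as in the computation above,
 * the push VLAN argument is one big-endian 32-bit word with the TPID in
 * the upper 16 bits and the TCI in the lower 16, exactly as they appear
 * on the wire.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static rte_be32_t
example_push_vlan_tag(uint16_t tpid, uint16_t tci)
{
	return rte_cpu_to_be_32((uint32_t)tpid << 16 | tci);
}

/*
 * E.g. TPID 0x8100, PCP 3, VLAN ID 100:
 * example_push_vlan_tag(RTE_ETHER_TYPE_VLAN,
 *			 (3 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100);
 */
#endif /* MLX5_FLOW_DV_EXAMPLES */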
3028
3029 /**
3030  * Validate the modify-header actions.
3031  *
3032  * @param[in] action_flags
3033  *   Holds the actions detected until now.
3034  * @param[in] action
3035  *   Pointer to the modify action.
3036  * @param[out] error
3037  *   Pointer to error structure.
3038  *
3039  * @return
3040  *   0 on success, a negative errno value otherwise and rte_errno is set.
3041  */
3042 static int
3043 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
3044                                    const struct rte_flow_action *action,
3045                                    struct rte_flow_error *error)
3046 {
3047         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
3048                 return rte_flow_error_set(error, EINVAL,
3049                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3050                                           NULL, "action configuration not set");
3051         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
3052                 return rte_flow_error_set(error, EINVAL,
3053                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3054                                           "can't have encap action before"
3055                                           " modify action");
3056         return 0;
3057 }
3058
3059 /**
3060  * Validate the modify-header MAC address actions.
3061  *
3062  * @param[in] action_flags
3063  *   Holds the actions detected until now.
3064  * @param[in] action
3065  *   Pointer to the modify action.
3066  * @param[in] item_flags
3067  *   Holds the items detected.
3068  * @param[out] error
3069  *   Pointer to error structure.
3070  *
3071  * @return
3072  *   0 on success, a negative errno value otherwise and rte_errno is set.
3073  */
3074 static int
3075 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
3076                                    const struct rte_flow_action *action,
3077                                    const uint64_t item_flags,
3078                                    struct rte_flow_error *error)
3079 {
3080         int ret = 0;
3081
3082         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3083         if (!ret) {
3084                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
3085                         return rte_flow_error_set(error, EINVAL,
3086                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3087                                                   NULL,
3088                                                   "no L2 item in pattern");
3089         }
3090         return ret;
3091 }
3092
3093 /**
3094  * Validate the modify-header IPv4 address actions.
3095  *
3096  * @param[in] action_flags
3097  *   Holds the actions detected until now.
3098  * @param[in] action
3099  *   Pointer to the modify action.
3100  * @param[in] item_flags
3101  *   Holds the items detected.
3102  * @param[out] error
3103  *   Pointer to error structure.
3104  *
3105  * @return
3106  *   0 on success, a negative errno value otherwise and rte_errno is set.
3107  */
3108 static int
3109 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
3110                                     const struct rte_flow_action *action,
3111                                     const uint64_t item_flags,
3112                                     struct rte_flow_error *error)
3113 {
3114         int ret = 0;
3115
3116         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3117         if (!ret) {
3118                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3119                         return rte_flow_error_set(error, EINVAL,
3120                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3121                                                   NULL,
3122                                                   "no ipv4 item in pattern");
3123         }
3124         return ret;
3125 }
3126
3127 /**
3128  * Validate the modify-header IPv6 address actions.
3129  *
3130  * @param[in] action_flags
3131  *   Holds the actions detected until now.
3132  * @param[in] action
3133  *   Pointer to the modify action.
3134  * @param[in] item_flags
3135  *   Holds the items detected.
3136  * @param[out] error
3137  *   Pointer to error structure.
3138  *
3139  * @return
3140  *   0 on success, a negative errno value otherwise and rte_errno is set.
3141  */
3142 static int
3143 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
3144                                     const struct rte_flow_action *action,
3145                                     const uint64_t item_flags,
3146                                     struct rte_flow_error *error)
3147 {
3148         int ret = 0;
3149
3150         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3151         if (!ret) {
3152                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3153                         return rte_flow_error_set(error, EINVAL,
3154                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3155                                                   NULL,
3156                                                   "no ipv6 item in pattern");
3157         }
3158         return ret;
3159 }
3160
3161 /**
3162  * Validate the modify-header TP actions.
3163  *
3164  * @param[in] action_flags
3165  *   Holds the actions detected until now.
3166  * @param[in] action
3167  *   Pointer to the modify action.
3168  * @param[in] item_flags
3169  *   Holds the items detected.
3170  * @param[out] error
3171  *   Pointer to error structure.
3172  *
3173  * @return
3174  *   0 on success, a negative errno value otherwise and rte_errno is set.
3175  */
3176 static int
3177 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
3178                                   const struct rte_flow_action *action,
3179                                   const uint64_t item_flags,
3180                                   struct rte_flow_error *error)
3181 {
3182         int ret = 0;
3183
3184         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3185         if (!ret) {
3186                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
3187                         return rte_flow_error_set(error, EINVAL,
3188                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3189                                                   NULL, "no transport layer "
3190                                                   "in pattern");
3191         }
3192         return ret;
3193 }
3194
3195 /**
3196  * Validate the modify-header actions of increment/decrement
3197  * TCP Sequence-number.
3198  *
3199  * @param[in] action_flags
3200  *   Holds the actions detected until now.
3201  * @param[in] action
3202  *   Pointer to the modify action.
3203  * @param[in] item_flags
3204  *   Holds the items detected.
3205  * @param[out] error
3206  *   Pointer to error structure.
3207  *
3208  * @return
3209  *   0 on success, a negative errno value otherwise and rte_errno is set.
3210  */
3211 static int
3212 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
3213                                        const struct rte_flow_action *action,
3214                                        const uint64_t item_flags,
3215                                        struct rte_flow_error *error)
3216 {
3217         int ret = 0;
3218
3219         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3220         if (!ret) {
3221                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3222                         return rte_flow_error_set(error, EINVAL,
3223                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3224                                                   NULL, "no TCP item in"
3225                                                   " pattern");
3226                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
3227                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
3228                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
3229                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
3230                         return rte_flow_error_set(error, EINVAL,
3231                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3232                                                   NULL,
3233                                                   "cannot decrease and increase"
3234                                                   " TCP sequence number"
3235                                                   " at the same time");
3236         }
3237         return ret;
3238 }
3239
3240 /**
3241  * Validate the modify-header actions of increment/decrement
3242  * TCP Acknowledgment number.
3243  *
3244  * @param[in] action_flags
3245  *   Holds the actions detected until now.
3246  * @param[in] action
3247  *   Pointer to the modify action.
3248  * @param[in] item_flags
3249  *   Holds the items detected.
3250  * @param[out] error
3251  *   Pointer to error structure.
3252  *
3253  * @return
3254  *   0 on success, a negative errno value otherwise and rte_errno is set.
3255  */
3256 static int
3257 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
3258                                        const struct rte_flow_action *action,
3259                                        const uint64_t item_flags,
3260                                        struct rte_flow_error *error)
3261 {
3262         int ret = 0;
3263
3264         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3265         if (!ret) {
3266                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
3267                         return rte_flow_error_set(error, EINVAL,
3268                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3269                                                   NULL, "no TCP item in"
3270                                                   " pattern");
3271                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
3272                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
3273                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
3274                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
3275                         return rte_flow_error_set(error, EINVAL,
3276                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3277                                                   NULL,
3278                                                   "cannot decrease and increase"
3279                                                   " TCP acknowledgment number"
3280                                                   " at the same time");
3281         }
3282         return ret;
3283 }
3284
3285 /**
3286  * Validate the modify-header TTL actions.
3287  *
3288  * @param[in] action_flags
3289  *   Holds the actions detected until now.
3290  * @param[in] action
3291  *   Pointer to the modify action.
3292  * @param[in] item_flags
3293  *   Holds the items detected.
3294  * @param[out] error
3295  *   Pointer to error structure.
3296  *
3297  * @return
3298  *   0 on success, a negative errno value otherwise and rte_errno is set.
3299  */
3300 static int
3301 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
3302                                    const struct rte_flow_action *action,
3303                                    const uint64_t item_flags,
3304                                    struct rte_flow_error *error)
3305 {
3306         int ret = 0;
3307
3308         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3309         if (!ret) {
3310                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
3311                         return rte_flow_error_set(error, EINVAL,
3312                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3313                                                   NULL,
3314                                                   "no IP protocol in pattern");
3315         }
3316         return ret;
3317 }
3318
3319 /**
3320  * Validate jump action.
3321  *
3322  * @param[in] action
3323  *   Pointer to the jump action.
3324  * @param[in] action_flags
3325  *   Holds the actions detected until now.
3326  * @param[in] attributes
3327  *   Pointer to flow attributes
3328  * @param[in] external
3329  *   Action belongs to flow rule created by request external to PMD.
3330  * @param[out] error
3331  *   Pointer to error structure.
3332  *
3333  * @return
3334  *   0 on success, a negative errno value otherwise and rte_errno is set.
3335  */
3336 static int
3337 flow_dv_validate_action_jump(const struct rte_flow_action *action,
3338                              uint64_t action_flags,
3339                              const struct rte_flow_attr *attributes,
3340                              bool external, struct rte_flow_error *error)
3341 {
3342         uint32_t target_group, table;
3343         int ret = 0;
3344
3345         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3346                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3347                 return rte_flow_error_set(error, EINVAL,
3348                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3349                                           "can't have 2 fate actions in"
3350                                           " same flow");
3351         if (action_flags & MLX5_FLOW_ACTION_METER)
3352                 return rte_flow_error_set(error, ENOTSUP,
3353                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3354                                           "jump with meter not supported");
3355         if (!action->conf)
3356                 return rte_flow_error_set(error, EINVAL,
3357                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3358                                           NULL, "action configuration not set");
3359         target_group =
3360                 ((const struct rte_flow_action_jump *)action->conf)->group;
3361         ret = mlx5_flow_group_to_table(attributes, external, target_group,
3362                                        &table, error);
3363         if (ret)
3364                 return ret;
3365         if (attributes->group == target_group)
3366                 return rte_flow_error_set(error, EINVAL,
3367                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3368                                           "target group must be other than"
3369                                           " the current flow group");
3370         return 0;
3371 }
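
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): a jump action that would
 * pass the validation above when used from group 0 - the target group
 * must differ from the group the flow is created in.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_jump example_jump_conf = { .group = 1 };
static const struct rte_flow_action example_jump_action = {
	.type = RTE_FLOW_ACTION_TYPE_JUMP,
	.conf = &example_jump_conf,
};
#endif /* MLX5_FLOW_DV_EXAMPLES */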
3372
3373 /**
3374  * Validate the port_id action.
3375  *
3376  * @param[in] dev
3377  *   Pointer to rte_eth_dev structure.
3378  * @param[in] action_flags
3379  *   Bit-fields that holds the actions detected until now.
3380  * @param[in] action
3381  *   Port_id RTE action structure.
3382  * @param[in] attr
3383  *   Attributes of flow that includes this action.
3384  * @param[out] error
3385  *   Pointer to error structure.
3386  *
3387  * @return
3388  *   0 on success, a negative errno value otherwise and rte_errno is set.
3389  */
3390 static int
3391 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
3392                                 uint64_t action_flags,
3393                                 const struct rte_flow_action *action,
3394                                 const struct rte_flow_attr *attr,
3395                                 struct rte_flow_error *error)
3396 {
3397         const struct rte_flow_action_port_id *port_id;
3398         struct mlx5_priv *act_priv;
3399         struct mlx5_priv *dev_priv;
3400         uint16_t port;
3401
3402         if (!attr->transfer)
3403                 return rte_flow_error_set(error, ENOTSUP,
3404                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3405                                           NULL,
3406                                           "port id action is valid in transfer"
3407                                           " mode only");
3408         if (!action || !action->conf)
3409                 return rte_flow_error_set(error, ENOTSUP,
3410                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
3411                                           NULL,
3412                                           "port id action parameters must be"
3413                                           " specified");
3414         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
3415                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3416                 return rte_flow_error_set(error, EINVAL,
3417                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3418                                           "can have only one fate action in"
3419                                           " a flow");
3420         dev_priv = mlx5_dev_to_eswitch_info(dev);
3421         if (!dev_priv)
3422                 return rte_flow_error_set(error, rte_errno,
3423                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3424                                           NULL,
3425                                           "failed to obtain E-Switch info");
3426         port_id = action->conf;
3427         port = port_id->original ? dev->data->port_id : port_id->id;
3428         act_priv = mlx5_port_to_eswitch_info(port, false);
3429         if (!act_priv)
3430                 return rte_flow_error_set
3431                                 (error, rte_errno,
3432                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
3433                                  "failed to obtain E-Switch port id for port");
3434         if (act_priv->domain_id != dev_priv->domain_id)
3435                 return rte_flow_error_set
3436                                 (error, EINVAL,
3437                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3438                                  "port does not belong to"
3439                                  " E-Switch being configured");
3440         return 0;
3441 }
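
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): a port_id action
 * configuration as validated above - only meaningful on transfer
 * (E-Switch) flows, and the target port must share the E-Switch domain
 * of the device the rule is created on.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_port_id example_port_id_conf = {
	.original = 0,
	.id = 1, /* Destination DPDK port id (hypothetical). */
};
#endif /* MLX5_FLOW_DV_EXAMPLES */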
3442
3443 /**
3444  * Get the maximum number of modify header actions.
3445  *
3446  * @param dev
3447  *   Pointer to rte_eth_dev structure.
3448  *
3449  * @return
3450  *   Max number of modify header actions device can support.
3451  */
3452 static unsigned int
3453 flow_dv_modify_hdr_action_max(struct rte_eth_dev *dev)
3454 {
3455         /*
3456          * There's no way to directly query the max capacity. Although it has
3457          * to be acquired by iterative trial, it is a safe assumption that
3458          * more actions are supported by the FW if the extensive metadata
3459          * register is supported.
3460          */
3461         return mlx5_flow_ext_mreg_supported(dev) ? MLX5_MODIFY_NUM :
3462                                                    MLX5_MODIFY_NUM_NO_MREG;
3463 }
3464
3465 /**
3466  * Validate the meter action.
3467  *
3468  * @param[in] dev
3469  *   Pointer to rte_eth_dev structure.
3470  * @param[in] action_flags
3471  *   Bit-fields that holds the actions detected until now.
3472  * @param[in] action
3473  *   Pointer to the meter action.
3474  * @param[in] attr
3475  *   Attributes of flow that includes this action.
3476  * @param[out] error
3477  *   Pointer to error structure.
3478  *
3479  * @return
3480  *   0 on success, a negative errno value otherwise and rte_errno is set.
3481  */
3482 static int
3483 mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
3484                                 uint64_t action_flags,
3485                                 const struct rte_flow_action *action,
3486                                 const struct rte_flow_attr *attr,
3487                                 struct rte_flow_error *error)
3488 {
3489         struct mlx5_priv *priv = dev->data->dev_private;
3490         const struct rte_flow_action_meter *am = action->conf;
3491         struct mlx5_flow_meter *fm;
3492
3493         if (!am)
3494                 return rte_flow_error_set(error, EINVAL,
3495                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3496                                           "meter action conf is NULL");
3497
3498         if (action_flags & MLX5_FLOW_ACTION_METER)
3499                 return rte_flow_error_set(error, ENOTSUP,
3500                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3501                                           "meter chaining not supported");
3502         if (action_flags & MLX5_FLOW_ACTION_JUMP)
3503                 return rte_flow_error_set(error, ENOTSUP,
3504                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3505                                           "meter with jump not supported");
3506         if (!priv->mtr_en)
3507                 return rte_flow_error_set(error, ENOTSUP,
3508                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3509                                           NULL,
3510                                           "meter action not supported");
3511         fm = mlx5_flow_meter_find(priv, am->mtr_id);
3512         if (!fm)
3513                 return rte_flow_error_set(error, EINVAL,
3514                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3515                                           "Meter not found");
3516         if (fm->ref_cnt && (!(fm->attr.transfer == attr->transfer ||
3517               (!fm->attr.ingress && !attr->ingress && attr->egress) ||
3518               (!fm->attr.egress && !attr->egress && attr->ingress))))
3519                 return rte_flow_error_set(error, EINVAL,
3520                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3521                                           "Flow attributes are either invalid "
3522                                           "or have a conflict with current "
3523                                           "meter attributes");
3524         return 0;
3525 }
3526
3527 /**
3528  * Validate the modify-header IPv4 DSCP actions.
3529  *
3530  * @param[in] action_flags
3531  *   Holds the actions detected until now.
3532  * @param[in] action
3533  *   Pointer to the modify action.
3534  * @param[in] item_flags
3535  *   Holds the items detected.
3536  * @param[out] error
3537  *   Pointer to error structure.
3538  *
3539  * @return
3540  *   0 on success, a negative errno value otherwise and rte_errno is set.
3541  */
3542 static int
3543 flow_dv_validate_action_modify_ipv4_dscp(const uint64_t action_flags,
3544                                          const struct rte_flow_action *action,
3545                                          const uint64_t item_flags,
3546                                          struct rte_flow_error *error)
3547 {
3548         int ret = 0;
3549
3550         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3551         if (!ret) {
3552                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
3553                         return rte_flow_error_set(error, EINVAL,
3554                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3555                                                   NULL,
3556                                                   "no ipv4 item in pattern");
3557         }
3558         return ret;
3559 }
3560
3561 /**
3562  * Validate the modify-header IPv6 DSCP actions.
3563  *
3564  * @param[in] action_flags
3565  *   Holds the actions detected until now.
3566  * @param[in] action
3567  *   Pointer to the modify action.
3568  * @param[in] item_flags
3569  *   Holds the items detected.
3570  * @param[out] error
3571  *   Pointer to error structure.
3572  *
3573  * @return
3574  *   0 on success, a negative errno value otherwise and rte_errno is set.
3575  */
3576 static int
3577 flow_dv_validate_action_modify_ipv6_dscp(const uint64_t action_flags,
3578                                          const struct rte_flow_action *action,
3579                                          const uint64_t item_flags,
3580                                          struct rte_flow_error *error)
3581 {
3582         int ret = 0;
3583
3584         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
3585         if (!ret) {
3586                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
3587                         return rte_flow_error_set(error, EINVAL,
3588                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3589                                                   NULL,
3590                                                   "no ipv6 item in pattern");
3591         }
3592         return ret;
3593 }
3594
3595 /**
3596  * Find existing modify-header resource or create and register a new one.
3597  *
3598  * @param[in, out] dev
3599  *   Pointer to rte_eth_dev structure.
3600  * @param[in, out] resource
3601  *   Pointer to modify-header resource.
3602  * @param[in, out] dev_flow
3603  *   Pointer to the dev_flow.
3604  * @param[out] error
3605  *   Pointer to error structure.
3606  *
3607  * @return
3608  *   0 on success, otherwise -errno and errno is set.
3609  */
3610 static int
3611 flow_dv_modify_hdr_resource_register
3612                         (struct rte_eth_dev *dev,
3613                          struct mlx5_flow_dv_modify_hdr_resource *resource,
3614                          struct mlx5_flow *dev_flow,
3615                          struct rte_flow_error *error)
3616 {
3617         struct mlx5_priv *priv = dev->data->dev_private;
3618         struct mlx5_ibv_shared *sh = priv->sh;
3619         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
3620         struct mlx5dv_dr_domain *ns;
3621
3622         if (resource->actions_num > flow_dv_modify_hdr_action_max(dev))
3623                 return rte_flow_error_set(error, EOVERFLOW,
3624                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
3625                                           "too many modify header items");
3626         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
3627                 ns = sh->fdb_domain;
3628         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
3629                 ns = sh->tx_domain;
3630         else
3631                 ns = sh->rx_domain;
3632         resource->flags =
3633                 dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
3634         /* Lookup a matching resource from cache. */
3635         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
3636                 if (resource->ft_type == cache_resource->ft_type &&
3637                     resource->actions_num == cache_resource->actions_num &&
3638                     resource->flags == cache_resource->flags &&
3639                     !memcmp((const void *)resource->actions,
3640                             (const void *)cache_resource->actions,
3641                             (resource->actions_num *
3642                                             sizeof(resource->actions[0])))) {
3643                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
3644                                 (void *)cache_resource,
3645                                 rte_atomic32_read(&cache_resource->refcnt));
3646                         rte_atomic32_inc(&cache_resource->refcnt);
3647                         dev_flow->dv.modify_hdr = cache_resource;
3648                         return 0;
3649                 }
3650         }
3651         /* Register new modify-header resource. */
3652         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3653         if (!cache_resource)
3654                 return rte_flow_error_set(error, ENOMEM,
3655                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3656                                           "cannot allocate resource memory");
3657         *cache_resource = *resource;
3658         cache_resource->verbs_action =
3659                 mlx5_glue->dv_create_flow_action_modify_header
3660                                         (sh->ctx, cache_resource->ft_type,
3661                                          ns, cache_resource->flags,
3662                                          cache_resource->actions_num *
3663                                          sizeof(cache_resource->actions[0]),
3664                                          (uint64_t *)cache_resource->actions);
3665         if (!cache_resource->verbs_action) {
3666                 rte_free(cache_resource);
3667                 return rte_flow_error_set(error, ENOMEM,
3668                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3669                                           NULL, "cannot create action");
3670         }
3671         rte_atomic32_init(&cache_resource->refcnt);
3672         rte_atomic32_inc(&cache_resource->refcnt);
3673         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
3674         dev_flow->dv.modify_hdr = cache_resource;
3675         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
3676                 (void *)cache_resource,
3677                 rte_atomic32_read(&cache_resource->refcnt));
3678         return 0;
3679 }
3680
3681 #define MLX5_CNT_CONTAINER_RESIZE 64
3682
3683 /**
3684  * Get or create a flow counter.
3685  *
3686  * @param[in] dev
3687  *   Pointer to the Ethernet device structure.
3688  * @param[in] shared
3689  *   Indicate if this counter is shared with other flows.
3690  * @param[in] id
3691  *   Counter identifier.
3692  *
3693  * @return
3694  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
3695  */
3696 static struct mlx5_flow_counter *
3697 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
3698                                uint32_t id)
3699 {
3700         struct mlx5_priv *priv = dev->data->dev_private;
3701         struct mlx5_flow_counter *cnt = NULL;
3702         struct mlx5_devx_obj *dcs = NULL;
3703
3704         if (!priv->config.devx) {
3705                 rte_errno = ENOTSUP;
3706                 return NULL;
3707         }
3708         if (shared) {
3709                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
3710                         if (cnt->shared && cnt->id == id) {
3711                                 cnt->ref_cnt++;
3712                                 return cnt;
3713                         }
3714                 }
3715         }
3716         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3717         if (!dcs)
3718                 return NULL;
3719         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
3720         if (!cnt) {
3721                 claim_zero(mlx5_devx_cmd_destroy(dcs));
3722                 rte_errno = ENOMEM;
3723                 return NULL;
3724         }
3725         struct mlx5_flow_counter tmpl = {
3726                 .shared = shared,
3727                 .ref_cnt = 1,
3728                 .id = id,
3729                 .dcs = dcs,
3730         };
3731         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
3732         if (!tmpl.action) {
3733                 claim_zero(mlx5_devx_cmd_destroy(dcs));
3734                 rte_errno = errno;
3735                 rte_free(cnt);
3736                 return NULL;
3737         }
3738         *cnt = tmpl;
3739         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
3740         return cnt;
3741 }
3742
3743 /**
3744  * Release a flow counter.
3745  *
3746  * @param[in] dev
3747  *   Pointer to the Ethernet device structure.
3748  * @param[in] counter
3749  *   Pointer to the counter handler.
3750  */
3751 static void
3752 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
3753                                  struct mlx5_flow_counter *counter)
3754 {
3755         struct mlx5_priv *priv = dev->data->dev_private;
3756
3757         if (!counter)
3758                 return;
3759         if (--counter->ref_cnt == 0) {
3760                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
3761                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
3762                 rte_free(counter);
3763         }
3764 }
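
/*
 * Illustrative sketch, not part of the driver (compiled out under the
 * hypothetical MLX5_FLOW_DV_EXAMPLES macro): allocating and releasing a
 * shared fallback counter. A second allocation with the same (shared, id)
 * pair returns the cached counter with its reference count bumped, so
 * each allocation must be paired with a release.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_shared_counter(struct rte_eth_dev *dev)
{
	struct mlx5_flow_counter *c1 =
		flow_dv_counter_alloc_fallback(dev, 1, 42);
	struct mlx5_flow_counter *c2 =
		flow_dv_counter_alloc_fallback(dev, 1, 42);

	/* When both succeed, c1 == c2 and ref_cnt is 2. */
	flow_dv_counter_release_fallback(dev, c2);
	flow_dv_counter_release_fallback(dev, c1);
}
#endif /* MLX5_FLOW_DV_EXAMPLES */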
3765
3766 /**
3767  * Query a devx flow counter.
3768  *
3769  * @param[in] dev
3770  *   Pointer to the Ethernet device structure.
3771  * @param[in] cnt
3772  *   Pointer to the flow counter.
3773  * @param[out] pkts
3774  *   The statistics value of packets.
3775  * @param[out] bytes
3776  *   The statistics value of bytes.
3777  *
3778  * @return
3779  *   0 on success, otherwise a negative errno value and rte_errno is set.
3780  */
3781 static inline int
3782 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
3783                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
3784                      uint64_t *bytes)
3785 {
3786         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
3787                                                 0, NULL, NULL, 0);
3788 }
3789
3790 /**
3791  * Get a pool by a counter.
3792  *
3793  * @param[in] cnt
3794  *   Pointer to the counter.
3795  *
3796  * @return
3797  *   The counter pool.
3798  */
3799 static struct mlx5_flow_counter_pool *
3800 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
3801 {
3802         if (!cnt->batch) {
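                /*
                 * Counters that were not allocated by batch each carry
                 * their own dcs, and sit in the pool counter array at
                 * index (dcs->id % MLX5_COUNTERS_PER_POOL). That array
                 * immediately follows the pool structure, so stepping
                 * back to counter 0 and then back over one pool
                 * structure yields the owning pool.
                 */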
3803                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
3804                 return (struct mlx5_flow_counter_pool *)cnt - 1;
3805         }
3806         return cnt->pool;
3807 }
3808
3809 /**
3810  * Get a pool by devx counter ID.
3811  *
3812  * @param[in] cont
3813  *   Pointer to the counter container.
3814  * @param[in] id
3815  *   The counter devx ID.
3816  *
3817  * @return
3818  *   The counter pool pointer if it exists, NULL otherwise.
3819  */
3820 static struct mlx5_flow_counter_pool *
3821 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
3822 {
3823         struct mlx5_flow_counter_pool *pool;
3824
3825         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3826                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
3827                                 MLX5_COUNTERS_PER_POOL;
3828
3829                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
3830                         return pool;
3831         }
3832         return NULL;
3833 }
3834
3835 /**
3836  * Allocate memory for the counter values, wrapped by all the needed
3837  * management structures.
3838  *
3839  * @param[in] dev
3840  *   Pointer to the Ethernet device structure.
3841  * @param[in] raws_n
3842  *   The number of raw memory areas, each holding MLX5_COUNTERS_PER_POOL counters.
3843  *
3844  * @return
3845  *   The new memory management pointer on success, otherwise NULL and rte_errno
3846  *   is set.
3847  */
3848 static struct mlx5_counter_stats_mem_mng *
3849 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
3850 {
3851         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
3852                                         (dev->data->dev_private))->sh;
3853         struct mlx5_devx_mkey_attr mkey_attr;
3854         struct mlx5_counter_stats_mem_mng *mem_mng;
3855         volatile struct flow_counter_stats *raw_data;
3856         int size = (sizeof(struct flow_counter_stats) *
3857                         MLX5_COUNTERS_PER_POOL +
3858                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
3859                         sizeof(struct mlx5_counter_stats_mem_mng);
3860         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
3861         int i;
3862
3863         if (!mem) {
3864                 rte_errno = ENOMEM;
3865                 return NULL;
3866         }
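             /*
              * Single allocation layout: the raw counter records come
              * first, then the raws_n descriptor array, and the management
              * structure itself sits at the very end of the buffer.
              */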
3867         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
3868         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
3869         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
3870                                                  IBV_ACCESS_LOCAL_WRITE);
3871         if (!mem_mng->umem) {
3872                 rte_errno = errno;
3873                 rte_free(mem);
3874                 return NULL;
3875         }
3876         mkey_attr.addr = (uintptr_t)mem;
3877         mkey_attr.size = size;
3878         mkey_attr.umem_id = mem_mng->umem->umem_id;
3879         mkey_attr.pd = sh->pdn;
3880         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
3881         if (!mem_mng->dm) {
3882                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
3883                 rte_errno = errno;
3884                 rte_free(mem);
3885                 return NULL;
3886         }
3887         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3888         raw_data = (volatile struct flow_counter_stats *)mem;
3889         for (i = 0; i < raws_n; ++i) {
3890                 mem_mng->raws[i].mem_mng = mem_mng;
3891                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3892         }
3893         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3894         return mem_mng;
3895 }
3896
3897 /**
3898  * Resize a counter container.
3899  *
3900  * @param[in] dev
3901  *   Pointer to the Ethernet device structure.
3902  * @param[in] batch
3903  *   Whether the pool is for counters that were allocated by batch command.
3904  *
3905  * @return
3906  *   The new container pointer on success, otherwise NULL and rte_errno is set.
3907  */
3908 static struct mlx5_pools_container *
3909 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3910 {
3911         struct mlx5_priv *priv = dev->data->dev_private;
3912         struct mlx5_pools_container *cont =
3913                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
3914         struct mlx5_pools_container *new_cont =
3915                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
3916         struct mlx5_counter_stats_mem_mng *mem_mng;
3917         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
3918         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
3919         int i;
3920
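             /*
              * Containers are double buffered: the data path uses the
              * current master copy while the resize fills the unused one,
              * then flips the master index to publish it atomically.
              */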
3921         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
3922                 /* The last resize was not detected by the host thread yet. */
3923                 rte_errno = EAGAIN;
3924                 return NULL;
3925         }
3926         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
3927         if (!new_cont->pools) {
3928                 rte_errno = ENOMEM;
3929                 return NULL;
3930         }
3931         if (cont->n)
3932                 memcpy(new_cont->pools, cont->pools, cont->n *
3933                        sizeof(struct mlx5_flow_counter_pool *));
3934         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
3935                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
3936         if (!mem_mng) {
3937                 rte_free(new_cont->pools);
3938                 return NULL;
3939         }
3940         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
3941                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
3942                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
3943                                  i, next);
3944         new_cont->n = resize;
3945         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
3946         TAILQ_INIT(&new_cont->pool_list);
3947         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
3948         new_cont->init_mem_mng = mem_mng;
3949         rte_cio_wmb();
3950         /* Flip the master container. */
3951         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
3952         return new_cont;
3953 }
3954
3955 /**
3956  * Query a devx flow counter.
3957  *
3958  * @param[in] dev
3959  *   Pointer to the Ethernet device structure.
3960  * @param[in] cnt
3961  *   Pointer to the flow counter.
3962  * @param[out] pkts
3963  *   The statistics value of packets.
3964  * @param[out] bytes
3965  *   The statistics value of bytes.
3966  *
3967  * @return
3968  *   0 on success, otherwise a negative errno value and rte_errno is set.
3969  */
3970 static inline int
3971 _flow_dv_query_count(struct rte_eth_dev *dev,
3972                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
3973                      uint64_t *bytes)
3974 {
3975         struct mlx5_priv *priv = dev->data->dev_private;
3976         struct mlx5_flow_counter_pool *pool =
3977                         flow_dv_counter_pool_get(cnt);
3978         int offset = cnt - &pool->counters_raw[0];
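             /* The counter's index within the pool selects its record in
              * the pool's raw statistics area. */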
3979
3980         if (priv->counter_fallback)
3981                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
3982
3983         rte_spinlock_lock(&pool->sl);
3984         /*
3985          * A single counter allocation may produce an ID smaller than
3986          * the one currently being read by the host thread in parallel.
3987          * In this case the new counter values must be reported as 0.
3988          */
3989         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
3990                 *pkts = 0;
3991                 *bytes = 0;
3992         } else {
3993                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
3994                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
3995         }
3996         rte_spinlock_unlock(&pool->sl);
3997         return 0;
3998 }
3999
4000 /**
4001  * Create and initialize a new counter pool.
4002  *
4003  * @param[in] dev
4004  *   Pointer to the Ethernet device structure.
4005  * @param[out] dcs
4006  *   The devX counter handle.
4007  * @param[in] batch
4008  *   Whether the pool is for counters that were allocated by batch command.
4009  *
4010  * @return
4011  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
4012  */
4013 static struct mlx5_flow_counter_pool *
4014 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
4015                     uint32_t batch)
4016 {
4017         struct mlx5_priv *priv = dev->data->dev_private;
4018         struct mlx5_flow_counter_pool *pool;
4019         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4020                                                                0);
4021         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
4022         uint32_t size;
4023
4024         if (cont->n == n_valid) {
4025                 cont = flow_dv_container_resize(dev, batch);
4026                 if (!cont)
4027                         return NULL;
4028         }
4029         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
4030                         sizeof(struct mlx5_flow_counter);
4031         pool = rte_calloc(__func__, 1, size, 0);
4032         if (!pool) {
4033                 rte_errno = ENOMEM;
4034                 return NULL;
4035         }
4036         pool->min_dcs = dcs;
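             /*
              * Map the pool to one of the per-pool raw data areas; the
              * modulo wraps the global pool index into the areas provided
              * by the current memory management (one resize worth of pools).
              */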
4037         pool->raw = cont->init_mem_mng->raws + n_valid %
4038                                                      MLX5_CNT_CONTAINER_RESIZE;
4039         pool->raw_hw = NULL;
4040         rte_spinlock_init(&pool->sl);
4041         /*
4042          * The newly allocated counters in this pool have generation 0;
4043          * setting the pool query generation to 2 makes all of them
4044          * valid for allocation.
4044          */
4045         rte_atomic64_set(&pool->query_gen, 0x2);
4046         TAILQ_INIT(&pool->counters);
4047         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4048         cont->pools[n_valid] = pool;
4049         /* Pool initialization must be visible before host thread access. */
4050         rte_cio_wmb();
4051         rte_atomic16_add(&cont->n_valid, 1);
4052         return pool;
4053 }
4054
4055 /**
4056  * Prepare a new counter and/or a new counter pool.
4057  *
4058  * @param[in] dev
4059  *   Pointer to the Ethernet device structure.
4060  * @param[out] cnt_free
4061  *   Where to put the pointer of a new counter.
4062  * @param[in] batch
4063  *   Whether the pool is for counters that were allocated by batch command.
4064  *
4065  * @return
4066  *   The free counter pool pointer and @p cnt_free is set on success,
4067  *   NULL otherwise and rte_errno is set.
4068  */
4069 static struct mlx5_flow_counter_pool *
4070 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
4071                              struct mlx5_flow_counter **cnt_free,
4072                              uint32_t batch)
4073 {
4074         struct mlx5_priv *priv = dev->data->dev_private;
4075         struct mlx5_flow_counter_pool *pool;
4076         struct mlx5_devx_obj *dcs = NULL;
4077         struct mlx5_flow_counter *cnt;
4078         uint32_t i;
4079
4080         if (!batch) {
4081                 /* bulk_bitmap must be 0 for single counter allocation. */
4082                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
4083                 if (!dcs)
4084                         return NULL;
4085                 pool = flow_dv_find_pool_by_id
4086                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
4087                 if (!pool) {
4088                         pool = flow_dv_pool_create(dev, dcs, batch);
4089                         if (!pool) {
4090                                 mlx5_devx_cmd_destroy(dcs);
4091                                 return NULL;
4092                         }
4093                 } else if (dcs->id < pool->min_dcs->id) {
4094                         rte_atomic64_set(&pool->a64_dcs,
4095                                          (int64_t)(uintptr_t)dcs);
4096                 }
4097                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
4098                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4099                 cnt->dcs = dcs;
4100                 *cnt_free = cnt;
4101                 return pool;
4102         }
4103         /* bulk_bitmap is in 128 counters units. */
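             /* The 0x4 bit requests a bulk of 4 * 128 counters, i.e. one
              * full pool of MLX5_COUNTERS_PER_POOL counters. */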
4104         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
4105                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
4106         if (!dcs) {
4107                 rte_errno = ENODATA;
4108                 return NULL;
4109         }
4110         pool = flow_dv_pool_create(dev, dcs, batch);
4111         if (!pool) {
4112                 mlx5_devx_cmd_destroy(dcs);
4113                 return NULL;
4114         }
4115         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4116                 cnt = &pool->counters_raw[i];
4117                 cnt->pool = pool;
4118                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
4119         }
4120         *cnt_free = &pool->counters_raw[0];
4121         return pool;
4122 }
4123
4124 /**
4125  * Search for an existing shared counter.
4126  *
4127  * @param[in] cont
4128  *   Pointer to the relevant counter pool container.
4129  * @param[in] id
4130  *   The shared counter ID to search.
4131  *
4132  * @return
4133  *   NULL if it does not exist, otherwise a pointer to the shared counter.
4134  */
4135 static struct mlx5_flow_counter *
4136 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
4137                               uint32_t id)
4138 {
4139         struct mlx5_flow_counter *cnt;
4140         struct mlx5_flow_counter_pool *pool;
4141         int i;
4142
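             /* A plain linear scan over all pools and counters; acceptable
              * as long as shared counters are created rarely. */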
4143         TAILQ_FOREACH(pool, &cont->pool_list, next) {
4144                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
4145                         cnt = &pool->counters_raw[i];
4146                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
4147                                 return cnt;
4148                 }
4149         }
4150         return NULL;
4151 }
4152
4153 /**
4154  * Allocate a flow counter.
4155  *
4156  * @param[in] dev
4157  *   Pointer to the Ethernet device structure.
4158  * @param[in] shared
4159  *   Indicate if this counter is shared with other flows.
4160  * @param[in] id
4161  *   Counter identifier.
4162  * @param[in] group
4163  *   Counter flow group.
4164  *
4165  * @return
4166  *   Pointer to the flow counter on success, NULL otherwise and rte_errno is set.
4167  */
4168 static struct mlx5_flow_counter *
4169 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
4170                       uint16_t group)
4171 {
4172         struct mlx5_priv *priv = dev->data->dev_private;
4173         struct mlx5_flow_counter_pool *pool = NULL;
4174         struct mlx5_flow_counter *cnt_free = NULL;
4175         /*
4176          * Currently group 0 flow counter cannot be assigned to a flow if it is
4177          * not the first one in the batch counter allocation, so it is better
4178          * to allocate counters one by one for these flows in a separate
4179          * container.
4180                  * A counter can be shared between different groups, so the
4181                  * shared counters are taken from the single-counter container.
4182          */
4183         uint32_t batch = (group && !shared) ? 1 : 0;
4184         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
4185                                                                0);
4186
4187         if (priv->counter_fallback)
4188                 return flow_dv_counter_alloc_fallback(dev, shared, id);
4189         if (!priv->config.devx) {
4190                 rte_errno = ENOTSUP;
4191                 return NULL;
4192         }
4193         if (shared) {
4194                 cnt_free = flow_dv_counter_shared_search(cont, id);
4195                 if (cnt_free) {
4196                         if (cnt_free->ref_cnt + 1 == 0) {
4197                                 rte_errno = E2BIG;
4198                                 return NULL;
4199                         }
4200                         cnt_free->ref_cnt++;
4201                         return cnt_free;
4202                 }
4203         }
4204         /* Pools which have free counters are at the start of the list. */
4205         TAILQ_FOREACH(pool, &cont->pool_list, next) {
4206                 /*
4207                  * The free counter reset values must be updated between the
4208                  * counter release and the counter allocation, so at least
4209                  * one query must be done in this period. Ensure it by
4210                  * saving the query generation at release time.
4211                  * The free list is sorted by generation, so if the first
4212                  * counter is not updated, none of the others are
4213                  * updated either.
4214                  */
4215                 cnt_free = TAILQ_FIRST(&pool->counters);
4216                 if (cnt_free && cnt_free->query_gen + 1 <
4217                     rte_atomic64_read(&pool->query_gen))
4218                         break;
4219                 cnt_free = NULL;
4220         }
4221         if (!cnt_free) {
4222                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
4223                 if (!pool)
4224                         return NULL;
4225         }
4226         cnt_free->batch = batch;
4227         /* Create a DV counter action only on the first use. */
4228         if (!cnt_free->action) {
4229                 uint16_t offset;
4230                 struct mlx5_devx_obj *dcs;
4231
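                     /*
                      * Batch counters are all offsets within the pool's
                      * single bulk devx object, while each single-allocated
                      * counter carries its own devx object at offset 0.
                      */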
4232                 if (batch) {
4233                         offset = cnt_free - &pool->counters_raw[0];
4234                         dcs = pool->min_dcs;
4235                 } else {
4236                         offset = 0;
4237                         dcs = cnt_free->dcs;
4238                 }
4239                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
4240                                         (dcs->obj, offset);
4241                 if (!cnt_free->action) {
4242                         rte_errno = errno;
4243                         return NULL;
4244                 }
4245         }
4246         /* Update the counter reset values. */
4247         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
4248                                  &cnt_free->bytes))
4249                 return NULL;
4250         cnt_free->shared = shared;
4251         cnt_free->ref_cnt = 1;
4252         cnt_free->id = id;
4253         if (!priv->sh->cmng.query_thread_on)
4254                 /* Start the asynchronous batch query by the host thread. */
4255                 mlx5_set_query_alarm(priv->sh);
4256         TAILQ_REMOVE(&pool->counters, cnt_free, next);
4257         if (TAILQ_EMPTY(&pool->counters)) {
4258                 /* Move the pool to the end of the container pool list. */
4259                 TAILQ_REMOVE(&cont->pool_list, pool, next);
4260                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
4261         }
4262         return cnt_free;
4263 }
4264
4265 /**
4266  * Release a flow counter.
4267  *
4268  * @param[in] dev
4269  *   Pointer to the Ethernet device structure.
4270  * @param[in] counter
4271  *   Pointer to the counter handle.
4272  */
4273 static void
4274 flow_dv_counter_release(struct rte_eth_dev *dev,
4275                         struct mlx5_flow_counter *counter)
4276 {
4277         struct mlx5_priv *priv = dev->data->dev_private;
4278
4279         if (!counter)
4280                 return;
4281         if (priv->counter_fallback) {
4282                 flow_dv_counter_release_fallback(dev, counter);
4283                 return;
4284         }
4285         if (--counter->ref_cnt == 0) {
4286                 struct mlx5_flow_counter_pool *pool =
4287                                 flow_dv_counter_pool_get(counter);
4288
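                     /*
                      * The devx object is not destroyed here; the counter
                      * goes back to the pool free list and may be reused
                      * once a new query round refreshes its reset values.
                      */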
4289                 /* Put the counter at the free list end, sorted by query generation. */
4290                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
4291                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
4292         }
4293 }
4294
4295 /**
4296  * Verify the @p attributes will be correctly understood by the NIC and are
4297  * valid in the current device configuration.
4298  *
4299  * @param[in] dev
4300  *   Pointer to dev struct.
4301  * @param[in] attributes
4302  *   Pointer to flow attributes.
4303  * @param[in] external
4304  *   This flow rule is created by a request external to the PMD.
4305  * @param[out] error
4306  *   Pointer to error structure.
4307  *
4308  * @return
4309  *   0 on success, a negative errno value otherwise and rte_errno is set.
4310  */
4311 static int
4312 flow_dv_validate_attributes(struct rte_eth_dev *dev,
4313                             const struct rte_flow_attr *attributes,
4314                             bool external __rte_unused,
4315                             struct rte_flow_error *error)
4316 {
4317         struct mlx5_priv *priv = dev->data->dev_private;
4318         uint32_t priority_max = priv->config.flow_prio - 1;
4319
4320 #ifndef HAVE_MLX5DV_DR
4321         if (attributes->group)
4322                 return rte_flow_error_set(error, ENOTSUP,
4323                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
4324                                           NULL,
4325                                           "groups are not supported");
4326 #else
4327         uint32_t table;
4328         int ret;
4329
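             /*
              * Only check that the group number can be translated to a
              * table; the translated value itself is not needed for
              * validation.
              */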
4330         ret = mlx5_flow_group_to_table(attributes, external,
4331                                        attributes->group,
4332                                        &table, error);
4333         if (ret)
4334                 return ret;
4335 #endif
4336         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
4337             attributes->priority >= priority_max)
4338                 return rte_flow_error_set(error, ENOTSUP,
4339                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
4340                                           NULL,
4341                                           "priority out of range");
4342         if (attributes->transfer) {
4343                 if (!priv->config.dv_esw_en)
4344                         return rte_flow_error_set
4345                                 (error, ENOTSUP,
4346                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4347                                  "E-Switch dr is not supported");
4348                 if (!(priv->representor || priv->master))
4349                         return rte_flow_error_set
4350                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4351                                  NULL, "E-Switch configuration can only be"
4352                                  " done by a master or a representor device");
4353                 if (attributes->egress)
4354                         return rte_flow_error_set
4355                                 (error, ENOTSUP,
4356                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
4357                                  "egress is not supported");
4358         }
4359         if (!(attributes->egress ^ attributes->ingress))
4360                 return rte_flow_error_set(error, ENOTSUP,
4361                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
4362                                           "must specify exactly one of "
4363                                           "ingress or egress");
4364         return 0;
4365 }
4366
4367 /**
4368  * Internal validation function. For validating both actions and items.
4369  *
4370  * @param[in] dev
4371  *   Pointer to the rte_eth_dev structure.
4372  * @param[in] attr
4373  *   Pointer to the flow attributes.
4374  * @param[in] items
4375  *   Pointer to the list of items.
4376  * @param[in] actions
4377  *   Pointer to the list of actions.
4378  * @param[in] external
4379  *   This flow rule is created by a request external to the PMD.
4380  * @param[out] error
4381  *   Pointer to the error structure.
4382  *
4383  * @return
4384  *   0 on success, a negative errno value otherwise and rte_errno is set.
4385  */
4386 static int
4387 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
4388                  const struct rte_flow_item items[],
4389                  const struct rte_flow_action actions[],
4390                  bool external, struct rte_flow_error *error)
4391 {
4392         int ret;
4393         uint64_t action_flags = 0;
4394         uint64_t item_flags = 0;
4395         uint64_t last_item = 0;
4396         uint8_t next_protocol = 0xff;
4397         uint16_t ether_type = 0;
4398         int actions_n = 0;
4399         const struct rte_flow_item *gre_item = NULL;
4400         struct rte_flow_item_tcp nic_tcp_mask = {
4401                 .hdr = {
4402                         .tcp_flags = 0xFF,
4403                         .src_port = RTE_BE16(UINT16_MAX),
4404                         .dst_port = RTE_BE16(UINT16_MAX),
4405                 }
4406         };
4407         struct mlx5_priv *priv = dev->data->dev_private;
4408         struct mlx5_dev_config *dev_conf = &priv->config;
4409
4410         if (items == NULL)
4411                 return -1;
4412         ret = flow_dv_validate_attributes(dev, attr, external, error);
4413         if (ret < 0)
4414                 return ret;
4415         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4416                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4417                 int type = items->type;
4418
4419                 switch (type) {
4420                 case RTE_FLOW_ITEM_TYPE_VOID:
4421                         break;
4422                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4423                         ret = flow_dv_validate_item_port_id
4424                                         (dev, items, attr, item_flags, error);
4425                         if (ret < 0)
4426                                 return ret;
4427                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4428                         break;
4429                 case RTE_FLOW_ITEM_TYPE_ETH:
4430                         ret = mlx5_flow_validate_item_eth(items, item_flags,
4431                                                           error);
4432                         if (ret < 0)
4433                                 return ret;
4434                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4435                                              MLX5_FLOW_LAYER_OUTER_L2;
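                             /* Cache the masked EtherType so that the
                              * following L3 item can be validated against
                              * it. */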
4436                         if (items->mask != NULL && items->spec != NULL) {
4437                                 ether_type =
4438                                         ((const struct rte_flow_item_eth *)
4439                                          items->spec)->type;
4440                                 ether_type &=
4441                                         ((const struct rte_flow_item_eth *)
4442                                          items->mask)->type;
4443                                 ether_type = rte_be_to_cpu_16(ether_type);
4444                         } else {
4445                                 ether_type = 0;
4446                         }
4447                         break;
4448                 case RTE_FLOW_ITEM_TYPE_VLAN:
4449                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
4450                                                            dev, error);
4451                         if (ret < 0)
4452                                 return ret;
4453                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
4454                                              MLX5_FLOW_LAYER_OUTER_VLAN;
4455                         if (items->mask != NULL && items->spec != NULL) {
4456                                 ether_type =
4457                                         ((const struct rte_flow_item_vlan *)
4458                                          items->spec)->inner_type;
4459                                 ether_type &=
4460                                         ((const struct rte_flow_item_vlan *)
4461                                          items->mask)->inner_type;
4462                                 ether_type = rte_be_to_cpu_16(ether_type);
4463                         } else {
4464                                 ether_type = 0;
4465                         }
4466                         break;
4467                 case RTE_FLOW_ITEM_TYPE_IPV4:
4468                         mlx5_flow_tunnel_ip_check(items, next_protocol,
4469                                                   &item_flags, &tunnel);
4470                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
4471                                                            last_item,
4472                                                            ether_type, NULL,
4473                                                            error);
4474                         if (ret < 0)
4475                                 return ret;
4476                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4477                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4478                         if (items->mask != NULL &&
4479                             ((const struct rte_flow_item_ipv4 *)
4480                              items->mask)->hdr.next_proto_id) {
4481                                 next_protocol =
4482                                         ((const struct rte_flow_item_ipv4 *)
4483                                          (items->spec))->hdr.next_proto_id;
4484                                 next_protocol &=
4485                                         ((const struct rte_flow_item_ipv4 *)
4486                                          (items->mask))->hdr.next_proto_id;
4487                         } else {
4488                                 /* Reset for inner layer. */
4489                                 next_protocol = 0xff;
4490                         }
4491                         break;
4492                 case RTE_FLOW_ITEM_TYPE_IPV6:
4493                         mlx5_flow_tunnel_ip_check(items, next_protocol,
4494                                                   &item_flags, &tunnel);
4495                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
4496                                                            last_item,
4497                                                            ether_type, NULL,
4498                                                            error);
4499                         if (ret < 0)
4500                                 return ret;
4501                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4502                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4503                         if (items->mask != NULL &&
4504                             ((const struct rte_flow_item_ipv6 *)
4505                              items->mask)->hdr.proto) {
4506                                 next_protocol =
4507                                         ((const struct rte_flow_item_ipv6 *)
4508                                          items->spec)->hdr.proto;
4509                                 next_protocol &=
4510                                         ((const struct rte_flow_item_ipv6 *)
4511                                          items->mask)->hdr.proto;
4512                         } else {
4513                                 /* Reset for inner layer. */
4514                                 next_protocol = 0xff;
4515                         }
4516                         break;
4517                 case RTE_FLOW_ITEM_TYPE_TCP:
4518                         ret = mlx5_flow_validate_item_tcp
4519                                                 (items, item_flags,
4520                                                  next_protocol,
4521                                                  &nic_tcp_mask,
4522                                                  error);
4523                         if (ret < 0)
4524                                 return ret;
4525                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4526                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
4527                         break;
4528                 case RTE_FLOW_ITEM_TYPE_UDP:
4529                         ret = mlx5_flow_validate_item_udp(items, item_flags,
4530                                                           next_protocol,
4531                                                           error);
4532                         if (ret < 0)
4533                                 return ret;
4534                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4535                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
4536                         break;
4537                 case RTE_FLOW_ITEM_TYPE_GRE:
4538                         ret = mlx5_flow_validate_item_gre(items, item_flags,
4539                                                           next_protocol, error);
4540                         if (ret < 0)
4541                                 return ret;
4542                         gre_item = items;
4543                         last_item = MLX5_FLOW_LAYER_GRE;
4544                         break;
4545                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4546                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
4547                                                             next_protocol,
4548                                                             error);
4549                         if (ret < 0)
4550                                 return ret;
4551                         last_item = MLX5_FLOW_LAYER_NVGRE;
4552                         break;
4553                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4554                         ret = mlx5_flow_validate_item_gre_key
4555                                 (items, item_flags, gre_item, error);
4556                         if (ret < 0)
4557                                 return ret;
4558                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
4559                         break;
4560                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4561                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
4562                                                             error);
4563                         if (ret < 0)
4564                                 return ret;
4565                         last_item = MLX5_FLOW_LAYER_VXLAN;
4566                         break;
4567                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4568                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
4569                                                                 item_flags, dev,
4570                                                                 error);
4571                         if (ret < 0)
4572                                 return ret;
4573                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4574                         break;
4575                 case RTE_FLOW_ITEM_TYPE_GENEVE:
4576                         ret = mlx5_flow_validate_item_geneve(items,
4577                                                              item_flags, dev,
4578                                                              error);
4579                         if (ret < 0)
4580                                 return ret;
4581                         last_item = MLX5_FLOW_LAYER_GENEVE;
4582                         break;
4583                 case RTE_FLOW_ITEM_TYPE_MPLS:
4584                         ret = mlx5_flow_validate_item_mpls(dev, items,
4585                                                            item_flags,
4586                                                            last_item, error);
4587                         if (ret < 0)
4588                                 return ret;
4589                         last_item = MLX5_FLOW_LAYER_MPLS;
4590                         break;
4591
4592                 case RTE_FLOW_ITEM_TYPE_MARK:
4593                         ret = flow_dv_validate_item_mark(dev, items, attr,
4594                                                          error);
4595                         if (ret < 0)
4596                                 return ret;
4597                         last_item = MLX5_FLOW_ITEM_MARK;
4598                         break;
4599                 case RTE_FLOW_ITEM_TYPE_META:
4600                         ret = flow_dv_validate_item_meta(dev, items, attr,
4601                                                          error);
4602                         if (ret < 0)
4603                                 return ret;
4604                         last_item = MLX5_FLOW_ITEM_METADATA;
4605                         break;
4606                 case RTE_FLOW_ITEM_TYPE_ICMP:
4607                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
4608                                                            next_protocol,
4609                                                            error);
4610                         if (ret < 0)
4611                                 return ret;
4612                         last_item = MLX5_FLOW_LAYER_ICMP;
4613                         break;
4614                 case RTE_FLOW_ITEM_TYPE_ICMP6:
4615                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
4616                                                             next_protocol,
4617                                                             error);
4618                         if (ret < 0)
4619                                 return ret;
4620                         last_item = MLX5_FLOW_LAYER_ICMP6;
4621                         break;
4622                 case RTE_FLOW_ITEM_TYPE_TAG:
4623                         ret = flow_dv_validate_item_tag(dev, items,
4624                                                         attr, error);
4625                         if (ret < 0)
4626                                 return ret;
4627                         last_item = MLX5_FLOW_ITEM_TAG;
4628                         break;
4629                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
4630                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
4631                         break;
4632                 default:
4633                         return rte_flow_error_set(error, ENOTSUP,
4634                                                   RTE_FLOW_ERROR_TYPE_ITEM,
4635                                                   NULL, "item not supported");
4636                 }
4637                 item_flags |= last_item;
4638         }
4639         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4640                 int type = actions->type;
4641                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
4642                         return rte_flow_error_set(error, ENOTSUP,
4643                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4644                                                   actions, "too many actions");
4645                 switch (type) {
4646                 case RTE_FLOW_ACTION_TYPE_VOID:
4647                         break;
4648                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4649                         ret = flow_dv_validate_action_port_id(dev,
4650                                                               action_flags,
4651                                                               actions,
4652                                                               attr,
4653                                                               error);
4654                         if (ret)
4655                                 return ret;
4656                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4657                         ++actions_n;
4658                         break;
4659                 case RTE_FLOW_ACTION_TYPE_FLAG:
4660                         ret = flow_dv_validate_action_flag(dev, action_flags,
4661                                                            attr, error);
4662                         if (ret < 0)
4663                                 return ret;
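                             /* With extended metadata the flag is written to
                              * a register, i.e. a modify-header action. */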
4664                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4665                                 /* Count all modify-header actions as one. */
4666                                 if (!(action_flags &
4667                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
4668                                         ++actions_n;
4669                                 action_flags |= MLX5_FLOW_ACTION_FLAG |
4670                                                 MLX5_FLOW_ACTION_MARK_EXT;
4671                         } else {
4672                                 action_flags |= MLX5_FLOW_ACTION_FLAG;
4673                                 ++actions_n;
4674                         }
4675                         break;
4676                 case RTE_FLOW_ACTION_TYPE_MARK:
4677                         ret = flow_dv_validate_action_mark(dev, actions,
4678                                                            action_flags,
4679                                                            attr, error);
4680                         if (ret < 0)
4681                                 return ret;
4682                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
4683                                 /* Count all modify-header actions as one. */
4684                                 if (!(action_flags &
4685                                       MLX5_FLOW_MODIFY_HDR_ACTIONS))
4686                                         ++actions_n;
4687                                 action_flags |= MLX5_FLOW_ACTION_MARK |
4688                                                 MLX5_FLOW_ACTION_MARK_EXT;
4689                         } else {
4690                                 action_flags |= MLX5_FLOW_ACTION_MARK;
4691                                 ++actions_n;
4692                         }
4693                         break;
4694                 case RTE_FLOW_ACTION_TYPE_SET_META:
4695                         ret = flow_dv_validate_action_set_meta(dev, actions,
4696                                                                action_flags,
4697                                                                attr, error);
4698                         if (ret < 0)
4699                                 return ret;
4700                         /* Count all modify-header actions as one action. */
4701                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4702                                 ++actions_n;
4703                         action_flags |= MLX5_FLOW_ACTION_SET_META;
4704                         break;
4705                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
4706                         ret = flow_dv_validate_action_set_tag(dev, actions,
4707                                                               action_flags,
4708                                                               attr, error);
4709                         if (ret < 0)
4710                                 return ret;
4711                         /* Count all modify-header actions as one action. */
4712                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4713                                 ++actions_n;
4714                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
4715                         break;
4716                 case RTE_FLOW_ACTION_TYPE_DROP:
4717                         ret = mlx5_flow_validate_action_drop(action_flags,
4718                                                              attr, error);
4719                         if (ret < 0)
4720                                 return ret;
4721                         action_flags |= MLX5_FLOW_ACTION_DROP;
4722                         ++actions_n;
4723                         break;
4724                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4725                         ret = mlx5_flow_validate_action_queue(actions,
4726                                                               action_flags, dev,
4727                                                               attr, error);
4728                         if (ret < 0)
4729                                 return ret;
4730                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
4731                         ++actions_n;
4732                         break;
4733                 case RTE_FLOW_ACTION_TYPE_RSS:
4734                         ret = mlx5_flow_validate_action_rss(actions,
4735                                                             action_flags, dev,
4736                                                             attr, item_flags,
4737                                                             error);
4738                         if (ret < 0)
4739                                 return ret;
4740                         action_flags |= MLX5_FLOW_ACTION_RSS;
4741                         ++actions_n;
4742                         break;
4743                 case RTE_FLOW_ACTION_TYPE_COUNT:
4744                         ret = flow_dv_validate_action_count(dev, error);
4745                         if (ret < 0)
4746                                 return ret;
4747                         action_flags |= MLX5_FLOW_ACTION_COUNT;
4748                         ++actions_n;
4749                         break;
4750                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4751                         if (flow_dv_validate_action_pop_vlan(dev,
4752                                                              action_flags,
4753                                                              actions,
4754                                                              item_flags, attr,
4755                                                              error))
4756                                 return -rte_errno;
4757                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
4758                         ++actions_n;
4759                         break;
4760                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4761                         ret = flow_dv_validate_action_push_vlan(action_flags,
4762                                                                 item_flags,
4763                                                                 actions, attr,
4764                                                                 error);
4765                         if (ret < 0)
4766                                 return ret;
4767                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
4768                         ++actions_n;
4769                         break;
4770                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4771                         ret = flow_dv_validate_action_set_vlan_pcp
4772                                                 (action_flags, actions, error);
4773                         if (ret < 0)
4774                                 return ret;
4775                         /* Count PCP with push_vlan command. */
4776                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
4777                         break;
4778                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4779                         ret = flow_dv_validate_action_set_vlan_vid
4780                                                 (item_flags, action_flags,
4781                                                  actions, error);
4782                         if (ret < 0)
4783                                 return ret;
4784                         /* Count VID with push_vlan command. */
4785                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
4786                         break;
4787                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4788                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4789                         ret = flow_dv_validate_action_l2_encap(action_flags,
4790                                                                actions, attr,
4791                                                                error);
4792                         if (ret < 0)
4793                                 return ret;
4794                         action_flags |= actions->type ==
4795                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4796                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
4797                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
4798                         ++actions_n;
4799                         break;
4800                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4801                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4802                         ret = flow_dv_validate_action_l2_decap(action_flags,
4803                                                                attr, error);
4804                         if (ret < 0)
4805                                 return ret;
4806                         action_flags |= actions->type ==
4807                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4808                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
4809                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
4810                         ++actions_n;
4811                         break;
4812                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4813                         ret = flow_dv_validate_action_raw_encap(action_flags,
4814                                                                 actions, attr,
4815                                                                 error);
4816                         if (ret < 0)
4817                                 return ret;
4818                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4819                         ++actions_n;
4820                         break;
4821                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4822                         ret = flow_dv_validate_action_raw_decap(action_flags,
4823                                                                 actions, attr,
4824                                                                 error);
4825                         if (ret < 0)
4826                                 return ret;
4827                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4828                         ++actions_n;
4829                         break;
4830                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4831                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4832                         ret = flow_dv_validate_action_modify_mac(action_flags,
4833                                                                  actions,
4834                                                                  item_flags,
4835                                                                  error);
4836                         if (ret < 0)
4837                                 return ret;
4838                         /* Count all modify-header actions as one action. */
4839                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4840                                 ++actions_n;
4841                         action_flags |= actions->type ==
4842                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4843                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
4844                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
4845                         break;
4846
4847                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4848                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4849                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
4850                                                                   actions,
4851                                                                   item_flags,
4852                                                                   error);
4853                         if (ret < 0)
4854                                 return ret;
4855                         /* Count all modify-header actions as one action. */
4856                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4857                                 ++actions_n;
4858                         action_flags |= actions->type ==
4859                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4860                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
4861                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
4862                         break;
4863                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4864                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4865                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
4866                                                                   actions,
4867                                                                   item_flags,
4868                                                                   error);
4869                         if (ret < 0)
4870                                 return ret;
4871                         /* Count all modify-header actions as one action. */
4872                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4873                                 ++actions_n;
4874                         action_flags |= actions->type ==
4875                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4876                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
4877                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
4878                         break;
4879                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4880                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4881                         ret = flow_dv_validate_action_modify_tp(action_flags,
4882                                                                 actions,
4883                                                                 item_flags,
4884                                                                 error);
4885                         if (ret < 0)
4886                                 return ret;
4887                         /* Count all modify-header actions as one action. */
4888                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4889                                 ++actions_n;
4890                         action_flags |= actions->type ==
4891                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4892                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
4893                                                 MLX5_FLOW_ACTION_SET_TP_DST;
4894                         break;
4895                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4896                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4897                         ret = flow_dv_validate_action_modify_ttl(action_flags,
4898                                                                  actions,
4899                                                                  item_flags,
4900                                                                  error);
4901                         if (ret < 0)
4902                                 return ret;
4903                         /* Count all modify-header actions as one action. */
4904                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4905                                 ++actions_n;
4906                         action_flags |= actions->type ==
4907                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
4908                                                 MLX5_FLOW_ACTION_SET_TTL :
4909                                                 MLX5_FLOW_ACTION_DEC_TTL;
4910                         break;
4911                 case RTE_FLOW_ACTION_TYPE_JUMP:
4912                         ret = flow_dv_validate_action_jump(actions,
4913                                                            action_flags,
4914                                                            attr, external,
4915                                                            error);
4916                         if (ret)
4917                                 return ret;
4918                         ++actions_n;
4919                         action_flags |= MLX5_FLOW_ACTION_JUMP;
4920                         break;
4921                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4922                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4923                         ret = flow_dv_validate_action_modify_tcp_seq
4924                                                                 (action_flags,
4925                                                                  actions,
4926                                                                  item_flags,
4927                                                                  error);
4928                         if (ret < 0)
4929                                 return ret;
4930                         /* Count all modify-header actions as one action. */
4931                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4932                                 ++actions_n;
4933                         action_flags |= actions->type ==
4934                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4935                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4936                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4937                         break;
4938                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4939                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4940                         ret = flow_dv_validate_action_modify_tcp_ack
4941                                                                 (action_flags,
4942                                                                  actions,
4943                                                                  item_flags,
4944                                                                  error);
4945                         if (ret < 0)
4946                                 return ret;
4947                         /* Count all modify-header actions as one action. */
4948                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4949                                 ++actions_n;
4950                         action_flags |= actions->type ==
4951                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4952                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
4953                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4954                         break;
4955                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
4956                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
4957                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
4958                         break;
4959                 case RTE_FLOW_ACTION_TYPE_METER:
4960                         ret = mlx5_flow_validate_action_meter(dev,
4961                                                               action_flags,
4962                                                               actions, attr,
4963                                                               error);
4964                         if (ret < 0)
4965                                 return ret;
4966                         action_flags |= MLX5_FLOW_ACTION_METER;
4967                         ++actions_n;
4968                         break;
4969                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
4970                         ret = flow_dv_validate_action_modify_ipv4_dscp
4971                                                          (action_flags,
4972                                                           actions,
4973                                                           item_flags,
4974                                                           error);
4975                         if (ret < 0)
4976                                 return ret;
4977                         /* Count all modify-header actions as one action. */
4978                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4979                                 ++actions_n;
4980                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
4981                         break;
4982                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
4983                         ret = flow_dv_validate_action_modify_ipv6_dscp
4984                                                                 (action_flags,
4985                                                                  actions,
4986                                                                  item_flags,
4987                                                                  error);
4988                         if (ret < 0)
4989                                 return ret;
4990                         /* Count all modify-header actions as one action. */
4991                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4992                                 ++actions_n;
4993                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
4994                         break;
4995                 default:
4996                         return rte_flow_error_set(error, ENOTSUP,
4997                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4998                                                   actions,
4999                                                   "action not supported");
5000                 }
5001         }
5002         if ((action_flags & MLX5_FLOW_LAYER_TUNNEL) &&
5003             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
5004                 return rte_flow_error_set(error, ENOTSUP,
5005                                           RTE_FLOW_ERROR_TYPE_ACTION,
5006                                           actions,
5007                                           "can't have vxlan and vlan"
5008                                           " actions in the same rule");
5009         /* Eswitch has a few restrictions on using items and actions */
5010         if (attr->transfer) {
5011                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5012                     action_flags & MLX5_FLOW_ACTION_FLAG)
5013                         return rte_flow_error_set(error, ENOTSUP,
5014                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5015                                                   NULL,
5016                                                   "unsupported action FLAG");
5017                 if (!mlx5_flow_ext_mreg_supported(dev) &&
5018                     action_flags & MLX5_FLOW_ACTION_MARK)
5019                         return rte_flow_error_set(error, ENOTSUP,
5020                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5021                                                   NULL,
5022                                                   "unsupported action MARK");
5023                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
5024                         return rte_flow_error_set(error, ENOTSUP,
5025                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5026                                                   NULL,
5027                                                   "unsupported action QUEUE");
5028                 if (action_flags & MLX5_FLOW_ACTION_RSS)
5029                         return rte_flow_error_set(error, ENOTSUP,
5030                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5031                                                   NULL,
5032                                                   "unsupported action RSS");
5033                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
5034                         return rte_flow_error_set(error, EINVAL,
5035                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5036                                                   actions,
5037                                                   "no fate action is found");
5038         } else {
5039                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
5040                         return rte_flow_error_set(error, EINVAL,
5041                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5042                                                   actions,
5043                                                   "no fate action is found");
5044         }
5045         return 0;
5046 }
5047
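/*
 * Illustrative sketch (not part of the driver): in the validation loop
 * above, consecutive modify-header actions are counted as a single
 * action, since the device merges them into one modify-header command.
 * The action array below therefore validates as two actions: one merged
 * modify-header action and the QUEUE fate action.
 *
 *     const struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_SET_TTL,
 *               .conf = &(struct rte_flow_action_set_ttl){
 *                       .ttl_value = 64 } },
 *             { .type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ,
 *               .conf = &(rte_be32_t){ RTE_BE32(1) } },
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
 *               .conf = &(struct rte_flow_action_queue){ .index = 0 } },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */
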
5048 /**
5049  * Internal preparation function. Allocates the DV flow structure;
5050  * its size is constant.
5051  *
5052  * @param[in] attr
5053  *   Pointer to the flow attributes.
5054  * @param[in] items
5055  *   Pointer to the list of items.
5056  * @param[in] actions
5057  *   Pointer to the list of actions.
5058  * @param[out] error
5059  *   Pointer to the error structure.
5060  *
5061  * @return
5062  *   Pointer to mlx5_flow object on success,
5063  *   otherwise NULL and rte_errno is set.
5064  */
5065 static struct mlx5_flow *
5066 flow_dv_prepare(const struct rte_flow_attr *attr,
5067                 const struct rte_flow_item items[] __rte_unused,
5068                 const struct rte_flow_action actions[] __rte_unused,
5069                 struct rte_flow_error *error)
5070 {
5071         size_t size = sizeof(struct mlx5_flow);
5072         struct mlx5_flow *dev_flow;
5073
5074         dev_flow = rte_calloc(__func__, 1, size, 0);
5075         if (!dev_flow) {
5076                 rte_flow_error_set(error, ENOMEM,
5077                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5078                                    "not enough memory to create flow");
5079                 return NULL;
5080         }
5081         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
5082         dev_flow->ingress = attr->ingress;
5083         dev_flow->transfer = attr->transfer;
5084         return dev_flow;
5085 }
5086
5087 #ifndef NDEBUG
5088 /**
5089  * Sanity check for match mask and value. Similar to check_valid_spec() in
5090  * the kernel driver. If an unmasked bit is present in the value, it
5090  * returns failure.
5091  *
5092  * @param match_mask
5093  *   pointer to match mask buffer.
5094  * @param match_value
5095  *   pointer to match value buffer.
5096  *
5097  * @return
5098  *   0 if valid, -EINVAL otherwise.
5099  */
5100 static int
5101 flow_dv_check_valid_spec(void *match_mask, void *match_value)
5102 {
5103         uint8_t *m = match_mask;
5104         uint8_t *v = match_value;
5105         unsigned int i;
5106
5107         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
5108                 if (v[i] & ~m[i]) {
5109                         DRV_LOG(ERR,
5110                                 "match_value differs from match_criteria"
5111                                 " %p[%u] != %p[%u]",
5112                                 match_value, i, match_mask, i);
5113                         return -EINVAL;
5114                 }
5115         }
5116         return 0;
5117 }
5118 #endif
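
/*
 * A minimal sketch of what flow_dv_check_valid_spec() rejects: any
 * value bit that is not covered by the corresponding mask bit.
 *
 *     uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
 *     uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };
 *
 *     flow_dv_check_valid_spec(mask, value);
 *     -- returns -EINVAL: bit 4 of byte 0 is set in value but not in mask
 */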
5119
5120 /**
5121  * Add Ethernet item to matcher and to the value.
5122  *
5123  * @param[in, out] matcher
5124  *   Flow matcher.
5125  * @param[in, out] key
5126  *   Flow matcher value.
5127  * @param[in] item
5128  *   Flow pattern to translate.
5129  * @param[in] inner
5130  *   Item is inner pattern.
5131  */
5132 static void
5133 flow_dv_translate_item_eth(void *matcher, void *key,
5134                            const struct rte_flow_item *item, int inner)
5135 {
5136         const struct rte_flow_item_eth *eth_m = item->mask;
5137         const struct rte_flow_item_eth *eth_v = item->spec;
5138         const struct rte_flow_item_eth nic_mask = {
5139                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5140                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
5141                 .type = RTE_BE16(0xffff),
5142         };
5143         void *headers_m;
5144         void *headers_v;
5145         char *l24_v;
5146         unsigned int i;
5147
5148         if (!eth_v)
5149                 return;
5150         if (!eth_m)
5151                 eth_m = &nic_mask;
5152         if (inner) {
5153                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5154                                          inner_headers);
5155                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5156         } else {
5157                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5158                                          outer_headers);
5159                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5160         }
5161         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
5162                &eth_m->dst, sizeof(eth_m->dst));
5163         /* The value must be in the range of the mask. */
5164         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
5165         for (i = 0; i < sizeof(eth_m->dst); ++i)
5166                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
5167         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
5168                &eth_m->src, sizeof(eth_m->src));
5169         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
5170         /* The value must be in the range of the mask. */
5171         for (i = 0; i < sizeof(eth_m->src); ++i)
5172                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
5173         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5174                  rte_be_to_cpu_16(eth_m->type));
5175         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
5176         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
5177 }
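
/*
 * Usage sketch (illustrative only): the value side of the matcher is
 * always written as spec AND mask, so a partially masked DMAC matches
 * only the masked prefix. matcher/key are assumed to point to zeroed
 * fte_match_param buffers.
 *
 *     const struct rte_flow_item_eth spec = {
 *             .dst.addr_bytes = "\x01\x00\x5e\x11\x22\x33",
 *     };
 *     const struct rte_flow_item_eth mask = {
 *             .dst.addr_bytes = "\xff\xff\xff\x00\x00\x00",
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 *     flow_dv_translate_item_eth(matcher, key, &item, 0);
 */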
5178
5179 /**
5180  * Add VLAN item to matcher and to the value.
5181  *
5182  * @param[in, out] dev_flow
5183  *   Flow descriptor.
5184  * @param[in, out] matcher
5185  *   Flow matcher.
5186  * @param[in, out] key
5187  *   Flow matcher value.
5188  * @param[in] item
5189  *   Flow pattern to translate.
5190  * @param[in] inner
5191  *   Item is inner pattern.
5192  */
5193 static void
5194 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
5195                             void *matcher, void *key,
5196                             const struct rte_flow_item *item,
5197                             int inner)
5198 {
5199         const struct rte_flow_item_vlan *vlan_m = item->mask;
5200         const struct rte_flow_item_vlan *vlan_v = item->spec;
5201         void *headers_m;
5202         void *headers_v;
5203         uint16_t tci_m;
5204         uint16_t tci_v;
5205
5206         if (!vlan_v)
5207                 return;
5208         if (!vlan_m)
5209                 vlan_m = &rte_flow_item_vlan_mask;
5210         if (inner) {
5211                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5212                                          inner_headers);
5213                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5214         } else {
5215                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5216                                          outer_headers);
5217                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5218                 /*
5219                  * This is a workaround: masks are not supported
5220                  * and have been pre-validated.
5221                  */
5222                 dev_flow->dv.vf_vlan.tag =
5223                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
5224         }
5225         tci_m = rte_be_to_cpu_16(vlan_m->tci);
5226         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
5227         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
5228         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
5229         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
5230         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
5231         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
5232         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
5233         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
5234         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
5235         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
5236                  rte_be_to_cpu_16(vlan_m->inner_type));
5237         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
5238                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
5239 }
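
/*
 * TCI layout reminder for the shifts above (host byte order after
 * rte_be_to_cpu_16()):
 *
 *      15  13 12 11          0
 *     +------+--+-------------+
 *     | PCP  |C |     VID     |
 *     +------+--+-------------+
 *
 *     first_vid  <- tci        (MLX5_SET() keeps the low 12 bits)
 *     first_cfi  <- tci >> 12  (MLX5_SET() keeps the low bit)
 *     first_prio <- tci >> 13  (MLX5_SET() keeps the low 3 bits)
 *
 * No explicit AND is needed after the shifts because MLX5_SET()
 * truncates the value to the PRM field width.
 */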
5240
5241 /**
5242  * Add IPV4 item to matcher and to the value.
5243  *
5244  * @param[in, out] matcher
5245  *   Flow matcher.
5246  * @param[in, out] key
5247  *   Flow matcher value.
5248  * @param[in] item
5249  *   Flow pattern to translate.
5250  * @param[in] inner
5251  *   Item is inner pattern.
5252  * @param[in] group
5253  *   The group to insert the rule.
5254  */
5255 static void
5256 flow_dv_translate_item_ipv4(void *matcher, void *key,
5257                             const struct rte_flow_item *item,
5258                             int inner, uint32_t group)
5259 {
5260         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
5261         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
5262         const struct rte_flow_item_ipv4 nic_mask = {
5263                 .hdr = {
5264                         .src_addr = RTE_BE32(0xffffffff),
5265                         .dst_addr = RTE_BE32(0xffffffff),
5266                         .type_of_service = 0xff,
5267                         .next_proto_id = 0xff,
5268                 },
5269         };
5270         void *headers_m;
5271         void *headers_v;
5272         char *l24_m;
5273         char *l24_v;
5274         uint8_t tos;
5275
5276         if (inner) {
5277                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5278                                          inner_headers);
5279                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5280         } else {
5281                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5282                                          outer_headers);
5283                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5284         }
5285         if (group == 0)
5286                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5287         else
5288                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
5289         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
5290         if (!ipv4_v)
5291                 return;
5292         if (!ipv4_m)
5293                 ipv4_m = &nic_mask;
5294         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5295                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5296         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5297                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
5298         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
5299         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
5300         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5301                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
5302         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5303                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
5304         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
5305         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
5306         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
5307         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
5308                  ipv4_m->hdr.type_of_service);
5309         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
5310         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
5311                  ipv4_m->hdr.type_of_service >> 2);
5312         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
5313         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5314                  ipv4_m->hdr.next_proto_id);
5315         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5316                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
5317 }
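
/*
 * Usage sketch (illustrative only): matching any protocol to a /24
 * destination subnet; the value written to the matcher is spec AND
 * mask.
 *
 *     const struct rte_flow_item_ipv4 spec = {
 *             .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 0)),
 *     };
 *     const struct rte_flow_item_ipv4 mask = {
 *             .hdr.dst_addr = RTE_BE32(0xffffff00),
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 *     flow_dv_translate_item_ipv4(matcher, key, &item, 0, group);
 */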
5318
5319 /**
5320  * Add IPV6 item to matcher and to the value.
5321  *
5322  * @param[in, out] matcher
5323  *   Flow matcher.
5324  * @param[in, out] key
5325  *   Flow matcher value.
5326  * @param[in] item
5327  *   Flow pattern to translate.
5328  * @param[in] inner
5329  *   Item is inner pattern.
5330  * @param[in] group
5331  *   The group to insert the rule.
5332  */
5333 static void
5334 flow_dv_translate_item_ipv6(void *matcher, void *key,
5335                             const struct rte_flow_item *item,
5336                             int inner, uint32_t group)
5337 {
5338         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
5339         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
5340         const struct rte_flow_item_ipv6 nic_mask = {
5341                 .hdr = {
5342                         .src_addr =
5343                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
5344                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
5345                         .dst_addr =
5346                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
5347                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
5348                         .vtc_flow = RTE_BE32(0xffffffff),
5349                         .proto = 0xff,
5350                         .hop_limits = 0xff,
5351                 },
5352         };
5353         void *headers_m;
5354         void *headers_v;
5355         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5356         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5357         char *l24_m;
5358         char *l24_v;
5359         uint32_t vtc_m;
5360         uint32_t vtc_v;
5361         int i;
5362         int size;
5363
5364         if (inner) {
5365                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5366                                          inner_headers);
5367                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5368         } else {
5369                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5370                                          outer_headers);
5371                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5372         }
5373         if (group == 0)
5374                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
5375         else
5376                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
5377         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
5378         if (!ipv6_v)
5379                 return;
5380         if (!ipv6_m)
5381                 ipv6_m = &nic_mask;
5382         size = sizeof(ipv6_m->hdr.dst_addr);
5383         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5384                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5385         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5386                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
5387         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
5388         for (i = 0; i < size; ++i)
5389                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
5390         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
5391                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
5392         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
5393                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
5394         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
5395         for (i = 0; i < size; ++i)
5396                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
5397         /* TOS. */
5398         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
5399         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
5400         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
5401         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
5402         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
5403         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
5404         /* Label. */
5405         if (inner) {
5406                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
5407                          vtc_m);
5408                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
5409                          vtc_v);
5410         } else {
5411                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
5412                          vtc_m);
5413                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
5414                          vtc_v);
5415         }
5416         /* Protocol. */
5417         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
5418                  ipv6_m->hdr.proto);
5419         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5420                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
5421 }
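
/*
 * vtc_flow layout reminder for the shifts above (host byte order
 * after rte_be_to_cpu_32()):
 *
 *      31    28 27         20 19                   0
 *     +--------+-------------+----------------------+
 *     |version | traffic cls |      flow label      |
 *     +--------+-------------+----------------------+
 *
 *     ip_ecn  <- vtc >> 20  (MLX5_SET() keeps the low 2 bits: ECN)
 *     ip_dscp <- vtc >> 22  (MLX5_SET() keeps the low 6 bits: DSCP)
 *     flow label <- vtc     (the PRM field is 20 bits wide, so
 *                            MLX5_SET() truncation applies here too)
 */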
5422
5423 /**
5424  * Add TCP item to matcher and to the value.
5425  *
5426  * @param[in, out] matcher
5427  *   Flow matcher.
5428  * @param[in, out] key
5429  *   Flow matcher value.
5430  * @param[in] item
5431  *   Flow pattern to translate.
5432  * @param[in] inner
5433  *   Item is inner pattern.
5434  */
5435 static void
5436 flow_dv_translate_item_tcp(void *matcher, void *key,
5437                            const struct rte_flow_item *item,
5438                            int inner)
5439 {
5440         const struct rte_flow_item_tcp *tcp_m = item->mask;
5441         const struct rte_flow_item_tcp *tcp_v = item->spec;
5442         void *headers_m;
5443         void *headers_v;
5444
5445         if (inner) {
5446                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5447                                          inner_headers);
5448                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5449         } else {
5450                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5451                                          outer_headers);
5452                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5453         }
5454         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5455         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
5456         if (!tcp_v)
5457                 return;
5458         if (!tcp_m)
5459                 tcp_m = &rte_flow_item_tcp_mask;
5460         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
5461                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
5462         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
5463                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
5464         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
5465                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
5466         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
5467                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
5468         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
5469                  tcp_m->hdr.tcp_flags);
5470         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
5471                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
5472 }
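
/*
 * Usage sketch (illustrative only): matching SYN-only TCP segments.
 * Note that ip_protocol is pinned to IPPROTO_TCP even when the item
 * carries no spec.
 *
 *     const struct rte_flow_item_tcp spec = {
 *             .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *     };
 *     const struct rte_flow_item_tcp mask = {
 *             .hdr.tcp_flags = RTE_TCP_SYN_FLAG | RTE_TCP_ACK_FLAG,
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 *     flow_dv_translate_item_tcp(matcher, key, &item, 0);
 */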
5473
5474 /**
5475  * Add UDP item to matcher and to the value.
5476  *
5477  * @param[in, out] matcher
5478  *   Flow matcher.
5479  * @param[in, out] key
5480  *   Flow matcher value.
5481  * @param[in] item
5482  *   Flow pattern to translate.
5483  * @param[in] inner
5484  *   Item is inner pattern.
5485  */
5486 static void
5487 flow_dv_translate_item_udp(void *matcher, void *key,
5488                            const struct rte_flow_item *item,
5489                            int inner)
5490 {
5491         const struct rte_flow_item_udp *udp_m = item->mask;
5492         const struct rte_flow_item_udp *udp_v = item->spec;
5493         void *headers_m;
5494         void *headers_v;
5495
5496         if (inner) {
5497                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5498                                          inner_headers);
5499                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5500         } else {
5501                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5502                                          outer_headers);
5503                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5504         }
5505         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
5507         if (!udp_v)
5508                 return;
5509         if (!udp_m)
5510                 udp_m = &rte_flow_item_udp_mask;
5511         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
5512                  rte_be_to_cpu_16(udp_m->hdr.src_port));
5513         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
5514                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
5515         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
5516                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
5517         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5518                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
5519 }
5520
5521 /**
5522  * Add GRE optional Key item to matcher and to the value.
5523  *
5524  * @param[in, out] matcher
5525  *   Flow matcher.
5526  * @param[in, out] key
5527  *   Flow matcher value.
5528  * @param[in] item
5529  *   Flow pattern to translate.
5532  */
5533 static void
5534 flow_dv_translate_item_gre_key(void *matcher, void *key,
5535                                const struct rte_flow_item *item)
5536 {
5537         const rte_be32_t *key_m = item->mask;
5538         const rte_be32_t *key_v = item->spec;
5539         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5540         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5541         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5542
5543         if (!key_v)
5544                 return;
5545         if (!key_m)
5546                 key_m = &gre_key_default_mask;
5547         /* GRE K bit must be on and should already be validated */
5548         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
5549         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
5550         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
5551                  rte_be_to_cpu_32(*key_m) >> 8);
5552         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
5553                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
5554         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
5555                  rte_be_to_cpu_32(*key_m) & 0xFF);
5556         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
5557                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
5558 }
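
/*
 * Key split reminder for the MLX5_SET() calls above: the PRM stores
 * the 32-bit GRE key as a 24-bit high part and an 8-bit low part.
 *
 *     key = 0x00abcdef (CPU order)
 *     gre_key_h <- key >> 8    -- 0x00abcd
 *     gre_key_l <- key & 0xff  -- 0xef
 */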
5559
5560 /**
5561  * Add GRE item to matcher and to the value.
5562  *
5563  * @param[in, out] matcher
5564  *   Flow matcher.
5565  * @param[in, out] key
5566  *   Flow matcher value.
5567  * @param[in] item
5568  *   Flow pattern to translate.
5569  * @param[in] inner
5570  *   Item is inner pattern.
5571  */
5572 static void
5573 flow_dv_translate_item_gre(void *matcher, void *key,
5574                            const struct rte_flow_item *item,
5575                            int inner)
5576 {
5577         const struct rte_flow_item_gre *gre_m = item->mask;
5578         const struct rte_flow_item_gre *gre_v = item->spec;
5579         void *headers_m;
5580         void *headers_v;
5581         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5582         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5583         struct {
5584                 union {
5585                         __extension__
5586                         struct {
5587                                 uint16_t version:3;
5588                                 uint16_t rsvd0:9;
5589                                 uint16_t s_present:1;
5590                                 uint16_t k_present:1;
5591                                 uint16_t rsvd_bit1:1;
5592                                 uint16_t c_present:1;
5593                         };
5594                         uint16_t value;
5595                 };
5596         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
5597
5598         if (inner) {
5599                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5600                                          inner_headers);
5601                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5602         } else {
5603                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5604                                          outer_headers);
5605                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5606         }
5607         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5608         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
5609         if (!gre_v)
5610                 return;
5611         if (!gre_m)
5612                 gre_m = &rte_flow_item_gre_mask;
5613         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
5614                  rte_be_to_cpu_16(gre_m->protocol));
5615         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5616                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
5617         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
5618         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
5619         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
5620                  gre_crks_rsvd0_ver_m.c_present);
5621         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
5622                  gre_crks_rsvd0_ver_v.c_present &
5623                  gre_crks_rsvd0_ver_m.c_present);
5624         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
5625                  gre_crks_rsvd0_ver_m.k_present);
5626         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
5627                  gre_crks_rsvd0_ver_v.k_present &
5628                  gre_crks_rsvd0_ver_m.k_present);
5629         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
5630                  gre_crks_rsvd0_ver_m.s_present);
5631         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
5632                  gre_crks_rsvd0_ver_v.s_present &
5633                  gre_crks_rsvd0_ver_m.s_present);
5634 }
5635
5636 /**
5637  * Add NVGRE item to matcher and to the value.
5638  *
5639  * @param[in, out] matcher
5640  *   Flow matcher.
5641  * @param[in, out] key
5642  *   Flow matcher value.
5643  * @param[in] item
5644  *   Flow pattern to translate.
5645  * @param[in] inner
5646  *   Item is inner pattern.
5647  */
5648 static void
5649 flow_dv_translate_item_nvgre(void *matcher, void *key,
5650                              const struct rte_flow_item *item,
5651                              int inner)
5652 {
5653         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
5654         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
5655         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5656         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5657         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
5658         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
5659         char *gre_key_m;
5660         char *gre_key_v;
5661         int size;
5662         int i;
5663
5664         /* For NVGRE, GRE header fields must be set with defined values. */
5665         const struct rte_flow_item_gre gre_spec = {
5666                 .c_rsvd0_ver = RTE_BE16(0x2000),
5667                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
5668         };
5669         const struct rte_flow_item_gre gre_mask = {
5670                 .c_rsvd0_ver = RTE_BE16(0xB000),
5671                 .protocol = RTE_BE16(UINT16_MAX),
5672         };
5673         const struct rte_flow_item gre_item = {
5674                 .spec = &gre_spec,
5675                 .mask = &gre_mask,
5676                 .last = NULL,
5677         };
5678         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
5679         if (!nvgre_v)
5680                 return;
5681         if (!nvgre_m)
5682                 nvgre_m = &rte_flow_item_nvgre_mask;
5683         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
5684         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
5685         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
5686         memcpy(gre_key_m, tni_flow_id_m, size);
5687         for (i = 0; i < size; ++i)
5688                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
5689 }
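
/*
 * The fixed GRE header above encodes NVGRE's mandatory bits: spec
 * 0x2000 sets the K (key present) bit, mask 0xB000 additionally pins
 * C and S to zero, and the protocol is Transparent Ethernet Bridging.
 * The 24-bit TNI plus the 8-bit flow_id occupy exactly the 32-bit GRE
 * key field, which is why a single memcpy() covers both:
 *
 *     tni = {0x12, 0x34, 0x56}, flow_id = 0x78
 *     GRE key = 0x12345678
 */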
5690
5691 /**
5692  * Add VXLAN item to matcher and to the value.
5693  *
5694  * @param[in, out] matcher
5695  *   Flow matcher.
5696  * @param[in, out] key
5697  *   Flow matcher value.
5698  * @param[in] item
5699  *   Flow pattern to translate.
5700  * @param[in] inner
5701  *   Item is inner pattern.
5702  */
5703 static void
5704 flow_dv_translate_item_vxlan(void *matcher, void *key,
5705                              const struct rte_flow_item *item,
5706                              int inner)
5707 {
5708         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
5709         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
5710         void *headers_m;
5711         void *headers_v;
5712         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5713         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5714         char *vni_m;
5715         char *vni_v;
5716         uint16_t dport;
5717         int size;
5718         int i;
5719
5720         if (inner) {
5721                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5722                                          inner_headers);
5723                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5724         } else {
5725                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5726                                          outer_headers);
5727                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5728         }
5729         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
5730                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
5731         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5732                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5733                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
5734         }
5735         if (!vxlan_v)
5736                 return;
5737         if (!vxlan_m)
5738                 vxlan_m = &rte_flow_item_vxlan_mask;
5739         size = sizeof(vxlan_m->vni);
5740         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
5741         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
5742         memcpy(vni_m, vxlan_m->vni, size);
5743         for (i = 0; i < size; ++i)
5744                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
5745 }
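
/*
 * Usage sketch (illustrative only): matching VNI 42. The UDP
 * destination port is forced to MLX5_UDP_PORT_VXLAN only when a
 * preceding UDP item left it unset in the matcher value.
 *
 *     const struct rte_flow_item_vxlan spec = {
 *             .vni = { 0x00, 0x00, 0x2a },
 *     };
 *     const struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &spec,
 *             .mask = &rte_flow_item_vxlan_mask,
 *     };
 *
 *     flow_dv_translate_item_vxlan(matcher, key, &item, 0);
 */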
5746
5747 /**
5748  * Add Geneve item to matcher and to the value.
5749  *
5750  * @param[in, out] matcher
5751  *   Flow matcher.
5752  * @param[in, out] key
5753  *   Flow matcher value.
5754  * @param[in] item
5755  *   Flow pattern to translate.
5756  * @param[in] inner
5757  *   Item is inner pattern.
5758  */
5760 static void
5761 flow_dv_translate_item_geneve(void *matcher, void *key,
5762                               const struct rte_flow_item *item, int inner)
5763 {
5764         const struct rte_flow_item_geneve *geneve_m = item->mask;
5765         const struct rte_flow_item_geneve *geneve_v = item->spec;
5766         void *headers_m;
5767         void *headers_v;
5768         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5769         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5770         uint16_t dport;
5771         uint16_t gbhdr_m;
5772         uint16_t gbhdr_v;
5773         char *vni_m;
5774         char *vni_v;
5775         size_t size, i;
5776
5777         if (inner) {
5778                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5779                                          inner_headers);
5780                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5781         } else {
5782                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5783                                          outer_headers);
5784                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5785         }
5786         dport = MLX5_UDP_PORT_GENEVE;
5787         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
5788                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
5789                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
5790         }
5791         if (!geneve_v)
5792                 return;
5793         if (!geneve_m)
5794                 geneve_m = &rte_flow_item_geneve_mask;
5795         size = sizeof(geneve_m->vni);
5796         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
5797         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
5798         memcpy(vni_m, geneve_m->vni, size);
5799         for (i = 0; i < size; ++i)
5800                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
5801         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
5802                  rte_be_to_cpu_16(geneve_m->protocol));
5803         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
5804                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
5805         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
5806         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
5807         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
5808                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
5809         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
5810                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
5811         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
5812                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
5813         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
5814                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
5815                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
5816 }
5817
5818 /**
5819  * Add MPLS item to matcher and to the value.
5820  *
5821  * @param[in, out] matcher
5822  *   Flow matcher.
5823  * @param[in, out] key
5824  *   Flow matcher value.
5825  * @param[in] item
5826  *   Flow pattern to translate.
5827  * @param[in] prev_layer
5828  *   The protocol layer indicated in the previous item.
5829  * @param[in] inner
5830  *   Item is inner pattern.
5831  */
5832 static void
5833 flow_dv_translate_item_mpls(void *matcher, void *key,
5834                             const struct rte_flow_item *item,
5835                             uint64_t prev_layer,
5836                             int inner)
5837 {
5838         const uint32_t *in_mpls_m = item->mask;
5839         const uint32_t *in_mpls_v = item->spec;
5840         uint32_t *out_mpls_m = NULL;
5841         uint32_t *out_mpls_v = NULL;
5842         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5843         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5844         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
5845                                      misc_parameters_2);
5846         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
5847         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
5848         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5849
5850         switch (prev_layer) {
5851         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
5852                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
5853                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
5854                          MLX5_UDP_PORT_MPLS);
5855                 break;
5856         case MLX5_FLOW_LAYER_GRE:
5857                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
5858                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
5859                          RTE_ETHER_TYPE_MPLS);
5860                 break;
5861         default:
5862                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
5863                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
5864                          IPPROTO_MPLS);
5865                 break;
5866         }
5867         if (!in_mpls_v)
5868                 return;
5869         if (!in_mpls_m)
5870                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
5871         switch (prev_layer) {
5872         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
5873                 out_mpls_m =
5874                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
5875                                                  outer_first_mpls_over_udp);
5876                 out_mpls_v =
5877                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
5878                                                  outer_first_mpls_over_udp);
5879                 break;
5880         case MLX5_FLOW_LAYER_GRE:
5881                 out_mpls_m =
5882                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
5883                                                  outer_first_mpls_over_gre);
5884                 out_mpls_v =
5885                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
5886                                                  outer_first_mpls_over_gre);
5887                 break;
5888         default:
5889                 /* Inner MPLS not over GRE is not supported. */
5890                 if (!inner) {
5891                         out_mpls_m =
5892                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
5893                                                          misc2_m,
5894                                                          outer_first_mpls);
5895                         out_mpls_v =
5896                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
5897                                                          misc2_v,
5898                                                          outer_first_mpls);
5899                 }
5900                 break;
5901         }
5902         if (out_mpls_m && out_mpls_v) {
5903                 *out_mpls_m = *in_mpls_m;
5904                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
5905         }
5906 }
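
/*
 * prev_layer selection sketch: a pattern of ETH / IPV4 / UDP / MPLS
 * reaches this function with prev_layer == MLX5_FLOW_LAYER_OUTER_L4_UDP,
 * so the UDP destination port is pinned to MLX5_UDP_PORT_MPLS and the
 * label is matched in outer_first_mpls_over_udp. With GRE transport
 * the label goes to outer_first_mpls_over_gre instead, and for any
 * other previous layer ip_protocol is pinned to IPPROTO_MPLS.
 */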
5907
5908 /**
5909  * Add metadata register item to matcher
5910  *
5911  * @param[in, out] matcher
5912  *   Flow matcher.
5913  * @param[in, out] key
5914  *   Flow matcher value.
5915  * @param[in] reg_type
5916  *   Type of device metadata register.
5917  * @param[in] data
5918  *   Register value.
5919  * @param[in] mask
5920  *   Register mask.
5921  */
5922 static void
5923 flow_dv_match_meta_reg(void *matcher, void *key,
5924                        enum modify_reg reg_type,
5925                        uint32_t data, uint32_t mask)
5926 {
5927         void *misc2_m =
5928                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
5929         void *misc2_v =
5930                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
5931         uint32_t temp;
5932
5933         data &= mask;
5934         switch (reg_type) {
5935         case REG_A:
5936                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
5937                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
5938                 break;
5939         case REG_B:
5940                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
5941                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
5942                 break;
5943         case REG_C_0:
5944                 /*
5945                  * The metadata register C0 field might be divided into
5946                  * source vport index and META item value, we should set
5947                  * this field according to specified mask, not as whole one.
5948                  */
5949                 temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
5950                 temp |= mask;
5951                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
5952                 temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
5953                 temp &= ~mask;
5954                 temp |= data;
5955                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
5956                 break;
5957         case REG_C_1:
5958                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
5959                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
5960                 break;
5961         case REG_C_2:
5962                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
5963                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
5964                 break;
5965         case REG_C_3:
5966                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
5967                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
5968                 break;
5969         case REG_C_4:
5970                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
5971                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
5972                 break;
5973         case REG_C_5:
5974                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
5975                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
5976                 break;
5977         case REG_C_6:
5978                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
5979                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
5980                 break;
5981         case REG_C_7:
5982                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
5983                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
5984                 break;
5985         default:
5986                 assert(false);
5987                 break;
5988         }
5989 }
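
/*
 * REG_C_0 read-modify-write sketch: with a vport match of mask
 * 0xffff0000 already programmed (the values here are assumptions for
 * the example), adding a META match on the low half must not clobber
 * it.
 *
 *     before: misc2_m.metadata_reg_c_0 == 0xffff0000
 *
 *     flow_dv_match_meta_reg(matcher, key, REG_C_0,
 *                            0x00001234, 0x0000ffff);
 *
 *     after:  misc2_m.metadata_reg_c_0 == 0xffffffff
 *             misc2_v keeps the vport bits and gets 0x1234 in the
 *             low 16 bits.
 */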
5990
5991 /**
5992  * Add MARK item to matcher
5993  *
5994  * @param[in] dev
5995  *   The device to configure through.
5996  * @param[in, out] matcher
5997  *   Flow matcher.
5998  * @param[in, out] key
5999  *   Flow matcher value.
6000  * @param[in] item
6001  *   Flow pattern to translate.
6002  */
6003 static void
6004 flow_dv_translate_item_mark(struct rte_eth_dev *dev,
6005                             void *matcher, void *key,
6006                             const struct rte_flow_item *item)
6007 {
6008         struct mlx5_priv *priv = dev->data->dev_private;
6009         const struct rte_flow_item_mark *mark;
6010         uint32_t value;
6011         uint32_t mask;
6012
6013         mark = item->mask ? (const void *)item->mask :
6014                             &rte_flow_item_mark_mask;
6015         mask = mark->id & priv->sh->dv_mark_mask;
6016         mark = (const void *)item->spec;
6017         assert(mark);
6018         value = mark->id & priv->sh->dv_mark_mask & mask;
6019         if (mask) {
6020                 enum modify_reg reg;
6021
6022                 /* Get the metadata register index for the mark. */
6023                 reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
6024                 assert(reg > 0);
6025                 if (reg == REG_C_0) {
6027                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6028                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6029
6030                         mask &= msk_c0;
6031                         mask <<= shl_c0;
6032                         value <<= shl_c0;
6033                 }
6034                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6035         }
6036 }
6037
6038 /**
6039  * Add META item to matcher
6040  *
6041  * @param[in] dev
6042  *   The device to configure through.
6043  * @param[in, out] matcher
6044  *   Flow matcher.
6045  * @param[in, out] key
6046  *   Flow matcher value.
6047  * @param[in] attr
6048  *   Attributes of flow that includes this item.
6049  * @param[in] item
6050  *   Flow pattern to translate.
6051  */
6052 static void
6053 flow_dv_translate_item_meta(struct rte_eth_dev *dev,
6054                             void *matcher, void *key,
6055                             const struct rte_flow_attr *attr,
6056                             const struct rte_flow_item *item)
6057 {
6058         const struct rte_flow_item_meta *meta_m;
6059         const struct rte_flow_item_meta *meta_v;
6060
6061         meta_m = (const void *)item->mask;
6062         if (!meta_m)
6063                 meta_m = &rte_flow_item_meta_mask;
6064         meta_v = (const void *)item->spec;
6065         if (meta_v) {
6066                 int reg;
6067                 uint32_t value = meta_v->data;
6068                 uint32_t mask = meta_m->data;
6069
6070                 reg = flow_dv_get_metadata_reg(dev, attr, NULL);
6071                 if (reg < 0)
6072                         return;
6073                 /*
6074                  * In datapath code there are no endianness
6075                  * conversions for performance reasons; all
6076                  * pattern conversions are done in rte_flow.
6077                  */
6078                 value = rte_cpu_to_be_32(value);
6079                 mask = rte_cpu_to_be_32(mask);
6080                 if (reg == REG_C_0) {
6081                         struct mlx5_priv *priv = dev->data->dev_private;
6082                         uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6083                         uint32_t shl_c0 = rte_bsf32(msk_c0);
6084
6085                         msk_c0 = rte_cpu_to_be_32(msk_c0);
6086                         value <<= shl_c0;
6087                         mask <<= shl_c0;
6088                         assert(msk_c0);
6089                         assert(!(~msk_c0 & mask));
6090                 }
6091                 flow_dv_match_meta_reg(matcher, key, reg, value, mask);
6092         }
6093 }
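
/*
 * Sketch of the REG_C_0 adjustment above, assuming the firmware
 * reserves the upper half of the register for META
 * (dv_regc0_mask == 0xffff0000 is an assumption for the example;
 * the real split is queried from firmware):
 *
 *     shl_c0 = rte_bsf32(0xffff0000);          -- 16
 *     value  = rte_cpu_to_be_32(value) << 16;
 *     mask   = rte_cpu_to_be_32(mask) << 16;
 *
 * The asserts then check that the shifted mask still fits inside
 * dv_regc0_mask.
 */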
6094
6095 /**
6096  * Add vport metadata Reg C0 item to matcher
6097  *
6098  * @param[in, out] matcher
6099  *   Flow matcher.
6100  * @param[in, out] key
6101  *   Flow matcher value.
6102  * @param[in] value
6103  *   Reg C0 value to match.
6104  * @param[in] mask
6105  *   Reg C0 mask.
6104  */
6105 static void
6106 flow_dv_translate_item_meta_vport(void *matcher, void *key,
6107                                   uint32_t value, uint32_t mask)
6108 {
6109         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
6110 }
6111
6112 /**
6113  * Add tag item to matcher
6114  *
6115  * @param[in] dev
6116  *   The device to configure through.
6117  * @param[in, out] matcher
6118  *   Flow matcher.
6119  * @param[in, out] key
6120  *   Flow matcher value.
6121  * @param[in] item
6122  *   Flow pattern to translate.
6123  */
6124 static void
6125 flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
6126                                 void *matcher, void *key,
6127                                 const struct rte_flow_item *item)
6128 {
6129         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
6130         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
6131         uint32_t mask, value;
6132
6133         assert(tag_v);
6134         value = tag_v->data;
6135         mask = tag_m ? tag_m->data : UINT32_MAX;
6136         if (tag_v->id == REG_C_0) {
6137                 struct mlx5_priv *priv = dev->data->dev_private;
6138                 uint32_t msk_c0 = priv->sh->dv_regc0_mask;
6139                 uint32_t shl_c0 = rte_bsf32(msk_c0);
6140
6141                 mask &= msk_c0;
6142                 mask <<= shl_c0;
6143                 value <<= shl_c0;
6144         }
6145         flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
6146 }
6147
6148 /**
6149  * Add TAG item to matcher
6150  *
6151  * @param[in] dev
6152  *   The device to configure through.
6153  * @param[in, out] matcher
6154  *   Flow matcher.
6155  * @param[in, out] key
6156  *   Flow matcher value.
6157  * @param[in] item
6158  *   Flow pattern to translate.
6159  */
6160 static void
6161 flow_dv_translate_item_tag(struct rte_eth_dev *dev,
6162                            void *matcher, void *key,
6163                            const struct rte_flow_item *item)
6164 {
6165         const struct rte_flow_item_tag *tag_v = item->spec;
6166         const struct rte_flow_item_tag *tag_m = item->mask;
6167         enum modify_reg reg;
6168
6169         assert(tag_v);
6170         tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
6171         /* Get the metadata register index for the tag. */
6172         reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
6173         assert(reg > 0);
6174         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
6175 }
6176
6177 /**
6178  * Add source vport match to the specified matcher.
6179  *
6180  * @param[in, out] matcher
6181  *   Flow matcher.
6182  * @param[in, out] key
6183  *   Flow matcher value.
6184  * @param[in] port
6185  *   Source vport value to match
6186  * @param[in] mask
6187  *   Mask
6188  */
6189 static void
6190 flow_dv_translate_item_source_vport(void *matcher, void *key,
6191                                     int16_t port, uint16_t mask)
6192 {
6193         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6194         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6195
6196         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
6197         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
6198 }
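
/*
 * Example: matching all traffic coming from vport 3 exactly programs the
 * mask 0xffff and the value 3 into the misc parameters:
 *
 *	flow_dv_translate_item_source_vport(matcher, key, 3, 0xffff);
 */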
6199
6200 /**
6201  * Translate port-id item to eswitch match on port-id.
6202  *
6203  * @param[in] dev
6204  *   The device to configure through.
6205  * @param[in, out] matcher
6206  *   Flow matcher.
6207  * @param[in, out] key
6208  *   Flow matcher value.
6209  * @param[in] item
6210  *   Flow pattern to translate.
6211  *
6212  * @return
6213  *   0 on success, a negative errno value otherwise.
6214  */
6215 static int
6216 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
6217                                void *key, const struct rte_flow_item *item)
6218 {
6219         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
6220         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
6221         struct mlx5_priv *priv;
6222         uint16_t mask, id;
6223
6224         mask = pid_m ? pid_m->id : 0xffff;
6225         id = pid_v ? pid_v->id : dev->data->port_id;
6226         priv = mlx5_port_to_eswitch_info(id, item == NULL);
6227         if (!priv)
6228                 return -rte_errno;
6229         /* Translate to vport field or to metadata, depending on mode. */
6230         if (priv->vport_meta_mask)
6231                 flow_dv_translate_item_meta_vport(matcher, key,
6232                                                   priv->vport_meta_tag,
6233                                                   priv->vport_meta_mask);
6234         else
6235                 flow_dv_translate_item_source_vport(matcher, key,
6236                                                     priv->vport_id, mask);
6237         return 0;
6238 }
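
/*
 * Usage sketch (hypothetical pattern, not driver code): a PORT_ID item
 * selecting DPDK port 1; depending on the device configuration it is
 * translated to either a REG_C_0 metadata match or a source-vport match.
 *
 *	struct rte_flow_item_port_id pid = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid,
 *		.mask = &rte_flow_item_port_id_mask,
 *	};
 */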
6239
6240 /**
6241  * Add ICMP6 item to matcher and to the value.
6242  *
6243  * @param[in, out] matcher
6244  *   Flow matcher.
6245  * @param[in, out] key
6246  *   Flow matcher value.
6247  * @param[in] item
6248  *   Flow pattern to translate.
6249  * @param[in] inner
6250  *   Item is inner pattern.
6251  */
6252 static void
6253 flow_dv_translate_item_icmp6(void *matcher, void *key,
6254                               const struct rte_flow_item *item,
6255                               int inner)
6256 {
6257         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
6258         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
6259         void *headers_m;
6260         void *headers_v;
6261         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6262                                      misc_parameters_3);
6263         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6264         if (inner) {
6265                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6266                                          inner_headers);
6267                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6268         } else {
6269                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6270                                          outer_headers);
6271                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6272         }
6273         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6274         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
6275         if (!icmp6_v)
6276                 return;
6277         if (!icmp6_m)
6278                 icmp6_m = &rte_flow_item_icmp6_mask;
6279         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
6280         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
6281                  icmp6_v->type & icmp6_m->type);
6282         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
6283         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
6284                  icmp6_v->code & icmp6_m->code);
6285 }
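
/*
 * Example (hedged): matching ICMPv6 neighbor solicitation (type 135, any
 * code). Per the convention above, the matcher side receives the mask and
 * the key side receives spec & mask:
 *
 *	struct rte_flow_item_icmp6 icmp6_spec = { .type = 135 };
 *	struct rte_flow_item_icmp6 icmp6_mask = { .type = 0xff };
 */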
6286
6287 /**
6288  * Add ICMP item to matcher and to the value.
6289  *
6290  * @param[in, out] matcher
6291  *   Flow matcher.
6292  * @param[in, out] key
6293  *   Flow matcher value.
6294  * @param[in] item
6295  *   Flow pattern to translate.
6296  * @param[in] inner
6297  *   Item is inner pattern.
6298  */
6299 static void
6300 flow_dv_translate_item_icmp(void *matcher, void *key,
6301                             const struct rte_flow_item *item,
6302                             int inner)
6303 {
6304         const struct rte_flow_item_icmp *icmp_m = item->mask;
6305         const struct rte_flow_item_icmp *icmp_v = item->spec;
6306         void *headers_m;
6307         void *headers_v;
6308         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
6309                                      misc_parameters_3);
6310         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
6311         if (inner) {
6312                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6313                                          inner_headers);
6314                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
6315         } else {
6316                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
6317                                          outer_headers);
6318                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
6319         }
6320         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
6321         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
6322         if (!icmp_v)
6323                 return;
6324         if (!icmp_m)
6325                 icmp_m = &rte_flow_item_icmp_mask;
6326         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
6327                  icmp_m->hdr.icmp_type);
6328         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
6329                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
6330         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
6331                  icmp_m->hdr.icmp_code);
6332         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
6333                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
6334 }
6335
6336 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
6337
6338 #define HEADER_IS_ZERO(match_criteria, headers)                              \
6339         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
6340                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
6341
6342 /**
6343  * Calculate flow matcher enable bitmap.
6344  *
6345  * @param match_criteria
6346  *   Pointer to flow matcher criteria.
6347  *
6348  * @return
6349  *   Bitmap of enabled fields.
6350  */
6351 static uint8_t
6352 flow_dv_matcher_enable(uint32_t *match_criteria)
6353 {
6354         uint8_t match_criteria_enable;
6355
6356         match_criteria_enable =
6357                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
6358                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
6359         match_criteria_enable |=
6360                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
6361                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
6362         match_criteria_enable |=
6363                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
6364                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
6365         match_criteria_enable |=
6366                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
6367                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
6368         match_criteria_enable |=
6369                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
6370                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
6371         return match_criteria_enable;
6372 }
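
/*
 * Example: a matcher whose mask touches only the outer headers and
 * misc_parameters_3 (say outer IPv4 plus ICMP type) yields
 *
 *	match_criteria_enable =
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT);
 *
 * All-zero header blocks contribute nothing, so only the enabled match
 * blocks need to be considered.
 */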
6373
6374
6375 /**
6376  * Get a flow table.
6377  *
6378  * @param[in, out] dev
6379  *   Pointer to rte_eth_dev structure.
6380  * @param[in] table_id
6381  *   Table id to use.
6382  * @param[in] egress
6383  *   Direction of the table.
6384  * @param[in] transfer
6385  *   E-Switch or NIC flow.
6386  * @param[out] error
6387  *   Pointer to error structure.
6388  *
6389  * @return
6390  *   Returns the table resource based on the index, NULL in case of failure.
6391  */
6392 static struct mlx5_flow_tbl_resource *
6393 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
6394                          uint32_t table_id, uint8_t egress,
6395                          uint8_t transfer,
6396                          struct rte_flow_error *error)
6397 {
6398         struct mlx5_priv *priv = dev->data->dev_private;
6399         struct mlx5_ibv_shared *sh = priv->sh;
6400         struct mlx5_flow_tbl_resource *tbl;
6401         union mlx5_flow_tbl_key table_key = {
6402                 {
6403                         .table_id = table_id,
6404                         .reserved = 0,
6405                         .domain = !!transfer,
6406                         .direction = !!egress,
6407                 }
6408         };
6409         struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
6410                                                          table_key.v64);
6411         struct mlx5_flow_tbl_data_entry *tbl_data;
6412         int ret;
6413         void *domain;
6414
6415         if (pos) {
6416                 tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
6417                                         entry);
6418                 tbl = &tbl_data->tbl;
6419                 rte_atomic32_inc(&tbl->refcnt);
6420                 return tbl;
6421         }
6422         tbl_data = rte_zmalloc(NULL, sizeof(*tbl_data), 0);
6423         if (!tbl_data) {
6424                 rte_flow_error_set(error, ENOMEM,
6425                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6426                                    NULL,
6427                                    "cannot allocate flow table data entry");
6428                 return NULL;
6429         }
6430         tbl = &tbl_data->tbl;
6431         pos = &tbl_data->entry;
6432         if (transfer)
6433                 domain = sh->fdb_domain;
6434         else if (egress)
6435                 domain = sh->tx_domain;
6436         else
6437                 domain = sh->rx_domain;
6438         tbl->obj = mlx5_glue->dr_create_flow_tbl(domain, table_id);
6439         if (!tbl->obj) {
6440                 rte_flow_error_set(error, ENOMEM,
6441                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6442                                    NULL, "cannot create flow table object");
6443                 rte_free(tbl_data);
6444                 return NULL;
6445         }
6446         /*
6447          * No multi-thread access for now, but it is still better to initialize
6448          * the reference count before inserting the entry into the hash list.
6449          */
6450         rte_atomic32_init(&tbl->refcnt);
6451         /* Jump action reference count is initialized here. */
6452         rte_atomic32_init(&tbl_data->jump.refcnt);
6453         pos->key = table_key.v64;
6454         ret = mlx5_hlist_insert(sh->flow_tbls, pos);
6455         if (ret < 0) {
6456                 rte_flow_error_set(error, -ret,
6457                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6458                                    "cannot insert flow table data entry");
6459                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6460                 rte_free(tbl_data);
                     return NULL; /* Do not return a pointer to freed memory. */
6461         }
6462         rte_atomic32_inc(&tbl->refcnt);
6463         return tbl;
6464 }
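
/*
 * Key-packing sketch (hedged, relying on the mlx5_flow_tbl_key layout
 * used above): table id, domain and direction fold into one 64-bit hash
 * key, so e.g. FDB table 5 and NIC-Rx table 5 resolve to distinct entries:
 *
 *	union mlx5_flow_tbl_key k = {
 *		.table_id = 5, .domain = 1, .direction = 0,
 *	};
 *	struct mlx5_hlist_entry *e = mlx5_hlist_lookup(sh->flow_tbls, k.v64);
 */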
6465
6466 /**
6467  * Release a flow table.
6468  *
6469  * @param[in] dev
6470  *   Pointer to rte_eth_dev structure.
6471  * @param[in] tbl
6472  *   Table resource to be released.
6473  *
6474  * @return
6475  *   Returns 0 if the table was released, 1 otherwise.
6476  */
6477 static int
6478 flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
6479                              struct mlx5_flow_tbl_resource *tbl)
6480 {
6481         struct mlx5_priv *priv = dev->data->dev_private;
6482         struct mlx5_ibv_shared *sh = priv->sh;
6483         struct mlx5_flow_tbl_data_entry *tbl_data =
6484                 container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6485
6486         if (!tbl)
6487                 return 0;
6488         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
6489                 struct mlx5_hlist_entry *pos = &tbl_data->entry;
6490
6491                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
6492                 tbl->obj = NULL;
6493                 /* remove the entry from the hash list and free memory. */
6494                 mlx5_hlist_remove(sh->flow_tbls, pos);
6495                 rte_free(tbl_data);
6496                 return 0;
6497         }
6498         return 1;
6499 }
6500
6501 /**
6502  * Register the flow matcher.
6503  *
6504  * @param[in, out] dev
6505  *   Pointer to rte_eth_dev structure.
6506  * @param[in, out] matcher
6507  *   Pointer to flow matcher.
6508  * @param[in, out] key
6509  *   Pointer to flow table key.
6510  * @param[in, out] dev_flow
6511  *   Pointer to the dev_flow.
6512  * @param[out] error
6513  *   Pointer to error structure.
6514  *
6515  * @return
6516  *   0 on success, otherwise -errno and errno is set.
6517  */
6518 static int
6519 flow_dv_matcher_register(struct rte_eth_dev *dev,
6520                          struct mlx5_flow_dv_matcher *matcher,
6521                          union mlx5_flow_tbl_key *key,
6522                          struct mlx5_flow *dev_flow,
6523                          struct rte_flow_error *error)
6524 {
6525         struct mlx5_priv *priv = dev->data->dev_private;
6526         struct mlx5_ibv_shared *sh = priv->sh;
6527         struct mlx5_flow_dv_matcher *cache_matcher;
6528         struct mlx5dv_flow_matcher_attr dv_attr = {
6529                 .type = IBV_FLOW_ATTR_NORMAL,
6530                 .match_mask = (void *)&matcher->mask,
6531         };
6532         struct mlx5_flow_tbl_resource *tbl;
6533         struct mlx5_flow_tbl_data_entry *tbl_data;
6534
6535         tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
6536                                        key->domain, error);
6537         if (!tbl)
6538                 return -rte_errno;      /* No need to refill the error info */
6539         tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
6540         /* Lookup from cache. */
6541         LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
6542                 if (matcher->crc == cache_matcher->crc &&
6543                     matcher->priority == cache_matcher->priority &&
6544                     !memcmp((const void *)matcher->mask.buf,
6545                             (const void *)cache_matcher->mask.buf,
6546                             cache_matcher->mask.size)) {
6547                         DRV_LOG(DEBUG,
6548                                 "%s group %u priority %hd use %s "
6549                                 "matcher %p: refcnt %d++",
6550                                 key->domain ? "FDB" : "NIC", key->table_id,
6551                                 cache_matcher->priority,
6552                                 key->direction ? "tx" : "rx",
6553                                 (void *)cache_matcher,
6554                                 rte_atomic32_read(&cache_matcher->refcnt));
6555                         rte_atomic32_inc(&cache_matcher->refcnt);
6556                         dev_flow->dv.matcher = cache_matcher;
6557                         /* old matcher should not make the table ref++. */
6558                         flow_dv_tbl_resource_release(dev, tbl);
6559                         return 0;
6560                 }
6561         }
6562         /* Register new matcher. */
6563         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
6564         if (!cache_matcher) {
6565                 flow_dv_tbl_resource_release(dev, tbl);
6566                 return rte_flow_error_set(error, ENOMEM,
6567                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6568                                           "cannot allocate matcher memory");
6569         }
6570         *cache_matcher = *matcher;
6571         dv_attr.match_criteria_enable =
6572                 flow_dv_matcher_enable(cache_matcher->mask.buf);
6573         dv_attr.priority = matcher->priority;
6574         if (key->direction)
6575                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
6576         cache_matcher->matcher_object =
6577                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
6578         if (!cache_matcher->matcher_object) {
6579                 rte_free(cache_matcher);
6580 #ifdef HAVE_MLX5DV_DR
6581                 flow_dv_tbl_resource_release(dev, tbl);
6582 #endif
6583                 return rte_flow_error_set(error, ENOMEM,
6584                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6585                                           NULL, "cannot create matcher");
6586         }
6587         /* Save the table information */
6588         cache_matcher->tbl = tbl;
6589         rte_atomic32_init(&cache_matcher->refcnt);
6590         /* only matcher ref++, table ref++ already done above in get API. */
6591         rte_atomic32_inc(&cache_matcher->refcnt);
6592         LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
6593         dev_flow->dv.matcher = cache_matcher;
6594         DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
6595                 key->domain ? "FDB" : "NIC", key->table_id,
6596                 cache_matcher->priority,
6597                 key->direction ? "tx" : "rx", (void *)cache_matcher,
6598                 rte_atomic32_read(&cache_matcher->refcnt));
6599         return 0;
6600 }
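
/*
 * Reference-count sketch (a hedged summary of the logic above, in
 * pseudocode): on a cache hit the extra table reference taken by the
 * lookup is dropped again, so only a newly created matcher pins its table:
 *
 *	tbl = flow_dv_tbl_resource_get(...);		// tbl->refcnt++
 *	if (cache hit) {
 *		cache_matcher->refcnt++;
 *		flow_dv_tbl_resource_release(dev, tbl);	// tbl->refcnt--
 *	}
 */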
6601
6602 /**
6603  * Find existing tag resource or create and register a new one.
6604  *
6605  * @param[in, out] dev
6606  *   Pointer to rte_eth_dev structure.
6607  * @param[in] tag_be24
6608  *   Tag value in big endian, right-shifted by 8 bits.
6609  * @param[in, out] dev_flow
6610  *   Pointer to the dev_flow.
6611  * @param[out] error
6612  *   Pointer to error structure.
6613  *
6614  * @return
6615  *   0 on success, otherwise -errno and errno is set.
6616  */
6617 static int
6618 flow_dv_tag_resource_register
6619                         (struct rte_eth_dev *dev,
6620                          uint32_t tag_be24,
6621                          struct mlx5_flow *dev_flow,
6622                          struct rte_flow_error *error)
6623 {
6624         struct mlx5_priv *priv = dev->data->dev_private;
6625         struct mlx5_ibv_shared *sh = priv->sh;
6626         struct mlx5_flow_dv_tag_resource *cache_resource;
6627         struct mlx5_hlist_entry *entry;
6628
6629         /* Lookup a matching resource from cache. */
6630         entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
6631         if (entry) {
6632                 cache_resource = container_of
6633                         (entry, struct mlx5_flow_dv_tag_resource, entry);
6634                 rte_atomic32_inc(&cache_resource->refcnt);
6635                 dev_flow->dv.tag_resource = cache_resource;
6636                 DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
6637                         (void *)cache_resource,
6638                         rte_atomic32_read(&cache_resource->refcnt));
6639                 return 0;
6640         }
6641         /* Register new resource. */
6642         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
6643         if (!cache_resource)
6644                 return rte_flow_error_set(error, ENOMEM,
6645                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6646                                           "cannot allocate resource memory");
6647         cache_resource->entry.key = (uint64_t)tag_be24;
6648         cache_resource->action = mlx5_glue->dv_create_flow_action_tag(tag_be24);
6649         if (!cache_resource->action) {
6650                 rte_free(cache_resource);
6651                 return rte_flow_error_set(error, ENOMEM,
6652                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6653                                           NULL, "cannot create action");
6654         }
6655         rte_atomic32_init(&cache_resource->refcnt);
6656         rte_atomic32_inc(&cache_resource->refcnt);
6657         if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
6658                 mlx5_glue->destroy_flow_action(cache_resource->action);
6659                 rte_free(cache_resource);
6660                 return rte_flow_error_set(error, EEXIST,
6661                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6662                                           NULL, "cannot insert tag");
6663         }
6664         dev_flow->dv.tag_resource = cache_resource;
6665         DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
6666                 (void *)cache_resource,
6667                 rte_atomic32_read(&cache_resource->refcnt));
6668         return 0;
6669 }
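
/*
 * Note (hedged): the tag action cache is keyed directly by the 24-bit
 * big-endian tag value, so all flows marking with the same value share a
 * single flow action object:
 *
 *	entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
 */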
6670
6671 /**
6672  * Release the tag.
6673  *
6674  * @param dev
6675  *   Pointer to Ethernet device.
6676  * @param tag
6677  *   Pointer to the tag resource to release.
6678  *
6679  * @return
6680  *   1 while a reference on it exists, 0 when freed.
6681  */
6682 static int
6683 flow_dv_tag_release(struct rte_eth_dev *dev,
6684                     struct mlx5_flow_dv_tag_resource *tag)
6685 {
6686         struct mlx5_priv *priv = dev->data->dev_private;
6687         struct mlx5_ibv_shared *sh = priv->sh;
6688
6689         assert(tag);
6690         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
6691                 dev->data->port_id, (void *)tag,
6692                 rte_atomic32_read(&tag->refcnt));
6693         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
6694                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
6695                 mlx5_hlist_remove(sh->tag_table, &tag->entry);
6696                 DRV_LOG(DEBUG, "port %u tag %p: removed",
6697                         dev->data->port_id, (void *)tag);
6698                 rte_free(tag);
6699                 return 0;
6700         }
6701         return 1;
6702 }
6703
6704 /**
6705  * Translate port ID action to vport.
6706  *
6707  * @param[in] dev
6708  *   Pointer to rte_eth_dev structure.
6709  * @param[in] action
6710  *   Pointer to the port ID action.
6711  * @param[out] dst_port_id
6712  *   The target port ID.
6713  * @param[out] error
6714  *   Pointer to the error structure.
6715  *
6716  * @return
6717  *   0 on success, a negative errno value otherwise and rte_errno is set.
6718  */
6719 static int
6720 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
6721                                  const struct rte_flow_action *action,
6722                                  uint32_t *dst_port_id,
6723                                  struct rte_flow_error *error)
6724 {
6725         uint32_t port;
6726         struct mlx5_priv *priv;
6727         const struct rte_flow_action_port_id *conf =
6728                         (const struct rte_flow_action_port_id *)action->conf;
6729
6730         port = conf->original ? dev->data->port_id : conf->id;
6731         priv = mlx5_port_to_eswitch_info(port, false);
6732         if (!priv)
6733                 return rte_flow_error_set(error, -rte_errno,
6734                                           RTE_FLOW_ERROR_TYPE_ACTION,
6735                                           NULL,
6736                                           "No eswitch info was found for port");
6737 #ifdef HAVE_MLX5DV_DR_DEVX_PORT
6738         /*
6739          * This parameter is transferred to
6740          * mlx5dv_dr_action_create_dest_ib_port().
6741          */
6742         *dst_port_id = priv->ibv_port;
6743 #else
6744         /*
6745          * Legacy mode, no LAG configuration is supported.
6746          * This parameter is transferred to
6747          * mlx5dv_dr_action_create_dest_vport().
6748          */
6749         *dst_port_id = priv->vport_id;
6750 #endif
6751         return 0;
6752 }
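
/*
 * Usage sketch (hypothetical, not driver code): translating a PORT_ID
 * action that redirects matched packets to DPDK port 2:
 *
 *	const struct rte_flow_action_port_id conf = { .id = 2 };
 *	const struct rte_flow_action act = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &conf,
 *	};
 *	uint32_t dst_port_id;
 *
 *	if (flow_dv_translate_action_port_id(dev, &act, &dst_port_id, error))
 *		return -rte_errno;
 */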
6753
6754 /**
6755  * Add Tx queue matcher.
6756  *
6757  * @param[in] dev
6758  *   Pointer to the dev struct.
6759  * @param[in, out] matcher
6760  *   Flow matcher.
6761  * @param[in, out] key
6762  *   Flow matcher value.
6763  * @param[in] item
6764  *   Flow pattern to translate.
6767  */
6768 static void
6769 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
6770                                 void *matcher, void *key,
6771                                 const struct rte_flow_item *item)
6772 {
6773         const struct mlx5_rte_flow_item_tx_queue *queue_m;
6774         const struct mlx5_rte_flow_item_tx_queue *queue_v;
6775         void *misc_m =
6776                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
6777         void *misc_v =
6778                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
6779         struct mlx5_txq_ctrl *txq;
6780         uint32_t queue;
6781
6783         queue_m = (const void *)item->mask;
6784         if (!queue_m)
6785                 return;
6786         queue_v = (const void *)item->spec;
6787         if (!queue_v)
6788                 return;
6789         txq = mlx5_txq_get(dev, queue_v->queue);
6790         if (!txq)
6791                 return;
6792         queue = txq->obj->sq->id;
6793         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
6794         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
6795                  queue & queue_m->queue);
6796         mlx5_txq_release(dev, queue_v->queue);
6797 }
6798
6799 /**
6800  * Set the hash fields according to the @p flow information.
6801  *
6802  * @param[in] dev_flow
6803  *   Pointer to the mlx5_flow.
6804  */
6805 static void
6806 flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
6807 {
6808         struct rte_flow *flow = dev_flow->flow;
6809         uint64_t items = dev_flow->layers;
6810         int rss_inner = 0;
6811         uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
6812
6813         dev_flow->hash_fields = 0;
6814 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
6815         if (flow->rss.level >= 2) {
6816                 dev_flow->hash_fields |= IBV_RX_HASH_INNER;
6817                 rss_inner = 1;
6818         }
6819 #endif
6820         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
6821             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
6822                 if (rss_types & MLX5_IPV4_LAYER_TYPES) {
6823                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
6824                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
6825                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
6826                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
6827                         else
6828                                 dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
6829                 }
6830         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
6831                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
6832                 if (rss_types & MLX5_IPV6_LAYER_TYPES) {
6833                         if (rss_types & ETH_RSS_L3_SRC_ONLY)
6834                                 dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
6835                         else if (rss_types & ETH_RSS_L3_DST_ONLY)
6836                                 dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
6837                         else
6838                                 dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
6839                 }
6840         }
6841         if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
6842             (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
6843                 if (rss_types & ETH_RSS_UDP) {
6844                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
6845                                 dev_flow->hash_fields |=
6846                                                 IBV_RX_HASH_SRC_PORT_UDP;
6847                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
6848                                 dev_flow->hash_fields |=
6849                                                 IBV_RX_HASH_DST_PORT_UDP;
6850                         else
6851                                 dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
6852                 }
6853         } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
6854                    (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
6855                 if (rss_types & ETH_RSS_TCP) {
6856                         if (rss_types & ETH_RSS_L4_SRC_ONLY)
6857                                 dev_flow->hash_fields |=
6858                                                 IBV_RX_HASH_SRC_PORT_TCP;
6859                         else if (rss_types & ETH_RSS_L4_DST_ONLY)
6860                                 dev_flow->hash_fields |=
6861                                                 IBV_RX_HASH_DST_PORT_TCP;
6862                         else
6863                                 dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
6864                 }
6865         }
6866 }
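
/*
 * Worked example (hedged): an outer IPv4/UDP sub-flow with
 * rss.types = ETH_RSS_IPV4 | ETH_RSS_UDP | ETH_RSS_L4_SRC_ONLY and
 * rss.level < 2 would end up with
 *
 *	dev_flow->hash_fields =
 *		MLX5_IPV4_IBV_RX_HASH | IBV_RX_HASH_SRC_PORT_UDP;
 *
 * i.e. hashing on both IPv4 addresses plus the UDP source port only.
 */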
6867
6868 /**
6869  * Fill the flow with DV spec, lock free
6870  * (mutex should be acquired by caller).
6871  *
6872  * @param[in] dev
6873  *   Pointer to rte_eth_dev structure.
6874  * @param[in, out] dev_flow
6875  *   Pointer to the sub flow.
6876  * @param[in] attr
6877  *   Pointer to the flow attributes.
6878  * @param[in] items
6879  *   Pointer to the list of items.
6880  * @param[in] actions
6881  *   Pointer to the list of actions.
6882  * @param[out] error
6883  *   Pointer to the error structure.
6884  *
6885  * @return
6886  *   0 on success, a negative errno value otherwise and rte_errno is set.
6887  */
6888 static int
6889 __flow_dv_translate(struct rte_eth_dev *dev,
6890                     struct mlx5_flow *dev_flow,
6891                     const struct rte_flow_attr *attr,
6892                     const struct rte_flow_item items[],
6893                     const struct rte_flow_action actions[],
6894                     struct rte_flow_error *error)
6895 {
6896         struct mlx5_priv *priv = dev->data->dev_private;
6897         struct mlx5_dev_config *dev_conf = &priv->config;
6898         struct rte_flow *flow = dev_flow->flow;
6899         uint64_t item_flags = 0;
6900         uint64_t last_item = 0;
6901         uint64_t action_flags = 0;
6902         uint64_t priority = attr->priority;
6903         struct mlx5_flow_dv_matcher matcher = {
6904                 .mask = {
6905                         .size = sizeof(matcher.mask.buf),
6906                 },
6907         };
6908         int actions_n = 0;
6909         bool actions_end = false;
6910         struct mlx5_flow_dv_modify_hdr_resource mhdr_res = {
6911                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
6912                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
6913         };
6914         union flow_dv_attr flow_attr = { .attr = 0 };
6915         uint32_t tag_be;
6916         union mlx5_flow_tbl_key tbl_key;
6917         uint32_t modify_action_position = UINT32_MAX;
6918         void *match_mask = matcher.mask.buf;
6919         void *match_value = dev_flow->dv.value.buf;
6920         uint8_t next_protocol = 0xff;
6921         struct rte_vlan_hdr vlan = { 0 };
6922         uint32_t table;
6923         int ret = 0;
6924
6925         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
6926                                        &table, error);
6927         if (ret)
6928                 return ret;
6929         dev_flow->group = table;
6930         if (attr->transfer)
6931                 mhdr_res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
6932         if (priority == MLX5_FLOW_PRIO_RSVD)
6933                 priority = dev_conf->flow_prio - 1;
6934         for (; !actions_end ; actions++) {
6935                 const struct rte_flow_action_queue *queue;
6936                 const struct rte_flow_action_rss *rss;
6937                 const struct rte_flow_action *action = actions;
6938                 const struct rte_flow_action_count *count = action->conf;
6939                 const uint8_t *rss_key;
6940                 const struct rte_flow_action_jump *jump_data;
6941                 const struct rte_flow_action_meter *mtr;
6942                 struct mlx5_flow_tbl_resource *tbl;
6943                 uint32_t port_id = 0;
6944                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
6945                 int action_type = actions->type;
6946                 const struct rte_flow_action *found_action = NULL;
6947
6948                 switch (action_type) {
6949                 case RTE_FLOW_ACTION_TYPE_VOID:
6950                         break;
6951                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
6952                         if (flow_dv_translate_action_port_id(dev, action,
6953                                                              &port_id, error))
6954                                 return -rte_errno;
6955                         port_id_resource.port_id = port_id;
6956                         if (flow_dv_port_id_action_resource_register
6957                             (dev, &port_id_resource, dev_flow, error))
6958                                 return -rte_errno;
6959                         dev_flow->dv.actions[actions_n++] =
6960                                 dev_flow->dv.port_id_action->action;
6961                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
6962                         break;
6963                 case RTE_FLOW_ACTION_TYPE_FLAG:
6964                         action_flags |= MLX5_FLOW_ACTION_FLAG;
6965                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6966                                 struct rte_flow_action_mark mark = {
6967                                         .id = MLX5_FLOW_MARK_DEFAULT,
6968                                 };
6969
6970                                 if (flow_dv_convert_action_mark(dev, &mark,
6971                                                                 &mhdr_res,
6972                                                                 error))
6973                                         return -rte_errno;
6974                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
6975                                 break;
6976                         }
6977                         tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
6978                         if (!dev_flow->dv.tag_resource)
6979                                 if (flow_dv_tag_resource_register
6980                                     (dev, tag_be, dev_flow, error))
6981                                         return -rte_errno;
6982                         dev_flow->dv.actions[actions_n++] =
6983                                 dev_flow->dv.tag_resource->action;
6984                         break;
6985                 case RTE_FLOW_ACTION_TYPE_MARK:
6986                         action_flags |= MLX5_FLOW_ACTION_MARK;
6987                         if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
6988                                 const struct rte_flow_action_mark *mark =
6989                                         (const struct rte_flow_action_mark *)
6990                                                 actions->conf;
6991
6992                                 if (flow_dv_convert_action_mark(dev, mark,
6993                                                                 &mhdr_res,
6994                                                                 error))
6995                                         return -rte_errno;
6996                                 action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
6997                                 break;
6998                         }
6999                         /* Fall-through */
7000                 case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
7001                         /* Legacy (non-extensive) MARK action. */
7002                         tag_be = mlx5_flow_mark_set
7003                               (((const struct rte_flow_action_mark *)
7004                                (actions->conf))->id);
7005                         if (!dev_flow->dv.tag_resource)
7006                                 if (flow_dv_tag_resource_register
7007                                     (dev, tag_be, dev_flow, error))
7008                                         return -rte_errno;
7009                         dev_flow->dv.actions[actions_n++] =
7010                                 dev_flow->dv.tag_resource->action;
7011                         break;
7012                 case RTE_FLOW_ACTION_TYPE_SET_META:
7013                         if (flow_dv_convert_action_set_meta
7014                                 (dev, &mhdr_res, attr,
7015                                  (const struct rte_flow_action_set_meta *)
7016                                   actions->conf, error))
7017                                 return -rte_errno;
7018                         action_flags |= MLX5_FLOW_ACTION_SET_META;
7019                         break;
7020                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
7021                         if (flow_dv_convert_action_set_tag
7022                                 (dev, &mhdr_res,
7023                                  (const struct rte_flow_action_set_tag *)
7024                                   actions->conf, error))
7025                                 return -rte_errno;
7026                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7027                         break;
7028                 case RTE_FLOW_ACTION_TYPE_DROP:
7029                         action_flags |= MLX5_FLOW_ACTION_DROP;
7030                         break;
7031                 case RTE_FLOW_ACTION_TYPE_QUEUE:
7032                         assert(flow->rss.queue);
7033                         queue = actions->conf;
7034                         flow->rss.queue_num = 1;
7035                         (*flow->rss.queue)[0] = queue->index;
7036                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
7037                         break;
7038                 case RTE_FLOW_ACTION_TYPE_RSS:
7039                         assert(flow->rss.queue);
7040                         rss = actions->conf;
7041                         if (flow->rss.queue)
7042                                 memcpy((*flow->rss.queue), rss->queue,
7043                                        rss->queue_num * sizeof(uint16_t));
7044                         flow->rss.queue_num = rss->queue_num;
7045                         /* NULL RSS key indicates default RSS key. */
7046                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
7047                         memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
7048                         /*
7049                          * rss->level and rss.types should be set in advance
7050                          * when expanding items for RSS.
7051                          */
7052                         action_flags |= MLX5_FLOW_ACTION_RSS;
7053                         break;
7054                 case RTE_FLOW_ACTION_TYPE_COUNT:
7055                         if (!dev_conf->devx) {
7056                                 rte_errno = ENOTSUP;
7057                                 goto cnt_err;
7058                         }
7059                         flow->counter = flow_dv_counter_alloc(dev,
7060                                                               count->shared,
7061                                                               count->id,
7062                                                               dev_flow->group);
7063                         if (flow->counter == NULL)
7064                                 goto cnt_err;
7065                         dev_flow->dv.actions[actions_n++] =
7066                                 flow->counter->action;
7067                         action_flags |= MLX5_FLOW_ACTION_COUNT;
7068                         break;
7069 cnt_err:
7070                         if (rte_errno == ENOTSUP)
7071                                 return rte_flow_error_set
7072                                               (error, ENOTSUP,
7073                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7074                                                NULL,
7075                                                "count action not supported");
7076                         else
7077                                 return rte_flow_error_set
7078                                                 (error, rte_errno,
7079                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7080                                                  action,
7081                                                  "cannot create counter"
7082                                                   " object.");
7083                         break;
7084                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
7085                         dev_flow->dv.actions[actions_n++] =
7086                                                 priv->sh->pop_vlan_action;
7087                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
7088                         break;
7089                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
7090                         flow_dev_get_vlan_info_from_items(items, &vlan);
7091                         vlan.eth_proto = rte_be_to_cpu_16
7092                              ((((const struct rte_flow_action_of_push_vlan *)
7093                                                    actions->conf)->ethertype));
7094                         found_action = mlx5_flow_find_action
7095                                         (actions + 1,
7096                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
7097                         if (found_action)
7098                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7099                         found_action = mlx5_flow_find_action
7100                                         (actions + 1,
7101                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
7102                         if (found_action)
7103                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
7104                         if (flow_dv_create_action_push_vlan
7105                                             (dev, attr, &vlan, dev_flow, error))
7106                                 return -rte_errno;
7107                         dev_flow->dv.actions[actions_n++] =
7108                                            dev_flow->dv.push_vlan_res->action;
7109                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
7110                         break;
7111                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
7112                         /* Handled by the preceding of_vlan_push action. */
7113                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
7114                         break;
7115                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
7116                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
7117                                 break;
7118                         flow_dev_get_vlan_info_from_items(items, &vlan);
7119                         mlx5_update_vlan_vid_pcp(actions, &vlan);
7120                         /* No VLAN push preceding: this is a modify header action. */
7121                         if (flow_dv_convert_action_modify_vlan_vid
7122                                                 (&mhdr_res, actions, error))
7123                                 return -rte_errno;
7124                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
7125                         break;
7126                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
7127                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
7128                         if (flow_dv_create_action_l2_encap(dev, actions,
7129                                                            dev_flow,
7130                                                            attr->transfer,
7131                                                            error))
7132                                 return -rte_errno;
7133                         dev_flow->dv.actions[actions_n++] =
7134                                 dev_flow->dv.encap_decap->verbs_action;
7135                         action_flags |= actions->type ==
7136                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
7137                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
7138                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
7139                         break;
7140                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
7141                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
7142                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
7143                                                            attr->transfer,
7144                                                            error))
7145                                 return -rte_errno;
7146                         dev_flow->dv.actions[actions_n++] =
7147                                 dev_flow->dv.encap_decap->verbs_action;
7148                         action_flags |= actions->type ==
7149                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
7150                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
7151                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
7152                         break;
7153                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7154                         /* Handle encap with preceding decap. */
7155                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
7156                                 if (flow_dv_create_action_raw_encap
7157                                         (dev, actions, dev_flow, attr, error))
7158                                         return -rte_errno;
7159                                 dev_flow->dv.actions[actions_n++] =
7160                                         dev_flow->dv.encap_decap->verbs_action;
7161                         } else {
7162                                 /* Handle encap without preceding decap. */
7163                                 if (flow_dv_create_action_l2_encap
7164                                     (dev, actions, dev_flow, attr->transfer,
7165                                      error))
7166                                         return -rte_errno;
7167                                 dev_flow->dv.actions[actions_n++] =
7168                                         dev_flow->dv.encap_decap->verbs_action;
7169                         }
7170                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
7171                         break;
7172                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
7173                         /* Check if this decap is followed by encap. */
7174                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
7175                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
7176                                action++) {
7177                         }
7178                         /* Handle decap only if it isn't followed by encap. */
7179                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
7180                                 if (flow_dv_create_action_l2_decap
7181                                     (dev, dev_flow, attr->transfer, error))
7182                                         return -rte_errno;
7183                                 dev_flow->dv.actions[actions_n++] =
7184                                         dev_flow->dv.encap_decap->verbs_action;
7185                         }
7186                         /* If decap is followed by encap, handle it at encap. */
7187                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
7188                         break;
7189                 case RTE_FLOW_ACTION_TYPE_JUMP:
7190                         jump_data = action->conf;
7191                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
7192                                                        jump_data->group, &table,
7193                                                        error);
7194                         if (ret)
7195                                 return ret;
7196                         tbl = flow_dv_tbl_resource_get(dev, table,
7197                                                        attr->egress,
7198                                                        attr->transfer, error);
7199                         if (!tbl)
7200                                 return rte_flow_error_set
7201                                                 (error, errno,
7202                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7203                                                  NULL,
7204                                                  "cannot create jump action.");
7205                         if (flow_dv_jump_tbl_resource_register
7206                             (dev, tbl, dev_flow, error)) {
7207                                 flow_dv_tbl_resource_release(dev, tbl);
7208                                 return rte_flow_error_set
7209                                                 (error, errno,
7210                                                  RTE_FLOW_ERROR_TYPE_ACTION,
7211                                                  NULL,
7212                                                  "cannot create jump action.");
7213                         }
7214                         dev_flow->dv.actions[actions_n++] =
7215                                 dev_flow->dv.jump->action;
7216                         action_flags |= MLX5_FLOW_ACTION_JUMP;
7217                         break;
7218                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
7219                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
7220                         if (flow_dv_convert_action_modify_mac
7221                                         (&mhdr_res, actions, error))
7222                                 return -rte_errno;
7223                         action_flags |= actions->type ==
7224                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
7225                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
7226                                         MLX5_FLOW_ACTION_SET_MAC_DST;
7227                         break;
7228                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
7229                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
7230                         if (flow_dv_convert_action_modify_ipv4
7231                                         (&mhdr_res, actions, error))
7232                                 return -rte_errno;
7233                         action_flags |= actions->type ==
7234                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
7235                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
7236                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
7237                         break;
7238                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
7239                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
7240                         if (flow_dv_convert_action_modify_ipv6
7241                                         (&mhdr_res, actions, error))
7242                                 return -rte_errno;
7243                         action_flags |= actions->type ==
7244                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
7245                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
7246                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
7247                         break;
7248                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
7249                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
7250                         if (flow_dv_convert_action_modify_tp
7251                                         (&mhdr_res, actions, items,
7252                                          &flow_attr, error))
7253                                 return -rte_errno;
7254                         action_flags |= actions->type ==
7255                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
7256                                         MLX5_FLOW_ACTION_SET_TP_SRC :
7257                                         MLX5_FLOW_ACTION_SET_TP_DST;
7258                         break;
7259                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
7260                         if (flow_dv_convert_action_modify_dec_ttl
7261                                         (&mhdr_res, items, &flow_attr, error))
7262                                 return -rte_errno;
7263                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
7264                         break;
7265                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
7266                         if (flow_dv_convert_action_modify_ttl
7267                                         (&mhdr_res, actions, items,
7268                                          &flow_attr, error))
7269                                 return -rte_errno;
7270                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
7271                         break;
7272                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
7273                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
7274                         if (flow_dv_convert_action_modify_tcp_seq
7275                                         (&mhdr_res, actions, error))
7276                                 return -rte_errno;
7277                         action_flags |= actions->type ==
7278                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
7279                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
7280                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
7281                         break;
7282
7283                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
7284                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
7285                         if (flow_dv_convert_action_modify_tcp_ack
7286                                         (&mhdr_res, actions, error))
7287                                 return -rte_errno;
7288                         action_flags |= actions->type ==
7289                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
7290                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
7291                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
7292                         break;
7293                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
7294                         if (flow_dv_convert_action_set_reg
7295                                         (&mhdr_res, actions, error))
7296                                 return -rte_errno;
7297                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7298                         break;
7299                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
7300                         if (flow_dv_convert_action_copy_mreg
7301                                         (dev, &mhdr_res, actions, error))
7302                                 return -rte_errno;
7303                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
7304                         break;
7305                 case RTE_FLOW_ACTION_TYPE_METER:
7306                         mtr = actions->conf;
7307                         if (!flow->meter) {
7308                                 flow->meter = mlx5_flow_meter_attach(priv,
7309                                                         mtr->mtr_id, attr,
7310                                                         error);
7311                                 if (!flow->meter)
7312                                         return rte_flow_error_set(error,
7313                                                 rte_errno,
7314                                                 RTE_FLOW_ERROR_TYPE_ACTION,
7315                                                 NULL,
7316                                                 "meter not found "
7317                                                 "or invalid parameters");
7318                         }
7319                         /* Set the meter action. */
7320                         dev_flow->dv.actions[actions_n++] =
7321                                 flow->meter->mfts->meter_action;
7322                         action_flags |= MLX5_FLOW_ACTION_METER;
7323                         break;
7324                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
7325                         if (flow_dv_convert_action_modify_ipv4_dscp(&mhdr_res,
7326                                                               actions, error))
7327                                 return -rte_errno;
7328                         action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
7329                         break;
7330                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
7331                         if (flow_dv_convert_action_modify_ipv6_dscp(&mhdr_res,
7332                                                               actions, error))
7333                                 return -rte_errno;
7334                         action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
7335                         break;
7336                 case RTE_FLOW_ACTION_TYPE_END:
7337                         actions_end = true;
7338                         if (mhdr_res.actions_num) {
7339                                 /* Create modify header action if needed. */
7340                                 if (flow_dv_modify_hdr_resource_register
7341                                         (dev, &mhdr_res, dev_flow, error))
7342                                         return -rte_errno;
7343                                 dev_flow->dv.actions[modify_action_position] =
7344                                         dev_flow->dv.modify_hdr->verbs_action;
7345                         }
7346                         break;
7347                 default:
7348                         break;
7349                 }
7350                 if (mhdr_res.actions_num &&
7351                     modify_action_position == UINT32_MAX)
7352                         modify_action_position = actions_n++;
7353         }
7354         dev_flow->dv.actions_n = actions_n;
7355         dev_flow->actions = action_flags;
7356         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
7357                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
7358                 int item_type = items->type;
7359
7360                 switch (item_type) {
7361                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7362                         flow_dv_translate_item_port_id(dev, match_mask,
7363                                                        match_value, items);
7364                         last_item = MLX5_FLOW_ITEM_PORT_ID;
7365                         break;
7366                 case RTE_FLOW_ITEM_TYPE_ETH:
7367                         flow_dv_translate_item_eth(match_mask, match_value,
7368                                                    items, tunnel);
7369                         matcher.priority = MLX5_PRIORITY_MAP_L2;
7370                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
7371                                              MLX5_FLOW_LAYER_OUTER_L2;
7372                         break;
7373                 case RTE_FLOW_ITEM_TYPE_VLAN:
7374                         flow_dv_translate_item_vlan(dev_flow,
7375                                                     match_mask, match_value,
7376                                                     items, tunnel);
7377                         matcher.priority = MLX5_PRIORITY_MAP_L2;
7378                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
7379                                               MLX5_FLOW_LAYER_INNER_VLAN) :
7380                                              (MLX5_FLOW_LAYER_OUTER_L2 |
7381                                               MLX5_FLOW_LAYER_OUTER_VLAN);
7382                         break;
7383                 case RTE_FLOW_ITEM_TYPE_IPV4:
7384                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7385                                                   &item_flags, &tunnel);
7386                         flow_dv_translate_item_ipv4(match_mask, match_value,
7387                                                     items, tunnel,
7388                                                     dev_flow->group);
7389                         matcher.priority = MLX5_PRIORITY_MAP_L3;
7390                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
7391                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
7392                         if (items->mask != NULL &&
7393                             ((const struct rte_flow_item_ipv4 *)
7394                              items->mask)->hdr.next_proto_id) {
7395                                 next_protocol =
7396                                         ((const struct rte_flow_item_ipv4 *)
7397                                          (items->spec))->hdr.next_proto_id;
7398                                 next_protocol &=
7399                                         ((const struct rte_flow_item_ipv4 *)
7400                                          (items->mask))->hdr.next_proto_id;
7401                         } else {
7402                                 /* Reset for inner layer. */
7403                                 next_protocol = 0xff;
7404                         }
7405                         break;
7406                 case RTE_FLOW_ITEM_TYPE_IPV6:
7407                         mlx5_flow_tunnel_ip_check(items, next_protocol,
7408                                                   &item_flags, &tunnel);
7409                         flow_dv_translate_item_ipv6(match_mask, match_value,
7410                                                     items, tunnel,
7411                                                     dev_flow->group);
7412                         matcher.priority = MLX5_PRIORITY_MAP_L3;
7413                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
7414                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
7415                         if (items->mask != NULL &&
7416                             ((const struct rte_flow_item_ipv6 *)
7417                              items->mask)->hdr.proto) {
7418                                 next_protocol =
7419                                         ((const struct rte_flow_item_ipv6 *)
7420                                          items->spec)->hdr.proto;
7421                                 next_protocol &=
7422                                         ((const struct rte_flow_item_ipv6 *)
7423                                          items->mask)->hdr.proto;
7424                         } else {
7425                                 /* Reset for inner layer. */
7426                                 next_protocol = 0xff;
7427                         }
7428                         break;
7429                 case RTE_FLOW_ITEM_TYPE_TCP:
7430                         flow_dv_translate_item_tcp(match_mask, match_value,
7431                                                    items, tunnel);
7432                         matcher.priority = MLX5_PRIORITY_MAP_L4;
7433                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
7434                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
7435                         break;
7436                 case RTE_FLOW_ITEM_TYPE_UDP:
7437                         flow_dv_translate_item_udp(match_mask, match_value,
7438                                                    items, tunnel);
7439                         matcher.priority = MLX5_PRIORITY_MAP_L4;
7440                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
7441                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
7442                         break;
7443                 case RTE_FLOW_ITEM_TYPE_GRE:
7444                         flow_dv_translate_item_gre(match_mask, match_value,
7445                                                    items, tunnel);
7446                         last_item = MLX5_FLOW_LAYER_GRE;
7447                         break;
7448                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7449                         flow_dv_translate_item_gre_key(match_mask,
7450                                                        match_value, items);
7451                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
7452                         break;
7453                 case RTE_FLOW_ITEM_TYPE_NVGRE:
7454                         flow_dv_translate_item_nvgre(match_mask, match_value,
7455                                                      items, tunnel);
7456                         last_item = MLX5_FLOW_LAYER_GRE;
7457                         break;
7458                 case RTE_FLOW_ITEM_TYPE_VXLAN:
7459                         flow_dv_translate_item_vxlan(match_mask, match_value,
7460                                                      items, tunnel);
7461                         last_item = MLX5_FLOW_LAYER_VXLAN;
7462                         break;
7463                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7464                         flow_dv_translate_item_vxlan_gpe(match_mask,
7465                                         match_value, items, tunnel);
7466                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
7467                         break;
7468                 case RTE_FLOW_ITEM_TYPE_GENEVE:
7469                         flow_dv_translate_item_geneve(match_mask, match_value,
7470                                                       items, tunnel);
7471                         last_item = MLX5_FLOW_LAYER_GENEVE;
7472                         break;
7473                 case RTE_FLOW_ITEM_TYPE_MPLS:
7474                         flow_dv_translate_item_mpls(match_mask, match_value,
7475                                                     items, last_item, tunnel);
7476                         last_item = MLX5_FLOW_LAYER_MPLS;
7477                         break;
7478                 case RTE_FLOW_ITEM_TYPE_MARK:
7479                         flow_dv_translate_item_mark(dev, match_mask,
7480                                                     match_value, items);
7481                         last_item = MLX5_FLOW_ITEM_MARK;
7482                         break;
7483                 case RTE_FLOW_ITEM_TYPE_META:
7484                         flow_dv_translate_item_meta(dev, match_mask,
7485                                                     match_value, attr, items);
7486                         last_item = MLX5_FLOW_ITEM_METADATA;
7487                         break;
7488                 case RTE_FLOW_ITEM_TYPE_ICMP:
7489                         flow_dv_translate_item_icmp(match_mask, match_value,
7490                                                     items, tunnel);
7491                         last_item = MLX5_FLOW_LAYER_ICMP;
7492                         break;
7493                 case RTE_FLOW_ITEM_TYPE_ICMP6:
7494                         flow_dv_translate_item_icmp6(match_mask, match_value,
7495                                                       items, tunnel);
7496                         last_item = MLX5_FLOW_LAYER_ICMP6;
7497                         break;
7498                 case RTE_FLOW_ITEM_TYPE_TAG:
7499                         flow_dv_translate_item_tag(dev, match_mask,
7500                                                    match_value, items);
7501                         last_item = MLX5_FLOW_ITEM_TAG;
7502                         break;
7503                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
7504                         flow_dv_translate_mlx5_item_tag(dev, match_mask,
7505                                                         match_value, items);
7506                         last_item = MLX5_FLOW_ITEM_TAG;
7507                         break;
7508                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
7509                         flow_dv_translate_item_tx_queue(dev, match_mask,
7510                                                         match_value,
7511                                                         items);
7512                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
7513                         break;
7514                 default:
7515                         break;
7516                 }
7517                 item_flags |= last_item;
7518         }
7519         /*
7520          * For ingress traffic when E-Switch mode is enabled, there are
7521          * two cases where the source port must be set manually: a NIC
7522          * steering rule, and an E-Switch rule in which no port_id item
7523          * was found. In both cases the source port is set according to
7524          * the current port in use.
7525          */
7526         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
7527             (priv->representor || priv->master)) {
7528                 if (flow_dv_translate_item_port_id(dev, match_mask,
7529                                                    match_value, NULL))
7530                         return -rte_errno;
7531         }
7532         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
7533                                          dev_flow->dv.value.buf));
7534         dev_flow->layers = item_flags;
7535         if (action_flags & MLX5_FLOW_ACTION_RSS)
7536                 flow_dv_hashfields_set(dev_flow);
7537         /* Register matcher. */
7538         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
7539                                     matcher.mask.size);
7540         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
7541                                                      matcher.priority);
7542         /* The reserved field does not need to be set to 0 here. */
7543         tbl_key.domain = attr->transfer;
7544         tbl_key.direction = attr->egress;
7545         tbl_key.table_id = dev_flow->group;
7546         if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
7547                 return -rte_errno;
7548         return 0;
7549 }
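
/*
 * Usage sketch (illustrative only, not compiled): the kind of action
 * list __flow_dv_translate() consumes, exercising the METER and
 * SET_IPV4_DSCP branches above. The meter with mtr_id 1 is assumed to
 * have been created beforehand through the rte_mtr API.
 */
#if 0
	struct rte_flow_action_meter meter_conf = { .mtr_id = 1 };
	struct rte_flow_action_set_dscp dscp_conf = { .dscp = 46 };
	struct rte_flow_action example_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP,
		  .conf = &dscp_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
#endif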
7550
7551 /**
7552  * Apply the flow to the NIC.
7553  * Lock free, (mutex should be acquired by caller).
7554  *
7555  * @param[in] dev
7556  *   Pointer to the Ethernet device structure.
7557  * @param[in, out] flow
7558  *   Pointer to flow structure.
7559  * @param[out] error
7560  *   Pointer to error structure.
7561  *
7562  * @return
7563  *   0 on success, a negative errno value otherwise and rte_errno is set.
7564  */
7565 static int
7566 __flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
7567                 struct rte_flow_error *error)
7568 {
7569         struct mlx5_flow_dv *dv;
7570         struct mlx5_flow *dev_flow;
7571         struct mlx5_priv *priv = dev->data->dev_private;
7572         int n;
7573         int err;
7574
7575         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7576                 dv = &dev_flow->dv;
7577                 n = dv->actions_n;
7578                 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
7579                         if (dev_flow->transfer) {
7580                                 dv->actions[n++] = priv->sh->esw_drop_action;
7581                         } else {
7582                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
7583                                 if (!dv->hrxq) {
7584                                         rte_flow_error_set
7585                                                 (error, errno,
7586                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7587                                                  NULL,
7588                                                  "cannot get drop hash queue");
7589                                         goto error;
7590                                 }
7591                                 dv->actions[n++] = dv->hrxq->action;
7592                         }
7593                 } else if (dev_flow->actions &
7594                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
7595                         struct mlx5_hrxq *hrxq;
7596
7597                         assert(flow->rss.queue);
7598                         hrxq = mlx5_hrxq_get(dev, flow->rss.key,
7599                                              MLX5_RSS_HASH_KEY_LEN,
7600                                              dev_flow->hash_fields,
7601                                              (*flow->rss.queue),
7602                                              flow->rss.queue_num);
7603                         if (!hrxq) {
7604                                 hrxq = mlx5_hrxq_new
7605                                         (dev, flow->rss.key,
7606                                          MLX5_RSS_HASH_KEY_LEN,
7607                                          dev_flow->hash_fields,
7608                                          (*flow->rss.queue),
7609                                          flow->rss.queue_num,
7610                                          !!(dev_flow->layers &
7611                                             MLX5_FLOW_LAYER_TUNNEL));
7612                         }
7613                         if (!hrxq) {
7614                                 rte_flow_error_set
7615                                         (error, rte_errno,
7616                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
7617                                          "cannot get hash queue");
7618                                 goto error;
7619                         }
7620                         dv->hrxq = hrxq;
7621                         dv->actions[n++] = dv->hrxq->action;
7622                 }
7623                 dv->flow =
7624                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
7625                                                   (void *)&dv->value, n,
7626                                                   dv->actions);
7627                 if (!dv->flow) {
7628                         rte_flow_error_set(error, errno,
7629                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7630                                            NULL,
7631                                            "hardware refuses to create flow");
7632                         goto error;
7633                 }
7634                 if (priv->vmwa_context &&
7635                     dev_flow->dv.vf_vlan.tag &&
7636                     !dev_flow->dv.vf_vlan.created) {
7637                         /*
7638                          * The rule contains the VLAN pattern.
7639                          * For VF we are going to create VLAN
7640                          * interface to make hypervisor set correct
7641                          * e-Switch vport context.
7642                          */
7643                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
7644                 }
7645         }
7646         return 0;
7647 error:
7648         err = rte_errno; /* Save rte_errno before cleanup. */
7649         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7650                 struct mlx5_flow_dv *dv = &dev_flow->dv;
7651                 if (dv->hrxq) {
7652                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
7653                                 mlx5_hrxq_drop_release(dev);
7654                         else
7655                                 mlx5_hrxq_release(dev, dv->hrxq);
7656                         dv->hrxq = NULL;
7657                 }
7658                 if (dev_flow->dv.vf_vlan.tag &&
7659                     dev_flow->dv.vf_vlan.created)
7660                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
7661         }
7662         rte_errno = err; /* Restore rte_errno. */
7663         return -rte_errno;
7664 }
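
/*
 * Usage sketch (illustrative only, not compiled): an RSS action as an
 * application would pass it; __flow_dv_apply() resolves it to a hash
 * Rx queue via mlx5_hrxq_get()/mlx5_hrxq_new(). The queue list and the
 * 40-byte key below are placeholders.
 */
#if 0
	uint16_t queues[] = { 0, 1, 2, 3 };
	uint8_t rss_key[40] = { 0 }; /* placeholder hash key */
	struct rte_flow_action_rss rss_conf = {
		.types = ETH_RSS_IP,
		.key_len = sizeof(rss_key),
		.key = rss_key,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	struct rte_flow_action example_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
#endif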
7665
7666 /**
7667  * Release the flow matcher.
7668  *
7669  * @param dev
7670  *   Pointer to Ethernet device.
7671  * @param flow
7672  *   Pointer to mlx5_flow.
7673  *
7674  * @return
7675  *   1 while a reference on it exists, 0 when freed.
7676  */
7677 static int
7678 flow_dv_matcher_release(struct rte_eth_dev *dev,
7679                         struct mlx5_flow *flow)
7680 {
7681         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
7682
7683         assert(matcher->matcher_object);
7684         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
7685                 dev->data->port_id, (void *)matcher,
7686                 rte_atomic32_read(&matcher->refcnt));
7687         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
7688                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
7689                            (matcher->matcher_object));
7690                 LIST_REMOVE(matcher, next);
7691                 /* table ref-- in release interface. */
7692                 flow_dv_tbl_resource_release(dev, matcher->tbl);
7693                 rte_free(matcher);
7694                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
7695                         dev->data->port_id, (void *)matcher);
7696                 return 0;
7697         }
7698         return 1;
7699 }
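
/*
 * The *_release() helpers below all share the same atomic refcount
 * pattern; a minimal generic sketch (illustrative only, "res" and
 * "destroy_obj" are placeholders):
 */
#if 0
	if (rte_atomic32_dec_and_test(&res->refcnt)) {
		claim_zero(destroy_obj(res->object)); /* drop the HW object */
		LIST_REMOVE(res, next); /* unlink from the cache list */
		rte_free(res); /* free the host memory */
		return 0; /* freed */
	}
	return 1; /* a reference still exists */
#endif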
7700
7701 /**
7702  * Release an encap/decap resource.
7703  *
7704  * @param flow
7705  *   Pointer to mlx5_flow.
7706  *
7707  * @return
7708  *   1 while a reference on it exists, 0 when freed.
7709  */
7710 static int
7711 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
7712 {
7713         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
7714                                                 flow->dv.encap_decap;
7715
7716         assert(cache_resource->verbs_action);
7717         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
7718                 (void *)cache_resource,
7719                 rte_atomic32_read(&cache_resource->refcnt));
7720         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7721                 claim_zero(mlx5_glue->destroy_flow_action
7722                                 (cache_resource->verbs_action));
7723                 LIST_REMOVE(cache_resource, next);
7724                 rte_free(cache_resource);
7725                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
7726                         (void *)cache_resource);
7727                 return 0;
7728         }
7729         return 1;
7730 }
7731
7732 /**
7733  * Release a jump to table action resource.
7734  *
7735  * @param dev
7736  *   Pointer to Ethernet device.
7737  * @param flow
7738  *   Pointer to mlx5_flow.
7739  *
7740  * @return
7741  *   1 while a reference on it exists, 0 when freed.
7742  */
7743 static int
7744 flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
7745                                   struct mlx5_flow *flow)
7746 {
7747         struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
7748         struct mlx5_flow_tbl_data_entry *tbl_data =
7749                         container_of(cache_resource,
7750                                      struct mlx5_flow_tbl_data_entry, jump);
7751
7752         assert(cache_resource->action);
7753         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
7754                 (void *)cache_resource,
7755                 rte_atomic32_read(&cache_resource->refcnt));
7756         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7757                 claim_zero(mlx5_glue->destroy_flow_action
7758                                 (cache_resource->action));
7759                 /* jump action memory free is inside the table release. */
7760                 flow_dv_tbl_resource_release(dev, &tbl_data->tbl);
7761                 DRV_LOG(DEBUG, "jump table resource %p: removed",
7762                         (void *)cache_resource);
7763                 return 0;
7764         }
7765         return 1;
7766 }
7767
7768 /**
7769  * Release a modify-header resource.
7770  *
7771  * @param flow
7772  *   Pointer to mlx5_flow.
7773  *
7774  * @return
7775  *   1 while a reference on it exists, 0 when freed.
7776  */
7777 static int
7778 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
7779 {
7780         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
7781                                                 flow->dv.modify_hdr;
7782
7783         assert(cache_resource->verbs_action);
7784         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
7785                 (void *)cache_resource,
7786                 rte_atomic32_read(&cache_resource->refcnt));
7787         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7788                 claim_zero(mlx5_glue->destroy_flow_action
7789                                 (cache_resource->verbs_action));
7790                 LIST_REMOVE(cache_resource, next);
7791                 rte_free(cache_resource);
7792                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
7793                         (void *)cache_resource);
7794                 return 0;
7795         }
7796         return 1;
7797 }
7798
7799 /**
7800  * Release port ID action resource.
7801  *
7802  * @param flow
7803  *   Pointer to mlx5_flow.
7804  *
7805  * @return
7806  *   1 while a reference on it exists, 0 when freed.
7807  */
7808 static int
7809 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
7810 {
7811         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
7812                 flow->dv.port_id_action;
7813
7814         assert(cache_resource->action);
7815         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
7816                 (void *)cache_resource,
7817                 rte_atomic32_read(&cache_resource->refcnt));
7818         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7819                 claim_zero(mlx5_glue->destroy_flow_action
7820                                 (cache_resource->action));
7821                 LIST_REMOVE(cache_resource, next);
7822                 rte_free(cache_resource);
7823                 DRV_LOG(DEBUG, "port id action resource %p: removed",
7824                         (void *)cache_resource);
7825                 return 0;
7826         }
7827         return 1;
7828 }
7829
7830 /**
7831  * Release push vlan action resource.
7832  *
7833  * @param flow
7834  *   Pointer to mlx5_flow.
7835  *
7836  * @return
7837  *   1 while a reference on it exists, 0 when freed.
7838  */
7839 static int
7840 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
7841 {
7842         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
7843                 flow->dv.push_vlan_res;
7844
7845         assert(cache_resource->action);
7846         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
7847                 (void *)cache_resource,
7848                 rte_atomic32_read(&cache_resource->refcnt));
7849         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
7850                 claim_zero(mlx5_glue->destroy_flow_action
7851                                 (cache_resource->action));
7852                 LIST_REMOVE(cache_resource, next);
7853                 rte_free(cache_resource);
7854                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
7855                         (void *)cache_resource);
7856                 return 0;
7857         }
7858         return 1;
7859 }
7860
7861 /**
7862  * Remove the flow from the NIC but keep it in memory.
7863  * Lock free, (mutex should be acquired by caller).
7864  *
7865  * @param[in] dev
7866  *   Pointer to Ethernet device.
7867  * @param[in, out] flow
7868  *   Pointer to flow structure.
7869  */
7870 static void
7871 __flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
7872 {
7873         struct mlx5_flow_dv *dv;
7874         struct mlx5_flow *dev_flow;
7875
7876         if (!flow)
7877                 return;
7878         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
7879                 dv = &dev_flow->dv;
7880                 if (dv->flow) {
7881                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
7882                         dv->flow = NULL;
7883                 }
7884                 if (dv->hrxq) {
7885                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
7886                                 mlx5_hrxq_drop_release(dev);
7887                         else
7888                                 mlx5_hrxq_release(dev, dv->hrxq);
7889                         dv->hrxq = NULL;
7890                 }
7891                 if (dev_flow->dv.vf_vlan.tag &&
7892                     dev_flow->dv.vf_vlan.created)
7893                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
7894         }
7895 }
7896
7897 /**
7898  * Remove the flow from the NIC and the memory.
7899  * Lock free, (mutex should be acquired by caller).
7900  *
7901  * @param[in] dev
7902  *   Pointer to the Ethernet device structure.
7903  * @param[in, out] flow
7904  *   Pointer to flow structure.
7905  */
7906 static void
7907 __flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
7908 {
7909         struct mlx5_flow *dev_flow;
7910
7911         if (!flow)
7912                 return;
7913         __flow_dv_remove(dev, flow);
7914         if (flow->counter) {
7915                 flow_dv_counter_release(dev, flow->counter);
7916                 flow->counter = NULL;
7917         }
7918         if (flow->meter) {
7919                 mlx5_flow_meter_detach(flow->meter);
7920                 flow->meter = NULL;
7921         }
7922         while (!LIST_EMPTY(&flow->dev_flows)) {
7923                 dev_flow = LIST_FIRST(&flow->dev_flows);
7924                 LIST_REMOVE(dev_flow, next);
7925                 if (dev_flow->dv.matcher)
7926                         flow_dv_matcher_release(dev, dev_flow);
7927                 if (dev_flow->dv.encap_decap)
7928                         flow_dv_encap_decap_resource_release(dev_flow);
7929                 if (dev_flow->dv.modify_hdr)
7930                         flow_dv_modify_hdr_resource_release(dev_flow);
7931                 if (dev_flow->dv.jump)
7932                         flow_dv_jump_tbl_resource_release(dev, dev_flow);
7933                 if (dev_flow->dv.port_id_action)
7934                         flow_dv_port_id_action_resource_release(dev_flow);
7935                 if (dev_flow->dv.push_vlan_res)
7936                         flow_dv_push_vlan_action_resource_release(dev_flow);
7937                 if (dev_flow->dv.tag_resource)
7938                         flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
7939                 rte_free(dev_flow);
7940         }
7941 }
7942
7943 /**
7944  * Query a DV flow rule for its statistics via DevX.
7945  *
7946  * @param[in] dev
7947  *   Pointer to Ethernet device.
7948  * @param[in] flow
7949  *   Pointer to the sub flow.
7950  * @param[out] data
7951  *   Data retrieved by the query.
7952  * @param[out] error
7953  *   Perform verbose error reporting if not NULL.
7954  *
7955  * @return
7956  *   0 on success, a negative errno value otherwise and rte_errno is set.
7957  */
7958 static int
7959 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
7960                     void *data, struct rte_flow_error *error)
7961 {
7962         struct mlx5_priv *priv = dev->data->dev_private;
7963         struct rte_flow_query_count *qc = data;
7964
7965         if (!priv->config.devx)
7966                 return rte_flow_error_set(error, ENOTSUP,
7967                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7968                                           NULL,
7969                                           "counters are not supported");
7970         if (flow->counter) {
7971                 uint64_t pkts, bytes;
7972                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
7973                                                &bytes);
7974
7975                 if (err)
7976                         return rte_flow_error_set(error, -err,
7977                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7978                                         NULL, "cannot read counters");
7979                 qc->hits_set = 1;
7980                 qc->bytes_set = 1;
7981                 qc->hits = pkts - flow->counter->hits;
7982                 qc->bytes = bytes - flow->counter->bytes;
7983                 if (qc->reset) {
7984                         flow->counter->hits = pkts;
7985                         flow->counter->bytes = bytes;
7986                 }
7987                 return 0;
7988         }
7989         return rte_flow_error_set(error, EINVAL,
7990                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
7991                                   NULL,
7992                                   "counters are not available");
7993 }
7994
7995 /**
7996  * Query a flow.
7997  *
7998  * @see rte_flow_query()
7999  * @see rte_flow_ops
8000  */
8001 static int
8002 flow_dv_query(struct rte_eth_dev *dev,
8003               struct rte_flow *flow __rte_unused,
8004               struct rte_flow *flow,
8005               const struct rte_flow_action *actions,
8006               void *data,
8007               struct rte_flow_error *error)
8008         int ret = -EINVAL;
8009
8010         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
8011                 switch (actions->type) {
8012                 case RTE_FLOW_ACTION_TYPE_VOID:
8013                         break;
8014                 case RTE_FLOW_ACTION_TYPE_COUNT:
8015                         ret = flow_dv_query_count(dev, flow, data, error);
8016                         break;
8017                 default:
8018                         return rte_flow_error_set(error, ENOTSUP,
8019                                                   RTE_FLOW_ERROR_TYPE_ACTION,
8020                                                   actions,
8021                                                   "action not supported");
8022                 }
8023         }
8024         return ret;
8025 }
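
/*
 * Usage sketch (illustrative only, not compiled): querying a COUNT
 * action through the public API, which lands in flow_dv_query() above.
 * "port_id" and "flow" are assumed to come from the application.
 */
#if 0
	struct rte_flow_query_count qc = { .reset = 1 };
	struct rte_flow_action count_action[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (!rte_flow_query(port_id, flow, count_action, &qc, &err))
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       qc.hits, qc.bytes);
#endif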
8026
8027 /**
8028  * Destroy the meter table set.
8029  * Lock free, (mutex should be acquired by caller).
8030  *
8031  * @param[in] dev
8032  *   Pointer to Ethernet device.
8033  * @param[in] tbl
8034  *   Pointer to the meter table set.
8035  *
8036  * @return
8037  *   Always 0.
8038  */
8039 static int
8040 flow_dv_destroy_mtr_tbl(struct rte_eth_dev *dev,
8041                         struct mlx5_meter_domains_infos *tbl)
8042 {
8043         struct mlx5_priv *priv = dev->data->dev_private;
8044         struct mlx5_meter_domains_infos *mtd =
8045                                 (struct mlx5_meter_domains_infos *)tbl;
8046
8047         if (!mtd || !priv->config.dv_flow_en)
8048                 return 0;
8049         if (mtd->ingress.policer_rules[RTE_MTR_DROPPED])
8050                 claim_zero(mlx5_glue->dv_destroy_flow
8051                           (mtd->ingress.policer_rules[RTE_MTR_DROPPED]));
8052         if (mtd->egress.policer_rules[RTE_MTR_DROPPED])
8053                 claim_zero(mlx5_glue->dv_destroy_flow
8054                           (mtd->egress.policer_rules[RTE_MTR_DROPPED]));
8055         if (mtd->transfer.policer_rules[RTE_MTR_DROPPED])
8056                 claim_zero(mlx5_glue->dv_destroy_flow
8057                           (mtd->transfer.policer_rules[RTE_MTR_DROPPED]));
8058         if (mtd->egress.color_matcher)
8059                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8060                           (mtd->egress.color_matcher));
8061         if (mtd->egress.any_matcher)
8062                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8063                           (mtd->egress.any_matcher));
8064         if (mtd->egress.tbl)
8065                 claim_zero(flow_dv_tbl_resource_release(dev,
8066                                                         mtd->egress.tbl));
8067         if (mtd->ingress.color_matcher)
8068                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8069                           (mtd->ingress.color_matcher));
8070         if (mtd->ingress.any_matcher)
8071                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8072                           (mtd->ingress.any_matcher));
8073         if (mtd->ingress.tbl)
8074                 claim_zero(flow_dv_tbl_resource_release(dev,
8075                                                         mtd->ingress.tbl));
8076         if (mtd->transfer.color_matcher)
8077                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8078                           (mtd->transfer.color_matcher));
8079         if (mtd->transfer.any_matcher)
8080                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
8081                           (mtd->transfer.any_matcher));
8082         if (mtd->transfer.tbl)
8083                 claim_zero(flow_dv_tbl_resource_release(dev,
8084                                                         mtd->transfer.tbl));
8085         if (mtd->drop_actn)
8086                 claim_zero(mlx5_glue->destroy_flow_action(mtd->drop_actn));
8087         rte_free(mtd);
8088         return 0;
8089 }
8090
8091 /* Number of meter flow actions, count and jump or count and drop. */
8092 #define METER_ACTIONS 2
8093
8094 /**
8095  * Create the specified domain meter table and suffix table.
8096  *
8097  * @param[in] dev
8098  *   Pointer to Ethernet device.
8099  * @param[in,out] mtb
8100  *   Pointer to DV meter table set.
8101  * @param[in] egress
8102  *   Table attribute.
8103  * @param[in] transfer
8104  *   Table attribute.
8105  * @param[in] color_reg_c_idx
8106  *   Reg C index for color match.
8107  *
8108  * @return
8109  *   0 on success, -1 otherwise and rte_errno is set.
8110  */
8111 static int
8112 flow_dv_prepare_mtr_tables(struct rte_eth_dev *dev,
8113                            struct mlx5_meter_domains_infos *mtb,
8114                            uint8_t egress, uint8_t transfer,
8115                            uint32_t color_reg_c_idx)
8116 {
8117         struct mlx5_priv *priv = dev->data->dev_private;
8118         struct mlx5_ibv_shared *sh = priv->sh;
8119         struct mlx5_flow_dv_match_params mask = {
8120                 .size = sizeof(mask.buf),
8121         };
8122         struct mlx5_flow_dv_match_params value = {
8123                 .size = sizeof(value.buf),
8124         };
8125         struct mlx5dv_flow_matcher_attr dv_attr = {
8126                 .type = IBV_FLOW_ATTR_NORMAL,
8127                 .priority = 0,
8128                 .match_criteria_enable = 0,
8129                 .match_mask = (void *)&mask,
8130         };
8131         void *actions[METER_ACTIONS];
8132         struct mlx5_flow_tbl_resource **sfx_tbl;
8133         struct mlx5_meter_domain_info *dtb;
8134         struct rte_flow_error error;
8135         int i = 0;
8136
8137         if (transfer) {
8138                 sfx_tbl = &sh->fdb_mtr_sfx_tbl;
8139                 dtb = &mtb->transfer;
8140         } else if (egress) {
8141                 sfx_tbl = &sh->tx_mtr_sfx_tbl;
8142                 dtb = &mtb->egress;
8143         } else {
8144                 sfx_tbl = &sh->rx_mtr_sfx_tbl;
8145                 dtb = &mtb->ingress;
8146         }
8147         /* If the suffix table is missing, create it. */
8148         if (!(*sfx_tbl)) {
8149                 *sfx_tbl = flow_dv_tbl_resource_get(dev,
8150                                                 MLX5_FLOW_TABLE_LEVEL_SUFFIX,
8151                                                 egress, transfer, &error);
8152                 if (!(*sfx_tbl)) {
8153                         DRV_LOG(ERR, "Failed to create meter suffix table.");
8154                         return -1;
8155                 }
8156         }
8157         /* Create the meter table with METER level. */
8158         dtb->tbl = flow_dv_tbl_resource_get(dev, MLX5_FLOW_TABLE_LEVEL_METER,
8159                                             egress, transfer, &error);
8160         if (!dtb->tbl) {
8161                 DRV_LOG(ERR, "Failed to create meter policer table.");
8162                 return -1;
8163         }
8164         /* Create matchers, Any and Color. */
8165         dv_attr.priority = 3;
8166         dv_attr.match_criteria_enable = 0;
8167         dtb->any_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8168                                                              &dv_attr,
8169                                                              dtb->tbl->obj);
8170         if (!dtb->any_matcher) {
8171                 DRV_LOG(ERR, "Failed to create meter"
8172                              " policer default matcher.");
8173                 goto error_exit;
8174         }
8175         dv_attr.priority = 0;
8176         dv_attr.match_criteria_enable =
8177                                 1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
8178         flow_dv_match_meta_reg(mask.buf, value.buf, color_reg_c_idx,
8179                                rte_col_2_mlx5_col(RTE_COLORS), UINT32_MAX);
8180         dtb->color_matcher = mlx5_glue->dv_create_flow_matcher(sh->ctx,
8181                                                                &dv_attr,
8182                                                                dtb->tbl->obj);
8183         if (!dtb->color_matcher) {
8184                 DRV_LOG(ERR, "Failed to create meter policer color matcher.");
8185                 goto error_exit;
8186         }
8187         if (mtb->count_actns[RTE_MTR_DROPPED])
8188                 actions[i++] = mtb->count_actns[RTE_MTR_DROPPED];
8189         actions[i++] = mtb->drop_actn;
8190         /* Default rule: lowest priority, match any, actions: drop. */
8191         dtb->policer_rules[RTE_MTR_DROPPED] =
8192                         mlx5_glue->dv_create_flow(dtb->any_matcher,
8193                                                  (void *)&value, i, actions);
8194         if (!dtb->policer_rules[RTE_MTR_DROPPED]) {
8195                 DRV_LOG(ERR, "Failed to create meter policer drop rule.");
8196                 goto error_exit;
8197         }
8198         return 0;
8199 error_exit:
8200         return -1;
8201 }
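
/*
 * Resulting per-domain rule layout (sketch, derived from the code
 * above and from flow_dv_create_policer_forward_rule() below):
 *
 *   METER table (MLX5_FLOW_TABLE_LEVEL_METER)
 *     prio 0: color_matcher, REG_C == color -> [count,] jump or drop
 *     prio 3: any_matcher, match-all        -> [count,] drop
 *   SUFFIX table (MLX5_FLOW_TABLE_LEVEL_SUFFIX, shared per domain)
 */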
8202
8203 /**
8204  * Create the needed meter and suffix tables.
8205  * Lock free, (mutex should be acquired by caller).
8206  *
8207  * @param[in] dev
8208  *   Pointer to Ethernet device.
8209  * @param[in] fm
8210  *   Pointer to the flow meter.
8211  *
8212  * @return
8213  *   Pointer to table set on success, NULL otherwise and rte_errno is set.
8214  */
8215 static struct mlx5_meter_domains_infos *
8216 flow_dv_create_mtr_tbl(struct rte_eth_dev *dev,
8217                        const struct mlx5_flow_meter *fm)
8218 {
8219         struct mlx5_priv *priv = dev->data->dev_private;
8220         struct mlx5_meter_domains_infos *mtb;
8221         int ret;
8222         int i;
8223
8224         if (!priv->mtr_en) {
8225                 rte_errno = ENOTSUP;
8226                 return NULL;
8227         }
8228         mtb = rte_calloc(__func__, 1, sizeof(*mtb), 0);
8229         if (!mtb) {
8230                 DRV_LOG(ERR, "Failed to allocate memory for meter.");
8231                 return NULL;
8232         }
8233         /* Create meter count actions. */
8234         for (i = 0; i <= RTE_MTR_DROPPED; i++) {
8235                 if (!fm->policer_stats.cnt[i])
8236                         continue;
8237                 mtb->count_actns[i] = fm->policer_stats.cnt[i]->action;
8238         }
8239         /* Create drop action. */
8240         mtb->drop_actn = mlx5_glue->dr_create_flow_action_drop();
8241         if (!mtb->drop_actn) {
8242                 DRV_LOG(ERR, "Failed to create drop action.");
8243                 goto error_exit;
8244         }
8245         /* Egress meter table. */
8246         ret = flow_dv_prepare_mtr_tables(dev, mtb, 1, 0, priv->mtr_color_reg);
8247         if (ret) {
8248                 DRV_LOG(ERR, "Failed to prepare egress meter table.");
8249                 goto error_exit;
8250         }
8251         /* Ingress meter table. */
8252         ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 0, priv->mtr_color_reg);
8253         if (ret) {
8254                 DRV_LOG(ERR, "Failed to prepare ingress meter table.");
8255                 goto error_exit;
8256         }
8257         /* FDB meter table. */
8258         if (priv->config.dv_esw_en) {
8259                 ret = flow_dv_prepare_mtr_tables(dev, mtb, 0, 1,
8260                                                  priv->mtr_color_reg);
8261                 if (ret) {
8262                         DRV_LOG(ERR, "Failed to prepare fdb meter table.");
8263                         goto error_exit;
8264                 }
8265         }
8266         return mtb;
8267 error_exit:
8268         flow_dv_destroy_mtr_tbl(dev, mtb);
8269         return NULL;
8270 }
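
/*
 * Usage sketch (illustrative only, not compiled): the rte_mtr calls an
 * application would issue to create the meter whose tables are built
 * above. Profile/meter ids, rates and the policer actions below are
 * placeholders.
 */
#if 0
	struct rte_mtr_error merr;
	struct rte_mtr_meter_profile profile = {
		.alg = RTE_MTR_SRTCM_RFC2697,
		.srtcm_rfc2697 = {
			.cir = 1000000, /* committed rate, bytes/sec */
			.cbs = 65536,
			.ebs = 65536,
		},
	};
	struct rte_mtr_params params = {
		.meter_profile_id = 1,
		.meter_enable = 1,
		.action = {
			[RTE_COLOR_GREEN] = MTR_POLICER_ACTION_COLOR_GREEN,
			[RTE_COLOR_YELLOW] = MTR_POLICER_ACTION_COLOR_YELLOW,
			[RTE_COLOR_RED] = MTR_POLICER_ACTION_DROP,
		},
	};

	rte_mtr_meter_profile_add(port_id, 1, &profile, &merr);
	rte_mtr_create(port_id, 1, &params, 1, &merr);
#endif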
8271
8272 /**
8273  * Destroy domain policer rule.
8274  *
8275  * @param[in] dt
8276  *   Pointer to domain table.
8277  */
8278 static void
8279 flow_dv_destroy_domain_policer_rule(struct mlx5_meter_domain_info *dt)
8280 {
8281         int i;
8282
8283         for (i = 0; i < RTE_MTR_DROPPED; i++) {
8284                 if (dt->policer_rules[i]) {
8285                         claim_zero(mlx5_glue->dv_destroy_flow
8286                                   (dt->policer_rules[i]));
8287                         dt->policer_rules[i] = NULL;
8288                 }
8289         }
8290         if (dt->jump_actn) {
8291                 claim_zero(mlx5_glue->destroy_flow_action(dt->jump_actn));
8292                 dt->jump_actn = NULL;
8293         }
8294 }
8295
8296 /**
8297  * Destroy policer rules.
8298  *
8299  * @param[in] dev
8300  *   Pointer to Ethernet device.
8301  * @param[in] fm
8302  *   Pointer to flow meter structure.
8303  * @param[in] attr
8304  *   Pointer to flow attributes.
8305  *
8306  * @return
8307  *   Always 0.
8308  */
8309 static int
8310 flow_dv_destroy_policer_rules(struct rte_eth_dev *dev __rte_unused,
8311                               const struct mlx5_flow_meter *fm,
8312                               const struct rte_flow_attr *attr)
8313 {
8314         struct mlx5_meter_domains_infos *mtb = fm ? fm->mfts : NULL;
8315
8316         if (!mtb)
8317                 return 0;
8318         if (attr->egress)
8319                 flow_dv_destroy_domain_policer_rule(&mtb->egress);
8320         if (attr->ingress)
8321                 flow_dv_destroy_domain_policer_rule(&mtb->ingress);
8322         if (attr->transfer)
8323                 flow_dv_destroy_domain_policer_rule(&mtb->transfer);
8324         return 0;
8325 }
8326
8327 /**
8328  * Create the specified domain meter policer rule.
8329  *
8330  * @param[in] fm
8331  *   Pointer to flow meter structure.
8332  * @param[in] dtb
8333  *   Pointer to the meter domain table info.
8334  * @param[in] sfx_tb
8335  *   Pointer to suffix table.
8336  * @param[in] mtr_reg_c
8337  *   Color match REG_C.
8338  *
8339  * @return
8340  *   0 on success, -1 otherwise.
8341  */
8342 static int
8343 flow_dv_create_policer_forward_rule(struct mlx5_flow_meter *fm,
8344                                     struct mlx5_meter_domain_info *dtb,
8345                                     struct mlx5_flow_tbl_resource *sfx_tb,
8346                                     uint8_t mtr_reg_c)
8347 {
8348         struct mlx5_flow_dv_match_params matcher = {
8349                 .size = sizeof(matcher.buf),
8350         };
8351         struct mlx5_flow_dv_match_params value = {
8352                 .size = sizeof(value.buf),
8353         };
8354         struct mlx5_meter_domains_infos *mtb = fm->mfts;
8355         void *actions[METER_ACTIONS];
8356         int i;
8357
8358         /* Create jump action. */
8359         if (!sfx_tb)
8360                 return -1;
8361         if (!dtb->jump_actn)
8362                 dtb->jump_actn =
8363                         mlx5_glue->dr_create_flow_action_dest_flow_tbl
8364                                                         (sfx_tb->obj);
8365         if (!dtb->jump_actn) {
8366                 DRV_LOG(ERR, "Failed to create policer jump action.");
8367                 goto error;
8368         }
8369         for (i = 0; i < RTE_MTR_DROPPED; i++) {
8370                 int j = 0;
8371
8372                 flow_dv_match_meta_reg(matcher.buf, value.buf, mtr_reg_c,
8373                                        rte_col_2_mlx5_col(i), UINT32_MAX);
8374                 if (mtb->count_actns[i])
8375                         actions[j++] = mtb->count_actns[i];
8376                 if (fm->params.action[i] == MTR_POLICER_ACTION_DROP)
8377                         actions[j++] = mtb->drop_actn;
8378                 else
8379                         actions[j++] = dtb->jump_actn;
8380                 dtb->policer_rules[i] =
8381                         mlx5_glue->dv_create_flow(dtb->color_matcher,
8382                                                  (void *)&value,
8383                                                   j, actions);
8384                 if (!dtb->policer_rules[i]) {
8385                         DRV_LOG(ERR, "Failed to create policer rule.");
8386                         goto error;
8387                 }
8388         }
8389         return 0;
8390 error:
8391         rte_errno = errno;
8392         return -1;
8393 }
8394
8395 /**
8396  * Create policer rules.
8397  *
8398  * @param[in] dev
8399  *   Pointer to Ethernet device.
8400  * @param[in] fm
8401  *   Pointer to flow meter structure.
8402  * @param[in] attr
8403  *   Pointer to flow attributes.
8404  *
8405  * @return
8406  *   0 on success, -1 otherwise.
8407  */
8408 static int
8409 flow_dv_create_policer_rules(struct rte_eth_dev *dev,
8410                              struct mlx5_flow_meter *fm,
8411                              const struct rte_flow_attr *attr)
8412 {
8413         struct mlx5_priv *priv = dev->data->dev_private;
8414         struct mlx5_meter_domains_infos *mtb = fm->mfts;
8415         int ret;
8416
8417         if (attr->egress) {
8418                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->egress,
8419                                                 priv->sh->tx_mtr_sfx_tbl,
8420                                                 priv->mtr_color_reg);
8421                 if (ret) {
8422                         DRV_LOG(ERR, "Failed to create egress policer.");
8423                         goto error;
8424                 }
8425         }
8426         if (attr->ingress) {
8427                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->ingress,
8428                                                 priv->sh->rx_mtr_sfx_tbl,
8429                                                 priv->mtr_color_reg);
8430                 if (ret) {
8431                         DRV_LOG(ERR, "Failed to create ingress policer.");
8432                         goto error;
8433                 }
8434         }
8435         if (attr->transfer) {
8436                 ret = flow_dv_create_policer_forward_rule(fm, &mtb->transfer,
8437                                                 priv->sh->fdb_mtr_sfx_tbl,
8438                                                 priv->mtr_color_reg);
8439                 if (ret) {
8440                         DRV_LOG(ERR, "Failed to create transfer policer.");
8441                         goto error;
8442                 }
8443         }
8444         return 0;
8445 error:
8446         flow_dv_destroy_policer_rules(dev, fm, attr);
8447         return -1;
8448 }
8449
8450 /**
8451  * Query a devx counter.
8452  *
8453  * @param[in] dev
8454  *   Pointer to the Ethernet device structure.
8455  * @param[in] cnt
8456  *   Pointer to the flow counter.
8457  * @param[in] clear
8458  *   Set to clear the counter statistics.
8459  * @param[out] pkts
8460  *   The statistics value of packets.
8461  * @param[out] bytes
8462  *   The statistics value of bytes.
8463  *
8464  * @return
8465  *   0 on success, otherwise return -1.
8466  */
8467 static int
8468 flow_dv_counter_query(struct rte_eth_dev *dev,
8469                       struct mlx5_flow_counter *cnt, bool clear,
8470                       uint64_t *pkts, uint64_t *bytes)
8471 {
8472         struct mlx5_priv *priv = dev->data->dev_private;
8473         uint64_t inn_pkts, inn_bytes;
8474         int ret;
8475
8476         if (!priv->config.devx)
8477                 return -1;
8478         ret = _flow_dv_query_count(dev, cnt, &inn_pkts, &inn_bytes);
8479         if (ret)
8480                 return -1;
8481         *pkts = inn_pkts - cnt->hits;
8482         *bytes = inn_bytes - cnt->bytes;
8483         if (clear) {
8484                 cnt->hits = inn_pkts;
8485                 cnt->bytes = inn_bytes;
8486         }
8487         return 0;
8488 }
8489
8490 /*
8491  * Mutex-protected thunk to lock-free __flow_dv_translate().
8492  */
8493 static int
8494 flow_dv_translate(struct rte_eth_dev *dev,
8495                   struct mlx5_flow *dev_flow,
8496                   const struct rte_flow_attr *attr,
8497                   const struct rte_flow_item items[],
8498                   const struct rte_flow_action actions[],
8499                   struct rte_flow_error *error)
8500 {
8501         int ret;
8502
8503         flow_dv_shared_lock(dev);
8504         ret = __flow_dv_translate(dev, dev_flow, attr, items, actions, error);
8505         flow_dv_shared_unlock(dev);
8506         return ret;
8507 }
8508
8509 /*
8510  * Mutex-protected thunk to lock-free __flow_dv_apply().
8511  */
8512 static int
8513 flow_dv_apply(struct rte_eth_dev *dev,
8514               struct rte_flow *flow,
8515               struct rte_flow_error *error)
8516 {
8517         int ret;
8518
8519         flow_dv_shared_lock(dev);
8520         ret = __flow_dv_apply(dev, flow, error);
8521         flow_dv_shared_unlock(dev);
8522         return ret;
8523 }
8524
8525 /*
8526  * Mutex-protected thunk to lock-free __flow_dv_remove().
8527  */
8528 static void
8529 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
8530 {
8531         flow_dv_shared_lock(dev);
8532         __flow_dv_remove(dev, flow);
8533         flow_dv_shared_unlock(dev);
8534 }
8535
8536 /*
8537  * Mutex-protected thunk to lock-free __flow_dv_destroy().
8538  */
8539 static void
8540 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
8541 {
8542         flow_dv_shared_lock(dev);
8543         __flow_dv_destroy(dev, flow);
8544         flow_dv_shared_unlock(dev);
8545 }
8546
8547 /*
8548  * Mutex-protected thunk to lock-free flow_dv_counter_alloc().
8549  */
8550 static struct mlx5_flow_counter *
8551 flow_dv_counter_allocate(struct rte_eth_dev *dev)
8552 {
8553         struct mlx5_flow_counter *cnt;
8554
8555         flow_dv_shared_lock(dev);
8556         cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
8557         flow_dv_shared_unlock(dev);
8558         return cnt;
8559 }
8560
8561 /*
8562  * Mutex-protected thunk to lock-free flow_dv_counter_release().
8563  */
8564 static void
8565 flow_dv_counter_free(struct rte_eth_dev *dev, struct mlx5_flow_counter *cnt)
8566 {
8567         flow_dv_shared_lock(dev);
8568         flow_dv_counter_release(dev, cnt);
8569         flow_dv_shared_unlock(dev);
8570 }
8571
8572 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
8573         .validate = flow_dv_validate,
8574         .prepare = flow_dv_prepare,
8575         .translate = flow_dv_translate,
8576         .apply = flow_dv_apply,
8577         .remove = flow_dv_remove,
8578         .destroy = flow_dv_destroy,
8579         .query = flow_dv_query,
8580         .create_mtr_tbls = flow_dv_create_mtr_tbl,
8581         .destroy_mtr_tbls = flow_dv_destroy_mtr_tbl,
8582         .create_policer_rules = flow_dv_create_policer_rules,
8583         .destroy_policer_rules = flow_dv_destroy_policer_rules,
8584         .counter_alloc = flow_dv_counter_allocate,
8585         .counter_free = flow_dv_counter_free,
8586         .counter_query = flow_dv_counter_query,
8587 };
8588
8589 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */