net/mlx5: add metadata register copy
drivers/net/mlx5/mlx5_flow_dv.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

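/*
 * Summary of the L3/L4 layers found in a flow pattern. Filled once by
 * flow_dv_attr_init() and consulted by the modify-header converters below
 * to select the protocol-specific field tables.
 */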
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

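/*
 * Per-protocol field tables for modify-header actions. Each entry gives a
 * field's size and offset within the protocol header together with the
 * matching firmware modification field id; an all-zero entry terminates
 * the table.
 */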
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size here is expressed in bits, unlike the other tables (bytes). */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

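/**
 * Mark IP-in-IP tunnel layers when an IP item's next protocol field
 * indicates IPIP or IPv6 encapsulation.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item being checked.
 * @param[in] next_protocol
 *   Value of the item's next protocol / next header field.
 * @param[in,out] item_flags
 *   Bit-field updated with the detected tunnel layer.
 * @param[in,out] tunnel
 *   Set to 1 when a tunnel layer is detected.
 */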
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
               item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                assert(false);
                ret = 0;
                break;
        }
        return ret;
}

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field description
 * and the item mask. Data bit offset and width of each action are deduced
 * from the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   A negative offset value reuses the source offset.
 *   The size field is ignored; the value is taken from the source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should likewise be presented in big-endian format.
         * The mask must always be present; it defines the actual field width.
         */
        assert(item->mask);
        assert(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                assert(size_b);
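                /* The command encodes a full 32-bit width as a zero length. */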
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i].action_type = type;
                actions[i].field = field->id;
                actions[i].offset = off_b;
                actions[i].length = size_b;
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        assert(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        assert(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        resource->actions_num = i;
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = field->id;
        actions[i].length = field->size;
        actions[i].offset = field->offset;
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

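/* Translation from flow metadata register index to modify field id. */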
static enum mlx5_modification_field reg_to_field[] = {
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = reg_to_field[conf->id];
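        /* Convert entire record to expected big-endian format. */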
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->data;
        ++i;
        resource->actions_num = i;
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev __rte_unused,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
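        /* A full mask copies the whole 32-bit source register. */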
        uint32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = (uint32_t)-1, /* Same as src. */
                .id = reg_to_field[conf->dst],
        };

        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = UINT32_MAX
        };
        int ret;

        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-field that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                              const struct rte_flow_item *item,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_port_id *spec = item->spec;
        const struct rte_flow_item_port_id *mask = item->mask;
        const struct rte_flow_item_port_id switch_mask = {
                        .id = 0xffffffff,
        };
        struct mlx5_priv *esw_priv;
        struct mlx5_priv *dev_priv;
        int ret;

        if (!attr->transfer)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on port id is valid only"
                                          " when transfer flag is enabled");
        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple source ports are not"
                                          " supported");
        if (!mask)
                mask = &switch_mask;
        if (mask->id != 0xffffffff)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          mask,
                                          "no support for partial mask on"
                                          " \"id\" field");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 error);
        if (ret)
                return ret;
        if (!spec)
                return 0;
        esw_priv = mlx5_port_to_eswitch_info(spec->id);
        if (!esw_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "failed to obtain E-Switch info for"
                                          " port");
        dev_priv = mlx5_dev_to_eswitch_info(dev);
        if (!dev_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to obtain E-Switch info");
        if (esw_priv->domain_id != dev_priv->domain_id)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "cannot match on a port from a"
                                          " different E-Switch");
        return 0;
}

/**
 * Validate the pop VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the pop vlan action.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
                                 uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 uint64_t item_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        (void)action;
        (void)attr;
        if (!priv->sh->pop_vlan_action)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "pop vlan action is not supported");
        /*
         * Check for inconsistencies:
         *  fail strip_vlan in a flow that matches packets without VLAN tags.
         *  fail strip_vlan in a flow that matches packets without explicitly
         *  matching on the VLAN tag?
         */
        if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "no support for multiple vlan pop "
                                          "actions");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot pop vlan without a "
                                          "match on (outer) vlan in the flow");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after pop VLAN action");
        return 0;
}

/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to VLAN info to fill in.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
                                  struct rte_vlan_hdr *vlan)
{
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
                                MLX5DV_FLOW_VLAN_VID_MASK),
                .inner_type = RTE_BE16(0xffff),
        };

        if (items == NULL)
                return;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
               items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
                ;
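        /* "items" now points at either the VLAN item or the END item. */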
        if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                const struct rte_flow_item_vlan *vlan_m = items->mask;
                const struct rte_flow_item_vlan *vlan_v = items->spec;

                /* If the VLAN item carries no data, there is nothing to take. */
                if (!vlan_v)
                        return;
                if (!vlan_m)
                        vlan_m = &nic_mask;
                /* Only full match values are accepted. */
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
                }
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_VID_MASK_BE);
                }
                if (vlan_m->inner_type == nic_mask.inner_type)
                        vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
                                                           vlan_m->inner_type);
        }
}

/**
 * Validate the push VLAN action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] item_flags
 *   Holds the items detected until now.
 * @param[in] action
 *   Pointer to the push VLAN action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
                                  uint64_t item_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;

        if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
            push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "invalid vlan ethertype");
        if (action_flags &
                (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "no support for multiple VLAN "
                                          "actions");
        if (!mlx5_flow_find_action
                        (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION, action,
                                "push VLAN needs to match on VLAN in order to "
                                "get VLAN VID information because there is "
                                "no following set VLAN VID action");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after push VLAN");
        (void)attr;
        return 0;
}

/**
 * Validate the set VLAN PCP.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;

        if (conf->vlan_pcp > 7)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN PCP value is too big");
        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "set VLAN PCP action must follow "
                                          "the push VLAN action");
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN PCP modifications are "
                                          "not supported");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN PCP");
        return 0;
}

/**
 * Validate the set VLAN VID.
 *
 * @param[in] item_flags
 *   Holds the items detected in this rule.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
                                     uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

        if (conf->vlan_vid > RTE_BE16(0xFFE))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN VID value is too big");
        /* There is an of_push_vlan action before us. */
        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
                if (mlx5_flow_find_action(actions + 1,
                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                                        "Multiple VLAN VID modifications are "
                                        "not supported");
                else
                        return 0;
        }

        /*
         * Action is on an existing VLAN header:
         *   Need to verify this is a single modify VID action.
         *   The rule must include a match on the outer VLAN.
         */
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN VID modifications are "
                                          "not supported");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "match on VLAN is required in order "
                                          "to set VLAN VID");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN VID");
        return 0;
}

1342 /**
1343  * Validate count action.
1344  *
1345  * @param[in] dev
1346  *   Pointer to rte_eth_dev structure.
1347  * @param[out] error
1348  *   Pointer to error structure.
1349  *
1350  * @return
1351  *   0 on success, a negative errno value otherwise and rte_errno is set.
1352  */
1353 static int
1354 flow_dv_validate_action_count(struct rte_eth_dev *dev,
1355                               struct rte_flow_error *error)
1356 {
1357         struct mlx5_priv *priv = dev->data->dev_private;
1358
1359         if (!priv->config.devx)
1360                 goto notsup_err;
1361 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
1362         return 0;
1363 #endif
1364 notsup_err:
1365         return rte_flow_error_set
1366                       (error, ENOTSUP,
1367                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1368                        NULL,
1369                        "count action not supported");
1370 }
1371
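/*
 * Note: the count action depends on DevX being enabled and on DevX flow
 * counter support (HAVE_IBV_FLOW_DEVX_COUNTERS); without them validation
 * fails early with ENOTSUP.
 */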
1372 /**
1373  * Validate the L2 encap action.
1374  *
1375  * @param[in] action_flags
1376  *   Holds the actions detected until now.
1377  * @param[in] action
1378  *   Pointer to the encap action.
1379  * @param[in] attr
1380  *   Pointer to flow attributes
1381  * @param[out] error
1382  *   Pointer to error structure.
1383  *
1384  * @return
1385  *   0 on success, a negative errno value otherwise and rte_errno is set.
1386  */
1387 static int
1388 flow_dv_validate_action_l2_encap(uint64_t action_flags,
1389                                  const struct rte_flow_action *action,
1390                                  const struct rte_flow_attr *attr,
1391                                  struct rte_flow_error *error)
1392 {
1393         if (!(action->conf))
1394                 return rte_flow_error_set(error, EINVAL,
1395                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1396                                           "configuration cannot be null");
1397         if (action_flags & MLX5_FLOW_ACTION_DROP)
1398                 return rte_flow_error_set(error, EINVAL,
1399                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1400                                           "can't drop and encap in same flow");
1401         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1404                                           "can only have a single encap or"
1405                                           " decap action in a flow");
1406         if (!attr->transfer && attr->ingress)
1407                 return rte_flow_error_set(error, ENOTSUP,
1408                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1409                                           NULL,
1410                                           "encap action not supported for "
1411                                           "ingress");
1412         return 0;
1413 }
1414
1415 /**
1416  * Validate the L2 decap action.
1417  *
1418  * @param[in] action_flags
1419  *   Holds the actions detected until now.
1420  * @param[in] attr
1421  *   Pointer to flow attributes
1422  * @param[out] error
1423  *   Pointer to error structure.
1424  *
1425  * @return
1426  *   0 on success, a negative errno value otherwise and rte_errno is set.
1427  */
1428 static int
1429 flow_dv_validate_action_l2_decap(uint64_t action_flags,
1430                                  const struct rte_flow_attr *attr,
1431                                  struct rte_flow_error *error)
1432 {
1433         if (action_flags & MLX5_FLOW_ACTION_DROP)
1434                 return rte_flow_error_set(error, EINVAL,
1435                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1436                                           "can't drop and decap in same flow");
1437         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1438                 return rte_flow_error_set(error, EINVAL,
1439                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1440                                           "can only have a single encap or"
1441                                           " decap action in a flow");
1442         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1443                 return rte_flow_error_set(error, EINVAL,
1444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1445                                           "can't have decap action after"
1446                                           " modify action");
1447         if (attr->egress)
1448                 return rte_flow_error_set(error, ENOTSUP,
1449                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1450                                           NULL,
1451                                           "decap action not supported for "
1452                                           "egress");
1453         return 0;
1454 }
1455
1456 /**
1457  * Validate the raw encap action.
1458  *
1459  * @param[in] action_flags
1460  *   Holds the actions detected until now.
1461  * @param[in] action
1462  *   Pointer to the encap action.
1463  * @param[in] attr
1464  *   Pointer to flow attributes
1465  * @param[out] error
1466  *   Pointer to error structure.
1467  *
1468  * @return
1469  *   0 on success, a negative errno value otherwise and rte_errno is set.
1470  */
1471 static int
1472 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1473                                   const struct rte_flow_action *action,
1474                                   const struct rte_flow_attr *attr,
1475                                   struct rte_flow_error *error)
1476 {
1477         const struct rte_flow_action_raw_encap *raw_encap =
1478                 (const struct rte_flow_action_raw_encap *)action->conf;
1479         if (!(action->conf))
1480                 return rte_flow_error_set(error, EINVAL,
1481                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1482                                           "configuration cannot be null");
1483         if (action_flags & MLX5_FLOW_ACTION_DROP)
1484                 return rte_flow_error_set(error, EINVAL,
1485                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1486                                           "can't drop and encap in same flow");
1487         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1488                 return rte_flow_error_set(error, EINVAL,
1489                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1490                                           "can only have a single encap"
1491                                           " action in a flow");
1492         /* Encap without a preceding decap is not supported for ingress. */
1493         if (!attr->transfer && attr->ingress &&
1494             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1495                 return rte_flow_error_set(error, ENOTSUP,
1496                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1497                                           NULL,
1498                                           "encap action not supported for "
1499                                           "ingress");
1500         if (!raw_encap->size || !raw_encap->data)
1501                 return rte_flow_error_set(error, EINVAL,
1502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1503                                           "raw encap data cannot be empty");
1504         return 0;
1505 }
1506
1507 /**
1508  * Validate the raw decap action.
1509  *
1510  * @param[in] action_flags
1511  *   Holds the actions detected until now.
1512  * @param[in] action
1513  *   Pointer to the decap action.
1514  * @param[in] attr
1515  *   Pointer to flow attributes
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
1522 static int
1523 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1524                                   const struct rte_flow_action *action,
1525                                   const struct rte_flow_attr *attr,
1526                                   struct rte_flow_error *error)
1527 {
1528         if (action_flags & MLX5_FLOW_ACTION_DROP)
1529                 return rte_flow_error_set(error, EINVAL,
1530                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1531                                           "can't drop and decap in same flow");
1532         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1533                 return rte_flow_error_set(error, EINVAL,
1534                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1535                                           "can't have encap action before"
1536                                           " decap action");
1537         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1538                 return rte_flow_error_set(error, EINVAL,
1539                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1540                                           "can only have a single decap"
1541                                           " action in a flow");
1542         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1543                 return rte_flow_error_set(error, EINVAL,
1544                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1545                                           "can't have decap action after"
1546                                           " modify action");
1547         /* decap action is valid on egress only if it is followed by encap */
1548         if (attr->egress) {
1549                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1550                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1551                        action++) {
1552                 }
1553                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1554                         return rte_flow_error_set
1555                                         (error, ENOTSUP,
1556                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1557                                          NULL, "decap action not supported"
1558                                          " for egress");
1559         }
1560         return 0;
1561 }
1562
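/*
 * Illustrative only: on egress, a RAW_DECAP passes the validation above
 * only when a RAW_ENCAP follows it in the same action list, e.g.
 * RAW_DECAP -> RAW_ENCAP -> END, which together rewrite the packet
 * headers.
 */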
1563 /**
1564  * Find existing encap/decap resource or create and register a new one.
1565  *
1566  * @param[in, out] dev
1567  *   Pointer to rte_eth_dev structure.
1568  * @param[in, out] resource
1569  *   Pointer to encap/decap resource.
1570  * @param[in, out] dev_flow
1571  *   Pointer to the dev_flow.
1572  * @param[out] error
1573  *   Pointer to error structure.
1574  *
1575  * @return
1576  *   0 on success, a negative errno value otherwise and rte_errno is set.
1577  */
1578 static int
1579 flow_dv_encap_decap_resource_register
1580                         (struct rte_eth_dev *dev,
1581                          struct mlx5_flow_dv_encap_decap_resource *resource,
1582                          struct mlx5_flow *dev_flow,
1583                          struct rte_flow_error *error)
1584 {
1585         struct mlx5_priv *priv = dev->data->dev_private;
1586         struct mlx5_ibv_shared *sh = priv->sh;
1587         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1588         struct rte_flow *flow = dev_flow->flow;
1589         struct mlx5dv_dr_domain *domain;
1590
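        /*
         * A reformat on the root table (group 0) requires the ROOT_LEVEL
         * flag (1); non-root tables use 0.
         */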
1591         resource->flags = flow->group ? 0 : 1;
1592         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1593                 domain = sh->fdb_domain;
1594         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1595                 domain = sh->rx_domain;
1596         else
1597                 domain = sh->tx_domain;
1598
1599         /* Lookup a matching resource from cache. */
1600         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1601                 if (resource->reformat_type == cache_resource->reformat_type &&
1602                     resource->ft_type == cache_resource->ft_type &&
1603                     resource->flags == cache_resource->flags &&
1604                     resource->size == cache_resource->size &&
1605                     !memcmp((const void *)resource->buf,
1606                             (const void *)cache_resource->buf,
1607                             resource->size)) {
1608                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1609                                 (void *)cache_resource,
1610                                 rte_atomic32_read(&cache_resource->refcnt));
1611                         rte_atomic32_inc(&cache_resource->refcnt);
1612                         dev_flow->dv.encap_decap = cache_resource;
1613                         return 0;
1614                 }
1615         }
1616         /* Register new encap/decap resource. */
1617         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1618         if (!cache_resource)
1619                 return rte_flow_error_set(error, ENOMEM,
1620                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1621                                           "cannot allocate resource memory");
1622         *cache_resource = *resource;
1623         cache_resource->verbs_action =
1624                 mlx5_glue->dv_create_flow_action_packet_reformat
1625                         (sh->ctx, cache_resource->reformat_type,
1626                          cache_resource->ft_type, domain, cache_resource->flags,
1627                          cache_resource->size,
1628                          (cache_resource->size ? cache_resource->buf : NULL));
1629         if (!cache_resource->verbs_action) {
1630                 rte_free(cache_resource);
1631                 return rte_flow_error_set(error, ENOMEM,
1632                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1633                                           NULL, "cannot create action");
1634         }
1635         rte_atomic32_init(&cache_resource->refcnt);
1636         rte_atomic32_inc(&cache_resource->refcnt);
1637         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1638         dev_flow->dv.encap_decap = cache_resource;
1639         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1640                 (void *)cache_resource,
1641                 rte_atomic32_read(&cache_resource->refcnt));
1642         return 0;
1643 }
1644
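/*
 * The *_resource_register() helpers in this file share one pattern: scan
 * the per-device cache list for an identical resource, take a reference
 * on a hit, otherwise allocate a new entry, create the DR action and
 * insert the entry at the list head with refcnt == 1.
 */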
1645 /**
1646  * Find existing jump table resource or create and register a new one.
1647  *
1648  * @param[in, out] dev
1649  *   Pointer to rte_eth_dev structure.
1650  * @param[in, out] resource
1651  *   Pointer to jump table resource.
1652  * @param[in, out] dev_flow
1653  *   Pointer to the dev_flow.
1654  * @param[out] error
1655  *   Pointer to error structure.
1656  *
1657  * @return
1658  *   0 on success, a negative errno value otherwise and rte_errno is set.
1659  */
1660 static int
1661 flow_dv_jump_tbl_resource_register
1662                         (struct rte_eth_dev *dev,
1663                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1664                          struct mlx5_flow *dev_flow,
1665                          struct rte_flow_error *error)
1666 {
1667         struct mlx5_priv *priv = dev->data->dev_private;
1668         struct mlx5_ibv_shared *sh = priv->sh;
1669         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1670
1671         /* Lookup a matching resource from cache. */
1672         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1673                 if (resource->tbl == cache_resource->tbl) {
1674                         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1675                                 (void *)cache_resource,
1676                                 rte_atomic32_read(&cache_resource->refcnt));
1677                         rte_atomic32_inc(&cache_resource->refcnt);
1678                         dev_flow->dv.jump = cache_resource;
1679                         return 0;
1680                 }
1681         }
1682         /* Register new jump table resource. */
1683         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1684         if (!cache_resource)
1685                 return rte_flow_error_set(error, ENOMEM,
1686                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1687                                           "cannot allocate resource memory");
1688         *cache_resource = *resource;
1689         cache_resource->action =
1690                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1691                 (resource->tbl->obj);
1692         if (!cache_resource->action) {
1693                 rte_free(cache_resource);
1694                 return rte_flow_error_set(error, ENOMEM,
1695                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1696                                           NULL, "cannot create action");
1697         }
1698         rte_atomic32_init(&cache_resource->refcnt);
1699         rte_atomic32_inc(&cache_resource->refcnt);
1700         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1701         dev_flow->dv.jump = cache_resource;
1702         DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1703                 (void *)cache_resource,
1704                 rte_atomic32_read(&cache_resource->refcnt));
1705         return 0;
1706 }
1707
1708 /**
1709  * Find existing port ID action resource or create and register a new one.
1710  *
1711  * @param[in, out] dev
1712  *   Pointer to rte_eth_dev structure.
1713  * @param[in, out] resource
1714  *   Pointer to port ID action resource.
1715  * @param[in, out] dev_flow
1716  *   Pointer to the dev_flow.
1717  * @param[out] error
1718  *   Pointer to error structure.
1719  *
1720  * @return
1721  *   0 on success, a negative errno value otherwise and rte_errno is set.
1722  */
1723 static int
1724 flow_dv_port_id_action_resource_register
1725                         (struct rte_eth_dev *dev,
1726                          struct mlx5_flow_dv_port_id_action_resource *resource,
1727                          struct mlx5_flow *dev_flow,
1728                          struct rte_flow_error *error)
1729 {
1730         struct mlx5_priv *priv = dev->data->dev_private;
1731         struct mlx5_ibv_shared *sh = priv->sh;
1732         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1733
1734         /* Lookup a matching resource from cache. */
1735         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1736                 if (resource->port_id == cache_resource->port_id) {
1737                         DRV_LOG(DEBUG, "port id action resource %p: "
1738                                 "refcnt %d++",
1739                                 (void *)cache_resource,
1740                                 rte_atomic32_read(&cache_resource->refcnt));
1741                         rte_atomic32_inc(&cache_resource->refcnt);
1742                         dev_flow->dv.port_id_action = cache_resource;
1743                         return 0;
1744                 }
1745         }
1746         /* Register new port id action resource. */
1747         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1748         if (!cache_resource)
1749                 return rte_flow_error_set(error, ENOMEM,
1750                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1751                                           "cannot allocate resource memory");
1752         *cache_resource = *resource;
1753         cache_resource->action =
1754                 mlx5_glue->dr_create_flow_action_dest_vport
1755                         (priv->sh->fdb_domain, resource->port_id);
1756         if (!cache_resource->action) {
1757                 rte_free(cache_resource);
1758                 return rte_flow_error_set(error, ENOMEM,
1759                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1760                                           NULL, "cannot create action");
1761         }
1762         rte_atomic32_init(&cache_resource->refcnt);
1763         rte_atomic32_inc(&cache_resource->refcnt);
1764         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1765         dev_flow->dv.port_id_action = cache_resource;
1766         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1767                 (void *)cache_resource,
1768                 rte_atomic32_read(&cache_resource->refcnt));
1769         return 0;
1770 }
1771
1772 /**
1773  * Find existing push vlan resource or create and register a new one.
1774  *
1775  * @param[in, out] dev
1776  *   Pointer to rte_eth_dev structure.
1777  * @param[in, out] resource
1778  *   Pointer to push VLAN action resource.
1779  * @param[in, out] dev_flow
1780  *   Pointer to the dev_flow.
1781  * @param[out] error
1782  *   Pointer to error structure.
1783  *
1784  * @return
1785  *   0 on success, a negative errno value otherwise and rte_errno is set.
1786  */
1787 static int
1788 flow_dv_push_vlan_action_resource_register
1789                        (struct rte_eth_dev *dev,
1790                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1791                         struct mlx5_flow *dev_flow,
1792                         struct rte_flow_error *error)
1793 {
1794         struct mlx5_priv *priv = dev->data->dev_private;
1795         struct mlx5_ibv_shared *sh = priv->sh;
1796         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1797         struct mlx5dv_dr_domain *domain;
1798
1799         /* Lookup a matching resource from cache. */
1800         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1801                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1802                     resource->ft_type == cache_resource->ft_type) {
1803                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
1804                                 "refcnt %d++",
1805                                 (void *)cache_resource,
1806                                 rte_atomic32_read(&cache_resource->refcnt));
1807                         rte_atomic32_inc(&cache_resource->refcnt);
1808                         dev_flow->dv.push_vlan_res = cache_resource;
1809                         return 0;
1810                 }
1811         }
1812         /* Register new push_vlan action resource. */
1813         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1814         if (!cache_resource)
1815                 return rte_flow_error_set(error, ENOMEM,
1816                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1817                                           "cannot allocate resource memory");
1818         *cache_resource = *resource;
1819         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1820                 domain = sh->fdb_domain;
1821         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1822                 domain = sh->rx_domain;
1823         else
1824                 domain = sh->tx_domain;
1825         cache_resource->action =
1826                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1827                                                            resource->vlan_tag);
1828         if (!cache_resource->action) {
1829                 rte_free(cache_resource);
1830                 return rte_flow_error_set(error, ENOMEM,
1831                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1832                                           NULL, "cannot create action");
1833         }
1834         rte_atomic32_init(&cache_resource->refcnt);
1835         rte_atomic32_inc(&cache_resource->refcnt);
1836         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1837         dev_flow->dv.push_vlan_res = cache_resource;
1838         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1839                 (void *)cache_resource,
1840                 rte_atomic32_read(&cache_resource->refcnt));
1841         return 0;
1842 }
1843 /**
1844  * Get the size of a specific rte_flow_item_type.
1845  *
1846  * @param[in] item_type
1847  *   Tested rte_flow_item_type.
1848  *
1849  * @return
1850  *   Size of the spec structure for item_type, 0 if void or irrelevant.
1851  */
1852 static size_t
1853 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1854 {
1855         size_t retval;
1856
1857         switch (item_type) {
1858         case RTE_FLOW_ITEM_TYPE_ETH:
1859                 retval = sizeof(struct rte_flow_item_eth);
1860                 break;
1861         case RTE_FLOW_ITEM_TYPE_VLAN:
1862                 retval = sizeof(struct rte_flow_item_vlan);
1863                 break;
1864         case RTE_FLOW_ITEM_TYPE_IPV4:
1865                 retval = sizeof(struct rte_flow_item_ipv4);
1866                 break;
1867         case RTE_FLOW_ITEM_TYPE_IPV6:
1868                 retval = sizeof(struct rte_flow_item_ipv6);
1869                 break;
1870         case RTE_FLOW_ITEM_TYPE_UDP:
1871                 retval = sizeof(struct rte_flow_item_udp);
1872                 break;
1873         case RTE_FLOW_ITEM_TYPE_TCP:
1874                 retval = sizeof(struct rte_flow_item_tcp);
1875                 break;
1876         case RTE_FLOW_ITEM_TYPE_VXLAN:
1877                 retval = sizeof(struct rte_flow_item_vxlan);
1878                 break;
1879         case RTE_FLOW_ITEM_TYPE_GRE:
1880                 retval = sizeof(struct rte_flow_item_gre);
1881                 break;
1882         case RTE_FLOW_ITEM_TYPE_NVGRE:
1883                 retval = sizeof(struct rte_flow_item_nvgre);
1884                 break;
1885         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1886                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1887                 break;
1888         case RTE_FLOW_ITEM_TYPE_MPLS:
1889                 retval = sizeof(struct rte_flow_item_mpls);
1890                 break;
1891         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1892         default:
1893                 retval = 0;
1894                 break;
1895         }
1896         return retval;
1897 }
1898
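/*
 * Default header field values filled in by flow_dv_convert_encap_data()
 * below when the corresponding encap item leaves them unset.
 */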
1899 #define MLX5_ENCAP_IPV4_VERSION         0x40
1900 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1901 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1902 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1903 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1904 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1905 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1906
1907 /**
1908  * Convert the encap action data from list of rte_flow_item to raw buffer
1909  *
1910  * @param[in] items
1911  *   Pointer to rte_flow_item objects list.
1912  * @param[out] buf
1913  *   Pointer to the output buffer.
1914  * @param[out] size
1915  *   Pointer to the output buffer size.
1916  * @param[out] error
1917  *   Pointer to the error structure.
1918  *
1919  * @return
1920  *   0 on success, a negative errno value otherwise and rte_errno is set.
1921  */
1922 static int
1923 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1924                            size_t *size, struct rte_flow_error *error)
1925 {
1926         struct rte_ether_hdr *eth = NULL;
1927         struct rte_vlan_hdr *vlan = NULL;
1928         struct rte_ipv4_hdr *ipv4 = NULL;
1929         struct rte_ipv6_hdr *ipv6 = NULL;
1930         struct rte_udp_hdr *udp = NULL;
1931         struct rte_vxlan_hdr *vxlan = NULL;
1932         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1933         struct rte_gre_hdr *gre = NULL;
1934         size_t len;
1935         size_t temp_size = 0;
1936
1937         if (!items)
1938                 return rte_flow_error_set(error, EINVAL,
1939                                           RTE_FLOW_ERROR_TYPE_ACTION,
1940                                           NULL, "invalid empty data");
1941         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1942                 len = flow_dv_get_item_len(items->type);
1943                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1944                         return rte_flow_error_set(error, EINVAL,
1945                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1946                                                   (void *)items->type,
1947                                                   "items total size is too big"
1948                                                   " for encap action");
1949                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1950                 switch (items->type) {
1951                 case RTE_FLOW_ITEM_TYPE_ETH:
1952                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1953                         break;
1954                 case RTE_FLOW_ITEM_TYPE_VLAN:
1955                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1956                         if (!eth)
1957                                 return rte_flow_error_set(error, EINVAL,
1958                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1959                                                 (void *)items->type,
1960                                                 "eth header not found");
1961                         if (!eth->ether_type)
1962                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1963                         break;
1964                 case RTE_FLOW_ITEM_TYPE_IPV4:
1965                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1966                         if (!vlan && !eth)
1967                                 return rte_flow_error_set(error, EINVAL,
1968                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1969                                                 (void *)items->type,
1970                                                 "neither eth nor vlan"
1971                                                 " header found");
1972                         if (vlan && !vlan->eth_proto)
1973                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1974                         else if (eth && !eth->ether_type)
1975                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1976                         if (!ipv4->version_ihl)
1977                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1978                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1979                         if (!ipv4->time_to_live)
1980                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1981                         break;
1982                 case RTE_FLOW_ITEM_TYPE_IPV6:
1983                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1984                         if (!vlan && !eth)
1985                                 return rte_flow_error_set(error, EINVAL,
1986                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1987                                                 (void *)items->type,
1988                                                 "neither eth nor vlan"
1989                                                 " header found");
1990                         if (vlan && !vlan->eth_proto)
1991                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1992                         else if (eth && !eth->ether_type)
1993                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1994                         if (!ipv6->vtc_flow)
1995                                 ipv6->vtc_flow =
1996                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1997                         if (!ipv6->hop_limits)
1998                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1999                         break;
2000                 case RTE_FLOW_ITEM_TYPE_UDP:
2001                         udp = (struct rte_udp_hdr *)&buf[temp_size];
2002                         if (!ipv4 && !ipv6)
2003                                 return rte_flow_error_set(error, EINVAL,
2004                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2005                                                 (void *)items->type,
2006                                                 "ip header not found");
2007                         if (ipv4 && !ipv4->next_proto_id)
2008                                 ipv4->next_proto_id = IPPROTO_UDP;
2009                         else if (ipv6 && !ipv6->proto)
2010                                 ipv6->proto = IPPROTO_UDP;
2011                         break;
2012                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2013                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
2014                         if (!udp)
2015                                 return rte_flow_error_set(error, EINVAL,
2016                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2017                                                 (void *)items->type,
2018                                                 "udp header not found");
2019                         if (!udp->dst_port)
2020                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
2021                         if (!vxlan->vx_flags)
2022                                 vxlan->vx_flags =
2023                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
2024                         break;
2025                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2026                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
2027                         if (!udp)
2028                                 return rte_flow_error_set(error, EINVAL,
2029                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2030                                                 (void *)items->type,
2031                                                 "udp header not found");
2032                         if (!vxlan_gpe->proto)
2033                                 return rte_flow_error_set(error, EINVAL,
2034                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2035                                                 (void *)items->type,
2036                                                 "next protocol not found");
2037                         if (!udp->dst_port)
2038                                 udp->dst_port =
2039                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
2040                         if (!vxlan_gpe->vx_flags)
2041                                 vxlan_gpe->vx_flags =
2042                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
2043                         break;
2044                 case RTE_FLOW_ITEM_TYPE_GRE:
2045                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2046                         gre = (struct rte_gre_hdr *)&buf[temp_size];
2047                         if (!gre->proto)
2048                                 return rte_flow_error_set(error, EINVAL,
2049                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2050                                                 (void *)items->type,
2051                                                 "next protocol not found");
2052                         if (!ipv4 && !ipv6)
2053                                 return rte_flow_error_set(error, EINVAL,
2054                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2055                                                 (void *)items->type,
2056                                                 "ip header not found");
2057                         if (ipv4 && !ipv4->next_proto_id)
2058                                 ipv4->next_proto_id = IPPROTO_GRE;
2059                         else if (ipv6 && !ipv6->proto)
2060                                 ipv6->proto = IPPROTO_GRE;
2061                         break;
2062                 case RTE_FLOW_ITEM_TYPE_VOID:
2063                         break;
2064                 default:
2065                         return rte_flow_error_set(error, EINVAL,
2066                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2067                                                   (void *)items->type,
2068                                                   "unsupported item type");
2069                         break;
2070                 }
2071                 temp_size += len;
2072         }
2073         *size = temp_size;
2074         return 0;
2075 }
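
/*
 * Illustrative only (hypothetical specs): a VXLAN encapsulation item
 * list accepted by flow_dv_convert_encap_data(). Unset fields such as
 * ether_type, next_proto_id, dst_port and vx_flags receive the defaults
 * defined above:
 *
 *   const struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */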
2076
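/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header.
 * HW computes the IPv4 checksum, so only the IPv6/UDP case is handled.
 *
 * @param[in, out] data
 *   Pointer to the raw encap buffer, starting at the Ethernet header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */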
2077 static int
2078 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2079 {
2080         struct rte_ether_hdr *eth = NULL;
2081         struct rte_vlan_hdr *vlan = NULL;
2082         struct rte_ipv6_hdr *ipv6 = NULL;
2083         struct rte_udp_hdr *udp = NULL;
2084         char *next_hdr;
2085         uint16_t proto;
2086
2087         eth = (struct rte_ether_hdr *)data;
2088         next_hdr = (char *)(eth + 1);
2089         proto = rte_be_to_cpu_16(eth->ether_type);
2090
2091         /* VLAN skipping */
2092         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2093                 vlan = (struct rte_vlan_hdr *)next_hdr;
2094                 proto = rte_be_to_cpu_16(vlan->eth_proto);
2095                 next_hdr += sizeof(struct rte_vlan_hdr);
2096         }
2097
2098         /* HW calculates the IPv4 checksum; no need to proceed. */
2099         if (proto == RTE_ETHER_TYPE_IPV4)
2100                 return 0;
2101
2102         /* A non-IPv4/IPv6 header is not supported. */
2103         if (proto != RTE_ETHER_TYPE_IPV6) {
2104                 return rte_flow_error_set(error, ENOTSUP,
2105                                           RTE_FLOW_ERROR_TYPE_ACTION,
2106                                           NULL, "Cannot offload non IPv4/IPv6");
2107         }
2108
2109         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2110
2111         /* Ignore non-UDP. */
2112         if (ipv6->proto != IPPROTO_UDP)
2113                 return 0;
2114
2115         udp = (struct rte_udp_hdr *)(ipv6 + 1);
2116         udp->dgram_cksum = 0;
2117
2118         return 0;
2119 }
2120
2121 /**
2122  * Convert L2 encap action to DV specification.
2123  *
2124  * @param[in] dev
2125  *   Pointer to rte_eth_dev structure.
2126  * @param[in] action
2127  *   Pointer to action structure.
2128  * @param[in, out] dev_flow
2129  *   Pointer to the mlx5_flow.
2130  * @param[in] transfer
2131  *   Mark if the flow is E-Switch flow.
2132  * @param[out] error
2133  *   Pointer to the error structure.
2134  *
2135  * @return
2136  *   0 on success, a negative errno value otherwise and rte_errno is set.
2137  */
2138 static int
2139 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2140                                const struct rte_flow_action *action,
2141                                struct mlx5_flow *dev_flow,
2142                                uint8_t transfer,
2143                                struct rte_flow_error *error)
2144 {
2145         const struct rte_flow_item *encap_data;
2146         const struct rte_flow_action_raw_encap *raw_encap_data;
2147         struct mlx5_flow_dv_encap_decap_resource res = {
2148                 .reformat_type =
2149                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2150                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2151                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2152         };
2153
2154         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2155                 raw_encap_data =
2156                         (const struct rte_flow_action_raw_encap *)action->conf;
2157                 res.size = raw_encap_data->size;
2158                 memcpy(res.buf, raw_encap_data->data, res.size);
2159                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2160                         return -rte_errno;
2161         } else {
2162                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2163                         encap_data =
2164                                 ((const struct rte_flow_action_vxlan_encap *)
2165                                                 action->conf)->definition;
2166                 else
2167                         encap_data =
2168                                 ((const struct rte_flow_action_nvgre_encap *)
2169                                                 action->conf)->definition;
2170                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2171                                                &res.size, error))
2172                         return -rte_errno;
2173         }
2174         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2175                 return rte_flow_error_set(error, EINVAL,
2176                                           RTE_FLOW_ERROR_TYPE_ACTION,
2177                                           NULL, "can't create L2 encap action");
2178         return 0;
2179 }
2180
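/*
 * Note: the RAW_ENCAP branch above covers raw buffers that already start
 * with a complete L2 header (an L2-to-L2 tunnel reformat); buffers that
 * describe an L3 tunnel are handled by flow_dv_create_action_raw_encap()
 * below.
 */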
2181 /**
2182  * Convert L2 decap action to DV specification.
2183  *
2184  * @param[in] dev
2185  *   Pointer to rte_eth_dev structure.
2186  * @param[in, out] dev_flow
2187  *   Pointer to the mlx5_flow.
2188  * @param[in] transfer
2189  *   Mark if the flow is E-Switch flow.
2190  * @param[out] error
2191  *   Pointer to the error structure.
2192  *
2193  * @return
2194  *   0 on success, a negative errno value otherwise and rte_errno is set.
2195  */
2196 static int
2197 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2198                                struct mlx5_flow *dev_flow,
2199                                uint8_t transfer,
2200                                struct rte_flow_error *error)
2201 {
2202         struct mlx5_flow_dv_encap_decap_resource res = {
2203                 .size = 0,
2204                 .reformat_type =
2205                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2206                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2207                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2208         };
2209
2210         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2211                 return rte_flow_error_set(error, EINVAL,
2212                                           RTE_FLOW_ERROR_TYPE_ACTION,
2213                                           NULL, "can't create L2 decap action");
2214         return 0;
2215 }
2216
2217 /**
2218  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2219  *
2220  * @param[in] dev
2221  *   Pointer to rte_eth_dev structure.
2222  * @param[in] action
2223  *   Pointer to action structure.
2224  * @param[in, out] dev_flow
2225  *   Pointer to the mlx5_flow.
2226  * @param[in] attr
2227  *   Pointer to the flow attributes.
2228  * @param[out] error
2229  *   Pointer to the error structure.
2230  *
2231  * @return
2232  *   0 on success, a negative errno value otherwise and rte_errno is set.
2233  */
2234 static int
2235 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2236                                 const struct rte_flow_action *action,
2237                                 struct mlx5_flow *dev_flow,
2238                                 const struct rte_flow_attr *attr,
2239                                 struct rte_flow_error *error)
2240 {
2241         const struct rte_flow_action_raw_encap *encap_data;
2242         struct mlx5_flow_dv_encap_decap_resource res;
2243
2244         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2245         res.size = encap_data->size;
2246         memcpy(res.buf, encap_data->data, res.size);
2247         res.reformat_type = attr->egress ?
2248                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2249                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2250         if (attr->transfer)
2251                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2252         else
2253                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2254                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2255         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2256                 return rte_flow_error_set(error, EINVAL,
2257                                           RTE_FLOW_ERROR_TYPE_ACTION,
2258                                           NULL, "can't create encap action");
2259         return 0;
2260 }
2261
2262 /**
2263  * Create action push VLAN.
2264  *
2265  * @param[in] dev
2266  *   Pointer to rte_eth_dev structure.
2267  * @param[in] attr
2268  *   Pointer to the flow attributes.
2269  * @param[in] vlan
2270  *   Pointer to the VLAN header to push to the Ethernet header.
2271  * @param[in, out] dev_flow
2272  *   Pointer to the mlx5_flow.
2273  * @param[out] error
2274  *   Pointer to the error structure.
2275  *
2276  * @return
2277  *   0 on success, a negative errno value otherwise and rte_errno is set.
2278  */
2279 static int
2280 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2281                                 const struct rte_flow_attr *attr,
2282                                 const struct rte_vlan_hdr *vlan,
2283                                 struct mlx5_flow *dev_flow,
2284                                 struct rte_flow_error *error)
2285 {
2286         struct mlx5_flow_dv_push_vlan_action_resource res;
2287
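        /* Push value layout: TPID in the upper 16 bits, TCI in the lower 16. */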
2288         res.vlan_tag =
2289                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2290                                  vlan->vlan_tci);
2291         if (attr->transfer)
2292                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2293         else
2294                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2295                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2296         return flow_dv_push_vlan_action_resource_register
2297                                             (dev, &res, dev_flow, error);
2298 }
2299
2300 /**
2301  * Validate the modify-header actions.
2302  *
2303  * @param[in] action_flags
2304  *   Holds the actions detected until now.
2305  * @param[in] action
2306  *   Pointer to the modify action.
2307  * @param[out] error
2308  *   Pointer to error structure.
2309  *
2310  * @return
2311  *   0 on success, a negative errno value otherwise and rte_errno is set.
2312  */
2313 static int
2314 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2315                                    const struct rte_flow_action *action,
2316                                    struct rte_flow_error *error)
2317 {
2318         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2319                 return rte_flow_error_set(error, EINVAL,
2320                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2321                                           NULL, "action configuration not set");
2322         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2323                 return rte_flow_error_set(error, EINVAL,
2324                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2325                                           "can't have encap action before"
2326                                           " modify action");
2327         return 0;
2328 }
2329
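/*
 * Each modify-header validator below first runs the generic
 * flow_dv_validate_action_modify_hdr() check, then verifies that the
 * protocol layer being rewritten is actually present in the pattern.
 */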
2330 /**
2331  * Validate the modify-header MAC address actions.
2332  *
2333  * @param[in] action_flags
2334  *   Holds the actions detected until now.
2335  * @param[in] action
2336  *   Pointer to the modify action.
2337  * @param[in] item_flags
2338  *   Holds the items detected.
2339  * @param[out] error
2340  *   Pointer to error structure.
2341  *
2342  * @return
2343  *   0 on success, a negative errno value otherwise and rte_errno is set.
2344  */
2345 static int
2346 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2347                                    const struct rte_flow_action *action,
2348                                    const uint64_t item_flags,
2349                                    struct rte_flow_error *error)
2350 {
2351         int ret = 0;
2352
2353         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2354         if (!ret) {
2355                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2356                         return rte_flow_error_set(error, EINVAL,
2357                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2358                                                   NULL,
2359                                                   "no L2 item in pattern");
2360         }
2361         return ret;
2362 }
2363
2364 /**
2365  * Validate the modify-header IPv4 address actions.
2366  *
2367  * @param[in] action_flags
2368  *   Holds the actions detected until now.
2369  * @param[in] action
2370  *   Pointer to the modify action.
2371  * @param[in] item_flags
2372  *   Holds the items detected.
2373  * @param[out] error
2374  *   Pointer to error structure.
2375  *
2376  * @return
2377  *   0 on success, a negative errno value otherwise and rte_errno is set.
2378  */
2379 static int
2380 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2381                                     const struct rte_flow_action *action,
2382                                     const uint64_t item_flags,
2383                                     struct rte_flow_error *error)
2384 {
2385         int ret = 0;
2386
2387         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2388         if (!ret) {
2389                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2390                         return rte_flow_error_set(error, EINVAL,
2391                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2392                                                   NULL,
2393                                                   "no ipv4 item in pattern");
2394         }
2395         return ret;
2396 }
2397
2398 /**
2399  * Validate the modify-header IPv6 address actions.
2400  *
2401  * @param[in] action_flags
2402  *   Holds the actions detected until now.
2403  * @param[in] action
2404  *   Pointer to the modify action.
2405  * @param[in] item_flags
2406  *   Holds the items detected.
2407  * @param[out] error
2408  *   Pointer to error structure.
2409  *
2410  * @return
2411  *   0 on success, a negative errno value otherwise and rte_errno is set.
2412  */
2413 static int
2414 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2415                                     const struct rte_flow_action *action,
2416                                     const uint64_t item_flags,
2417                                     struct rte_flow_error *error)
2418 {
2419         int ret = 0;
2420
2421         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2422         if (!ret) {
2423                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2424                         return rte_flow_error_set(error, EINVAL,
2425                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2426                                                   NULL,
2427                                                   "no ipv6 item in pattern");
2428         }
2429         return ret;
2430 }
2431
2432 /**
2433  * Validate the modify-header TP actions.
2434  *
2435  * @param[in] action_flags
2436  *   Holds the actions detected until now.
2437  * @param[in] action
2438  *   Pointer to the modify action.
2439  * @param[in] item_flags
2440  *   Holds the items detected.
2441  * @param[out] error
2442  *   Pointer to error structure.
2443  *
2444  * @return
2445  *   0 on success, a negative errno value otherwise and rte_errno is set.
2446  */
2447 static int
2448 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2449                                   const struct rte_flow_action *action,
2450                                   const uint64_t item_flags,
2451                                   struct rte_flow_error *error)
2452 {
2453         int ret = 0;
2454
2455         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2456         if (!ret) {
2457                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2458                         return rte_flow_error_set(error, EINVAL,
2459                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2460                                                   NULL, "no transport layer "
2461                                                   "in pattern");
2462         }
2463         return ret;
2464 }
2465
2466 /**
2467  * Validate the modify-header actions of increment/decrement
2468  * TCP Sequence-number.
2469  *
2470  * @param[in] action_flags
2471  *   Holds the actions detected until now.
2472  * @param[in] action
2473  *   Pointer to the modify action.
2474  * @param[in] item_flags
2475  *   Holds the items detected.
2476  * @param[out] error
2477  *   Pointer to error structure.
2478  *
2479  * @return
2480  *   0 on success, a negative errno value otherwise and rte_errno is set.
2481  */
2482 static int
2483 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2484                                        const struct rte_flow_action *action,
2485                                        const uint64_t item_flags,
2486                                        struct rte_flow_error *error)
2487 {
2488         int ret = 0;
2489
2490         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2491         if (!ret) {
2492                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2493                         return rte_flow_error_set(error, EINVAL,
2494                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2495                                                   NULL, "no TCP item in"
2496                                                   " pattern");
2497                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2498                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2499                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2500                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2501                         return rte_flow_error_set(error, EINVAL,
2502                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2503                                                   NULL,
2504                                                   "cannot decrease and increase"
2505                                                   " TCP sequence number"
2506                                                   " at the same time");
2507         }
2508         return ret;
2509 }
2510
2511 /**
2512  * Validate the modify-header actions of increment/decrement
2513  * TCP Acknowledgment number.
2514  *
2515  * @param[in] action_flags
2516  *   Holds the actions detected until now.
2517  * @param[in] action
2518  *   Pointer to the modify action.
2519  * @param[in] item_flags
2520  *   Holds the items detected.
2521  * @param[out] error
2522  *   Pointer to error structure.
2523  *
2524  * @return
2525  *   0 on success, a negative errno value otherwise and rte_errno is set.
2526  */
2527 static int
2528 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2529                                        const struct rte_flow_action *action,
2530                                        const uint64_t item_flags,
2531                                        struct rte_flow_error *error)
2532 {
2533         int ret = 0;
2534
2535         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2536         if (!ret) {
2537                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2538                         return rte_flow_error_set(error, EINVAL,
2539                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2540                                                   NULL, "no TCP item in"
2541                                                   " pattern");
2542                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2543                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2544                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2545                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2546                         return rte_flow_error_set(error, EINVAL,
2547                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2548                                                   NULL,
2549                                                   "cannot decrease and increase"
2550                                                   " TCP acknowledgment number"
2551                                                   " at the same time");
2552         }
2553         return ret;
2554 }
2555
2556 /**
2557  * Validate the modify-header TTL actions.
2558  *
2559  * @param[in] action_flags
2560  *   Holds the actions detected until now.
2561  * @param[in] action
2562  *   Pointer to the modify action.
2563  * @param[in] item_flags
2564  *   Holds the items detected.
2565  * @param[out] error
2566  *   Pointer to error structure.
2567  *
2568  * @return
2569  *   0 on success, a negative errno value otherwise and rte_errno is set.
2570  */
2571 static int
2572 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2573                                    const struct rte_flow_action *action,
2574                                    const uint64_t item_flags,
2575                                    struct rte_flow_error *error)
2576 {
2577         int ret = 0;
2578
2579         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2580         if (!ret) {
2581                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2582                         return rte_flow_error_set(error, EINVAL,
2583                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2584                                                   NULL,
2585                                                   "no IP protocol in pattern");
2586         }
2587         return ret;
2588 }
2589
2590 /**
2591  * Validate jump action.
2592  *
2593  * @param[in] action
2594  *   Pointer to the jump action.
2595  * @param[in] action_flags
2596  *   Holds the actions detected until now.
2597  * @param[in] attributes
2598  *   Pointer to flow attributes.
2599  * @param[in] external
2600  *   Action belongs to a flow rule created by a request external to the PMD.
2601  * @param[out] error
2602  *   Pointer to error structure.
2603  *
2604  * @return
2605  *   0 on success, a negative errno value otherwise and rte_errno is set.
2606  */
2607 static int
2608 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2609                              uint64_t action_flags,
2610                              const struct rte_flow_attr *attributes,
2611                              bool external, struct rte_flow_error *error)
2612 {
2613         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2614                                                     MLX5_MAX_TABLES;
2615         uint32_t target_group, table;
2616         int ret = 0;
2617
2618         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2619                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2620                 return rte_flow_error_set(error, EINVAL,
2621                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2622                                           "can't have 2 fate actions in"
2623                                           " same flow");
2624         if (!action->conf)
2625                 return rte_flow_error_set(error, EINVAL,
2626                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2627                                           NULL, "action configuration not set");
2628         target_group =
2629                 ((const struct rte_flow_action_jump *)action->conf)->group;
2630         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2631                                        &table, error);
2632         if (ret)
2633                 return ret;
2634         if (table >= max_group)
2635                 return rte_flow_error_set(error, EINVAL,
2636                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2637                                           "target group index out of range");
2638         if (attributes->group >= target_group)
2639                 return rte_flow_error_set(error, EINVAL,
2640                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2641                                           "target group must be higher than"
2642                                           " the current flow group");
2643         return 0;
2644 }
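
/*
 * Illustrative example (not part of the driver): a rule in group 0 may only
 * jump to a strictly higher group within the table range, e.g.:
 *
 *	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *
 * A jump with jump.group <= attr.group is rejected by the validation above.
 */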
2645
2646 /**
2647  * Validate the port_id action.
2648  *
2649  * @param[in] dev
2650  *   Pointer to rte_eth_dev structure.
2651  * @param[in] action_flags
2652  *   Bit-fields that hold the actions detected until now.
2653  * @param[in] action
2654  *   Port_id RTE action structure.
2655  * @param[in] attr
2656  *   Attributes of flow that includes this action.
2657  * @param[out] error
2658  *   Pointer to error structure.
2659  *
2660  * @return
2661  *   0 on success, a negative errno value otherwise and rte_errno is set.
2662  */
2663 static int
2664 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2665                                 uint64_t action_flags,
2666                                 const struct rte_flow_action *action,
2667                                 const struct rte_flow_attr *attr,
2668                                 struct rte_flow_error *error)
2669 {
2670         const struct rte_flow_action_port_id *port_id;
2671         struct mlx5_priv *act_priv;
2672         struct mlx5_priv *dev_priv;
2673         uint16_t port;
2674
2675         if (!attr->transfer)
2676                 return rte_flow_error_set(error, ENOTSUP,
2677                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2678                                           NULL,
2679                                           "port id action is valid in transfer"
2680                                           " mode only");
2681         if (!action || !action->conf)
2682                 return rte_flow_error_set(error, ENOTSUP,
2683                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2684                                           NULL,
2685                                           "port id action parameters must be"
2686                                           " specified");
2687         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2688                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2689                 return rte_flow_error_set(error, EINVAL,
2690                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2691                                           "can have only one fate action in"
2692                                           " a flow");
2693         dev_priv = mlx5_dev_to_eswitch_info(dev);
2694         if (!dev_priv)
2695                 return rte_flow_error_set(error, rte_errno,
2696                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2697                                           NULL,
2698                                           "failed to obtain E-Switch info");
2699         port_id = action->conf;
2700         port = port_id->original ? dev->data->port_id : port_id->id;
2701         act_priv = mlx5_port_to_eswitch_info(port);
2702         if (!act_priv)
2703                 return rte_flow_error_set
2704                                 (error, rte_errno,
2705                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2706                                  "failed to obtain E-Switch port id for port");
2707         if (act_priv->domain_id != dev_priv->domain_id)
2708                 return rte_flow_error_set
2709                                 (error, EINVAL,
2710                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2711                                  "port does not belong to"
2712                                  " E-Switch being configured");
2713         return 0;
2714 }
2715
2716 /**
2717  * Find existing modify-header resource or create and register a new one.
2718  *
2719  * @param[in, out] dev
2720  *   Pointer to rte_eth_dev structure.
2721  * @param[in, out] resource
2722  *   Pointer to modify-header resource.
2723  * @param[in, out] dev_flow
2724  *   Pointer to the dev_flow.
2725  * @param[out] error
2726  *   Pointer to error structure.
2727  *
2728  * @return
2729  *   0 on success, otherwise a negative errno value and rte_errno is set.
2730  */
2731 static int
2732 flow_dv_modify_hdr_resource_register
2733                         (struct rte_eth_dev *dev,
2734                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2735                          struct mlx5_flow *dev_flow,
2736                          struct rte_flow_error *error)
2737 {
2738         struct mlx5_priv *priv = dev->data->dev_private;
2739         struct mlx5_ibv_shared *sh = priv->sh;
2740         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2741         struct mlx5dv_dr_domain *ns;
2742
2743         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2744                 ns = sh->fdb_domain;
2745         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2746                 ns = sh->tx_domain;
2747         else
2748                 ns = sh->rx_domain;
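        /*
         * Actions destined for the root table (group 0) must carry the
         * MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL flag, since that table is
         * managed by the kernel/firmware rather than by the DR layer.
         */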
2749         resource->flags =
2750                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2751         /* Lookup a matching resource from cache. */
2752         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2753                 if (resource->ft_type == cache_resource->ft_type &&
2754                     resource->actions_num == cache_resource->actions_num &&
2755                     resource->flags == cache_resource->flags &&
2756                     !memcmp((const void *)resource->actions,
2757                             (const void *)cache_resource->actions,
2758                             (resource->actions_num *
2759                                             sizeof(resource->actions[0])))) {
2760                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2761                                 (void *)cache_resource,
2762                                 rte_atomic32_read(&cache_resource->refcnt));
2763                         rte_atomic32_inc(&cache_resource->refcnt);
2764                         dev_flow->dv.modify_hdr = cache_resource;
2765                         return 0;
2766                 }
2767         }
2768         /* Register new modify-header resource. */
2769         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2770         if (!cache_resource)
2771                 return rte_flow_error_set(error, ENOMEM,
2772                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2773                                           "cannot allocate resource memory");
2774         *cache_resource = *resource;
2775         cache_resource->verbs_action =
2776                 mlx5_glue->dv_create_flow_action_modify_header
2777                                         (sh->ctx, cache_resource->ft_type,
2778                                          ns, cache_resource->flags,
2779                                          cache_resource->actions_num *
2780                                          sizeof(cache_resource->actions[0]),
2781                                          (uint64_t *)cache_resource->actions);
2782         if (!cache_resource->verbs_action) {
2783                 rte_free(cache_resource);
2784                 return rte_flow_error_set(error, ENOMEM,
2785                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2786                                           NULL, "cannot create action");
2787         }
2788         rte_atomic32_init(&cache_resource->refcnt);
2789         rte_atomic32_inc(&cache_resource->refcnt);
2790         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2791         dev_flow->dv.modify_hdr = cache_resource;
2792         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2793                 (void *)cache_resource,
2794                 rte_atomic32_read(&cache_resource->refcnt));
2795         return 0;
2796 }
2797
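/* Number of pool slots added to a counter container on each resize. */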
2798 #define MLX5_CNT_CONTAINER_RESIZE 64
2799
2800 /**
2801  * Get or create a flow counter.
2802  *
2803  * @param[in] dev
2804  *   Pointer to the Ethernet device structure.
2805  * @param[in] shared
2806  *   Indicate if this counter is shared with other flows.
2807  * @param[in] id
2808  *   Counter identifier.
2809  *
2810  * @return
2811  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
2812  */
2813 static struct mlx5_flow_counter *
2814 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2815                                uint32_t id)
2816 {
2817         struct mlx5_priv *priv = dev->data->dev_private;
2818         struct mlx5_flow_counter *cnt = NULL;
2819         struct mlx5_devx_obj *dcs = NULL;
2820
2821         if (!priv->config.devx) {
2822                 rte_errno = ENOTSUP;
2823                 return NULL;
2824         }
2825         if (shared) {
2826                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2827                         if (cnt->shared && cnt->id == id) {
2828                                 cnt->ref_cnt++;
2829                                 return cnt;
2830                         }
2831                 }
2832         }
2833         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2834         if (!dcs)
2835                 return NULL;
2836         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2837         if (!cnt) {
2838                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2839                 rte_errno = ENOMEM;
2840                 return NULL;
2841         }
2842         struct mlx5_flow_counter tmpl = {
2843                 .shared = shared,
2844                 .ref_cnt = 1,
2845                 .id = id,
2846                 .dcs = dcs,
2847         };
2848         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2849         if (!tmpl.action) {
2850                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2851                 rte_errno = errno;
2852                 rte_free(cnt);
2853                 return NULL;
2854         }
2855         *cnt = tmpl;
2856         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2857         return cnt;
2858 }
2859
2860 /**
2861  * Release a flow counter.
2862  *
2863  * @param[in] dev
2864  *   Pointer to the Ethernet device structure.
2865  * @param[in] counter
2866  *   Pointer to the counter handle.
2867  */
2868 static void
2869 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2870                                  struct mlx5_flow_counter *counter)
2871 {
2872         struct mlx5_priv *priv = dev->data->dev_private;
2873
2874         if (!counter)
2875                 return;
2876         if (--counter->ref_cnt == 0) {
2877                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2878                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2879                 rte_free(counter);
2880         }
2881 }
2882
2883 /**
2884  * Query a devx flow counter.
2885  *
2886  * @param[in] dev
2887  *   Pointer to the Ethernet device structure.
2888  * @param[in] cnt
2889  *   Pointer to the flow counter.
2890  * @param[out] pkts
2891  *   The statistics value of packets.
2892  * @param[out] bytes
2893  *   The statistics value of bytes.
2894  *
2895  * @return
2896  *   0 on success, otherwise a negative errno value and rte_errno is set.
2897  */
2898 static inline int
2899 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2900                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2901                      uint64_t *bytes)
2902 {
2903         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2904                                                 0, NULL, NULL, 0);
2905 }
2906
2907 /**
2908  * Get a pool by a counter.
2909  *
2910  * @param[in] cnt
2911  *   Pointer to the counter.
2912  *
2913  * @return
2914  *   The counter pool.
2915  */
2916 static struct mlx5_flow_counter_pool *
2917 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2918 {
2919         if (!cnt->batch) {
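                /*
                 * Single counters live in pool->counters_raw[], which is
                 * laid out right after the pool structure and indexed by
                 * dcs->id % MLX5_COUNTERS_PER_POOL: step back to the first
                 * counter, then one pool structure back, to get the pool.
                 */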
2920                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2921                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2922         }
2923         return cnt->pool;
2924 }
2925
2926 /**
2927  * Get a pool by devx counter ID.
2928  *
2929  * @param[in] cont
2930  *   Pointer to the counter container.
2931  * @param[in] id
2932  *   The counter devx ID.
2933  *
2934  * @return
2935  *   The counter pool pointer if it exists, NULL otherwise.
2936  */
2937 static struct mlx5_flow_counter_pool *
2938 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2939 {
2940         struct mlx5_flow_counter_pool *pool;
2941
2942         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2943                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2944                                 MLX5_COUNTERS_PER_POOL;
2945
2946                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2947                         return pool;
2948         }
2949         return NULL;
2950 }
2951
2952 /**
2953  * Allocate new memory for the counter values, wrapped with all the needed
2954  * management structures.
2955  *
2956  * @param[in] dev
2957  *   Pointer to the Ethernet device structure.
2958  * @param[in] raws_n
2959  *   The number of raw memory areas, each one holding MLX5_COUNTERS_PER_POOL
2960  *   counters.
2960  *
2961  * @return
2962  *   The new memory management pointer on success, otherwise NULL and rte_errno
2963  *   is set.
2964  */
2965 static struct mlx5_counter_stats_mem_mng *
2966 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2967 {
2968         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2969                                         (dev->data->dev_private))->sh;
2970         struct mlx5_devx_mkey_attr mkey_attr;
2971         struct mlx5_counter_stats_mem_mng *mem_mng;
2972         volatile struct flow_counter_stats *raw_data;
2973         int size = (sizeof(struct flow_counter_stats) *
2974                         MLX5_COUNTERS_PER_POOL +
2975                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2976                         sizeof(struct mlx5_counter_stats_mem_mng);
2977         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2978         int i;
2979
2980         if (!mem) {
2981                 rte_errno = ENOMEM;
2982                 return NULL;
2983         }
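        /*
         * Layout of the single allocation: raws_n raw data areas first,
         * then the mlx5_counter_stats_raw descriptors, and the management
         * structure itself at the very end.
         */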
2984         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2985         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2986         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2987                                                  IBV_ACCESS_LOCAL_WRITE);
2988         if (!mem_mng->umem) {
2989                 rte_errno = errno;
2990                 rte_free(mem);
2991                 return NULL;
2992         }
2993         mkey_attr.addr = (uintptr_t)mem;
2994         mkey_attr.size = size;
2995         mkey_attr.umem_id = mem_mng->umem->umem_id;
2996         mkey_attr.pd = sh->pdn;
2997         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2998         if (!mem_mng->dm) {
2999                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
3000                 rte_errno = errno;
3001                 rte_free(mem);
3002                 return NULL;
3003         }
3004         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3005         raw_data = (volatile struct flow_counter_stats *)mem;
3006         for (i = 0; i < raws_n; ++i) {
3007                 mem_mng->raws[i].mem_mng = mem_mng;
3008                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3009         }
3010         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3011         return mem_mng;
3012 }
3013
3014 /**
3015  * Resize a counter container.
3016  *
3017  * @param[in] dev
3018  *   Pointer to the Ethernet device structure.
3019  * @param[in] batch
3020  *   Whether the pool is for counters allocated by a batch command.
3021  *
3022  * @return
3023  *   The new container pointer on success, otherwise NULL and rte_errno is set.
3024  */
3025 static struct mlx5_pools_container *
3026 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3027 {
3028         struct mlx5_priv *priv = dev->data->dev_private;
3029         struct mlx5_pools_container *cont =
3030                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
3031         struct mlx5_pools_container *new_cont =
3032                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
3033         struct mlx5_counter_stats_mem_mng *mem_mng;
3034         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
3035         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
3036         int i;
3037
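        /*
         * Two containers are kept per batch type; the PMD fills the unused
         * one and flips it to become the master, while the host query
         * thread keeps using the old master until it observes the flip.
         */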
3038         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
3039                 /* The last resize has not been detected by the host thread yet. */
3040                 rte_errno = EAGAIN;
3041                 return NULL;
3042         }
3043         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
3044         if (!new_cont->pools) {
3045                 rte_errno = ENOMEM;
3046                 return NULL;
3047         }
3048         if (cont->n)
3049                 memcpy(new_cont->pools, cont->pools, cont->n *
3050                        sizeof(struct mlx5_flow_counter_pool *));
3051         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
3052                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
3053         if (!mem_mng) {
3054                 rte_free(new_cont->pools);
3055                 return NULL;
3056         }
3057         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
3058                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
3059                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
3060                                  i, next);
3061         new_cont->n = resize;
3062         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
3063         TAILQ_INIT(&new_cont->pool_list);
3064         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
3065         new_cont->init_mem_mng = mem_mng;
3066         rte_cio_wmb();
3067         /* Flip the master container. */
3068         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
3069         return new_cont;
3070 }
3071
3072 /**
3073  * Query a devx flow counter.
3074  *
3075  * @param[in] dev
3076  *   Pointer to the Ethernet device structure.
3077  * @param[in] cnt
3078  *   Pointer to the flow counter.
3079  * @param[out] pkts
3080  *   The statistics value of packets.
3081  * @param[out] bytes
3082  *   The statistics value of bytes.
3083  *
3084  * @return
3085  *   0 on success, otherwise a negative errno value and rte_errno is set.
3086  */
3087 static inline int
3088 _flow_dv_query_count(struct rte_eth_dev *dev,
3089                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
3090                      uint64_t *bytes)
3091 {
3092         struct mlx5_priv *priv = dev->data->dev_private;
3093         struct mlx5_flow_counter_pool *pool =
3094                         flow_dv_counter_pool_get(cnt);
3095         int offset = cnt - &pool->counters_raw[0];
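        /*
         * The counter's index within its pool selects its raw statistics
         * record, which the asynchronous batch query fills in.
         */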
3096
3097         if (priv->counter_fallback)
3098                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
3099
3100         rte_spinlock_lock(&pool->sl);
3101         /*
3102          * A single counter allocation may return an ID smaller than the
3103          * minimum covered by the raw data the host thread is reading.
3104          * In this case the new counter values must be reported as 0.
3105          */
3106         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
3107                 *pkts = 0;
3108                 *bytes = 0;
3109         } else {
3110                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
3111                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
3112         }
3113         rte_spinlock_unlock(&pool->sl);
3114         return 0;
3115 }
3116
3117 /**
3118  * Create and initialize a new counter pool.
3119  *
3120  * @param[in] dev
3121  *   Pointer to the Ethernet device structure.
3122  * @param[out] dcs
3123  *   The devX counter handle.
3124  * @param[in] batch
3125  *   Whether the pool is for counters allocated by a batch command.
3126  *
3127  * @return
3128  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
3129  */
3130 static struct mlx5_flow_counter_pool *
3131 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
3132                     uint32_t batch)
3133 {
3134         struct mlx5_priv *priv = dev->data->dev_private;
3135         struct mlx5_flow_counter_pool *pool;
3136         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3137                                                                0);
3138         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
3139         uint32_t size;
3140
3141         if (cont->n == n_valid) {
3142                 cont = flow_dv_container_resize(dev, batch);
3143                 if (!cont)
3144                         return NULL;
3145         }
3146         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
3147                         sizeof(struct mlx5_flow_counter);
3148         pool = rte_calloc(__func__, 1, size, 0);
3149         if (!pool) {
3150                 rte_errno = ENOMEM;
3151                 return NULL;
3152         }
3153         pool->min_dcs = dcs;
3154         pool->raw = cont->init_mem_mng->raws + n_valid %
3155                                                      MLX5_CNT_CONTAINER_RESIZE;
3156         pool->raw_hw = NULL;
3157         rte_spinlock_init(&pool->sl);
3158         /*
3159          * Newly allocated counters in this pool have query generation 0;
3160          * a pool generation of 2 makes them all valid for allocation.
3161          */
3162         rte_atomic64_set(&pool->query_gen, 0x2);
3163         TAILQ_INIT(&pool->counters);
3164         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3165         cont->pools[n_valid] = pool;
3166         /* Pool initialization must be updated before host thread access. */
3167         rte_cio_wmb();
3168         rte_atomic16_add(&cont->n_valid, 1);
3169         return pool;
3170 }
3171
3172 /**
3173  * Prepare a new counter and/or a new counter pool.
3174  *
3175  * @param[in] dev
3176  *   Pointer to the Ethernet device structure.
3177  * @param[out] cnt_free
3178  *   Where to put the pointer of a new counter.
3179  * @param[in] batch
3180  *   Whether the pool is for counters allocated by a batch command.
3181  *
3182  * @return
3183  *   The free counter pool pointer and @p cnt_free is set on success,
3184  *   NULL otherwise and rte_errno is set.
3185  */
3186 static struct mlx5_flow_counter_pool *
3187 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
3188                              struct mlx5_flow_counter **cnt_free,
3189                              uint32_t batch)
3190 {
3191         struct mlx5_priv *priv = dev->data->dev_private;
3192         struct mlx5_flow_counter_pool *pool;
3193         struct mlx5_devx_obj *dcs = NULL;
3194         struct mlx5_flow_counter *cnt;
3195         uint32_t i;
3196
3197         if (!batch) {
3198                 /* bulk_bitmap must be 0 for single counter allocation. */
3199                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3200                 if (!dcs)
3201                         return NULL;
3202                 pool = flow_dv_find_pool_by_id
3203                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
3204                 if (!pool) {
3205                         pool = flow_dv_pool_create(dev, dcs, batch);
3206                         if (!pool) {
3207                                 mlx5_devx_cmd_destroy(dcs);
3208                                 return NULL;
3209                         }
3210                 } else if (dcs->id < pool->min_dcs->id) {
3211                         rte_atomic64_set(&pool->a64_dcs,
3212                                          (int64_t)(uintptr_t)dcs);
3213                 }
3214                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
3215                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3216                 cnt->dcs = dcs;
3217                 *cnt_free = cnt;
3218                 return pool;
3219         }
3220         /* bulk_bitmap is in units of 128 counters. */
3221         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
3222                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
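        /*
         * The 0x4 bitmap requests 4 * 128 counters in one shot, matching
         * MLX5_COUNTERS_PER_POOL, so a single batch allocation fills a
         * whole pool.
         */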
3223         if (!dcs) {
3224                 rte_errno = ENODATA;
3225                 return NULL;
3226         }
3227         pool = flow_dv_pool_create(dev, dcs, batch);
3228         if (!pool) {
3229                 mlx5_devx_cmd_destroy(dcs);
3230                 return NULL;
3231         }
3232         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3233                 cnt = &pool->counters_raw[i];
3234                 cnt->pool = pool;
3235                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3236         }
3237         *cnt_free = &pool->counters_raw[0];
3238         return pool;
3239 }
3240
3241 /**
3242  * Search for an existing shared counter.
3243  *
3244  * @param[in] cont
3245  *   Pointer to the relevant counter pool container.
3246  * @param[in] id
3247  *   The shared counter ID to search.
3248  *
3249  * @return
3250  *   NULL if it does not exist, otherwise a pointer to the shared counter.
3251  */
3252 static struct mlx5_flow_counter *
3253 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3254                               uint32_t id)
3255 {
3256         struct mlx5_flow_counter *cnt;
3257         struct mlx5_flow_counter_pool *pool;
3258         int i;
3259
3260         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3261                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3262                         cnt = &pool->counters_raw[i];
3263                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3264                                 return cnt;
3265                 }
3266         }
3267         return NULL;
3268 }
3269
3270 /**
3271  * Allocate a flow counter.
3272  *
3273  * @param[in] dev
3274  *   Pointer to the Ethernet device structure.
3275  * @param[in] shared
3276  *   Indicate if this counter is shared with other flows.
3277  * @param[in] id
3278  *   Counter identifier.
3279  * @param[in] group
3280  *   Counter flow group.
3281  *
3282  * @return
3283  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
3284  */
3285 static struct mlx5_flow_counter *
3286 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3287                       uint16_t group)
3288 {
3289         struct mlx5_priv *priv = dev->data->dev_private;
3290         struct mlx5_flow_counter_pool *pool = NULL;
3291         struct mlx5_flow_counter *cnt_free = NULL;
3292         /*
3293          * Currently a group 0 flow counter cannot be assigned to a flow if
3294          * it is not the first one in the batch counter allocation, so it is
3295          * better to allocate counters one by one for these flows in a
3296          * separate container.
3297          * A counter can be shared between different groups, so shared
3298          * counters must be taken from the single-counter container.
3299          */
3300         uint32_t batch = (group && !shared) ? 1 : 0;
3301         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3302                                                                0);
3303
3304         if (priv->counter_fallback)
3305                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3306         if (!priv->config.devx) {
3307                 rte_errno = ENOTSUP;
3308                 return NULL;
3309         }
3310         if (shared) {
3311                 cnt_free = flow_dv_counter_shared_search(cont, id);
3312                 if (cnt_free) {
3313                         if (cnt_free->ref_cnt + 1 == 0) {
3314                                 rte_errno = E2BIG;
3315                                 return NULL;
3316                         }
3317                         cnt_free->ref_cnt++;
3318                         return cnt_free;
3319                 }
3320         }
3321         /* Pools that have free counters are at the start of the list. */
3322         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3323                 /*
3324                  * A free counter's reset values must be updated between its
3325                  * release and its next allocation, so at least one query
3326                  * must happen in that window; ensure it by saving the query
3327                  * generation at release time.
3328                  * The free list is sorted by generation, so if the first
3329                  * counter is not updated, none of the others are updated
3330                  * either.
3331                  */
3332                 cnt_free = TAILQ_FIRST(&pool->counters);
3333                 if (cnt_free && cnt_free->query_gen + 1 <
3334                     rte_atomic64_read(&pool->query_gen))
3335                         break;
3336                 cnt_free = NULL;
3337         }
3338         if (!cnt_free) {
3339                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3340                 if (!pool)
3341                         return NULL;
3342         }
3343         cnt_free->batch = batch;
3344         /* Create a DV counter action only on the first use. */
3345         if (!cnt_free->action) {
3346                 uint16_t offset;
3347                 struct mlx5_devx_obj *dcs;
3348
3349                 if (batch) {
3350                         offset = cnt_free - &pool->counters_raw[0];
3351                         dcs = pool->min_dcs;
3352                 } else {
3353                         offset = 0;
3354                         dcs = cnt_free->dcs;
3355                 }
3356                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3357                                         (dcs->obj, offset);
3358                 if (!cnt_free->action) {
3359                         rte_errno = errno;
3360                         return NULL;
3361                 }
3362         }
3363         /* Update the counter reset values. */
3364         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3365                                  &cnt_free->bytes))
3366                 return NULL;
3367         cnt_free->shared = shared;
3368         cnt_free->ref_cnt = 1;
3369         cnt_free->id = id;
3370         if (!priv->sh->cmng.query_thread_on)
3371                 /* Start the asynchronous batch query by the host thread. */
3372                 mlx5_set_query_alarm(priv->sh);
3373         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3374         if (TAILQ_EMPTY(&pool->counters)) {
3375                 /* Move the pool to the end of the container pool list. */
3376                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3377                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3378         }
3379         return cnt_free;
3380 }
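
/*
 * Illustrative lifecycle of the counter API above (a sketch, not part of
 * the driver; error handling omitted):
 *
 *	struct mlx5_flow_counter *cnt;
 *	uint64_t pkts, bytes;
 *
 *	cnt = flow_dv_counter_alloc(dev, 0, 0, 1);
 *	... attach cnt->action to a flow and let traffic hit it ...
 *	_flow_dv_query_count(dev, cnt, &pkts, &bytes);
 *	flow_dv_counter_release(dev, cnt);
 *
 * Release does not destroy the counter; it returns to its pool's free list
 * and becomes allocatable again once a newer query generation passes.
 */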
3381
3382 /**
3383  * Release a flow counter.
3384  *
3385  * @param[in] dev
3386  *   Pointer to the Ethernet device structure.
3387  * @param[in] counter
3388  *   Pointer to the counter handle.
3389  */
3390 static void
3391 flow_dv_counter_release(struct rte_eth_dev *dev,
3392                         struct mlx5_flow_counter *counter)
3393 {
3394         struct mlx5_priv *priv = dev->data->dev_private;
3395
3396         if (!counter)
3397                 return;
3398         if (priv->counter_fallback) {
3399                 flow_dv_counter_release_fallback(dev, counter);
3400                 return;
3401         }
3402         if (--counter->ref_cnt == 0) {
3403                 struct mlx5_flow_counter_pool *pool =
3404                                 flow_dv_counter_pool_get(counter);
3405
3406                 /* Put the counter at the end of the list - it is the last updated one. */
3407                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3408                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3409         }
3410 }
3411
3412 /**
3413  * Verify the @p attributes will be correctly understood by the NIC and store
3414  * them in the @p flow if everything is correct.
3415  *
3416  * @param[in] dev
3417  *   Pointer to dev struct.
3418  * @param[in] attributes
3419  *   Pointer to flow attributes.
3420  * @param[in] external
3421  *   This flow rule is created by a request external to the PMD.
3422  * @param[out] error
3423  *   Pointer to error structure.
3424  *
3425  * @return
3426  *   0 on success, a negative errno value otherwise and rte_errno is set.
3427  */
3428 static int
3429 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3430                             const struct rte_flow_attr *attributes,
3431                             bool external __rte_unused,
3432                             struct rte_flow_error *error)
3433 {
3434         struct mlx5_priv *priv = dev->data->dev_private;
3435         uint32_t priority_max = priv->config.flow_prio - 1;
3436
3437 #ifndef HAVE_MLX5DV_DR
3438         if (attributes->group)
3439                 return rte_flow_error_set(error, ENOTSUP,
3440                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3441                                           NULL,
3442                                           "groups are not supported");
3443 #else
3444         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3445                                                     MLX5_MAX_TABLES;
3446         uint32_t table;
3447         int ret;
3448
3449         ret = mlx5_flow_group_to_table(attributes, external,
3450                                        attributes->group,
3451                                        &table, error);
3452         if (ret)
3453                 return ret;
3454         if (table >= max_group)
3455                 return rte_flow_error_set(error, EINVAL,
3456                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3457                                           "group index out of range");
3458 #endif
3459         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3460             attributes->priority >= priority_max)
3461                 return rte_flow_error_set(error, ENOTSUP,
3462                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3463                                           NULL,
3464                                           "priority out of range");
3465         if (attributes->transfer) {
3466                 if (!priv->config.dv_esw_en)
3467                         return rte_flow_error_set
3468                                 (error, ENOTSUP,
3469                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3470                                  "E-Switch DR is not supported");
3471                 if (!(priv->representor || priv->master))
3472                         return rte_flow_error_set
3473                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3474                                  NULL, "E-Switch configuration can only be"
3475                                  " done by a master or a representor device");
3476                 if (attributes->egress)
3477                         return rte_flow_error_set
3478                                 (error, ENOTSUP,
3479                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3480                                  "egress is not supported");
3481         }
3482         if (!(attributes->egress ^ attributes->ingress))
3483                 return rte_flow_error_set(error, ENOTSUP,
3484                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3485                                           "must specify exactly one of "
3486                                           "ingress or egress");
3487         return 0;
3488 }
3489
3490 /**
3491  * Internal validation function. For validating both actions and items.
3492  *
3493  * @param[in] dev
3494  *   Pointer to the rte_eth_dev structure.
3495  * @param[in] attr
3496  *   Pointer to the flow attributes.
3497  * @param[in] items
3498  *   Pointer to the list of items.
3499  * @param[in] actions
3500  *   Pointer to the list of actions.
3501  * @param[in] external
3502  *   This flow rule is created by a request external to the PMD.
3503  * @param[out] error
3504  *   Pointer to the error structure.
3505  *
3506  * @return
3507  *   0 on success, a negative errno value otherwise and rte_errno is set.
3508  */
3509 static int
3510 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3511                  const struct rte_flow_item items[],
3512                  const struct rte_flow_action actions[],
3513                  bool external, struct rte_flow_error *error)
3514 {
3515         int ret;
3516         uint64_t action_flags = 0;
3517         uint64_t item_flags = 0;
3518         uint64_t last_item = 0;
3519         uint8_t next_protocol = 0xff;
3520         uint16_t ether_type = 0;
3521         int actions_n = 0;
3522         const struct rte_flow_item *gre_item = NULL;
3523         struct rte_flow_item_tcp nic_tcp_mask = {
3524                 .hdr = {
3525                         .tcp_flags = 0xFF,
3526                         .src_port = RTE_BE16(UINT16_MAX),
3527                         .dst_port = RTE_BE16(UINT16_MAX),
3528                 }
3529         };
3530
3531         if (items == NULL)
3532                 return -1;
3533         ret = flow_dv_validate_attributes(dev, attr, external, error);
3534         if (ret < 0)
3535                 return ret;
3536         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3537                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3538                 int type = items->type;
3539
3540                 switch (type) {
3541                 case RTE_FLOW_ITEM_TYPE_VOID:
3542                         break;
3543                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3544                         ret = flow_dv_validate_item_port_id
3545                                         (dev, items, attr, item_flags, error);
3546                         if (ret < 0)
3547                                 return ret;
3548                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3549                         break;
3550                 case RTE_FLOW_ITEM_TYPE_ETH:
3551                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3552                                                           error);
3553                         if (ret < 0)
3554                                 return ret;
3555                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3556                                              MLX5_FLOW_LAYER_OUTER_L2;
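                        /*
                         * Remember the masked EtherType; the following L3
                         * item is validated against it.
                         */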
3557                         if (items->mask != NULL && items->spec != NULL) {
3558                                 ether_type =
3559                                         ((const struct rte_flow_item_eth *)
3560                                          items->spec)->type;
3561                                 ether_type &=
3562                                         ((const struct rte_flow_item_eth *)
3563                                          items->mask)->type;
3564                                 ether_type = rte_be_to_cpu_16(ether_type);
3565                         } else {
3566                                 ether_type = 0;
3567                         }
3568                         break;
3569                 case RTE_FLOW_ITEM_TYPE_VLAN:
3570                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3571                                                            dev, error);
3572                         if (ret < 0)
3573                                 return ret;
3574                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3575                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3576                         if (items->mask != NULL && items->spec != NULL) {
3577                                 ether_type =
3578                                         ((const struct rte_flow_item_vlan *)
3579                                          items->spec)->inner_type;
3580                                 ether_type &=
3581                                         ((const struct rte_flow_item_vlan *)
3582                                          items->mask)->inner_type;
3583                                 ether_type = rte_be_to_cpu_16(ether_type);
3584                         } else {
3585                                 ether_type = 0;
3586                         }
3587                         break;
3588                 case RTE_FLOW_ITEM_TYPE_IPV4:
3589                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3590                                                   &item_flags, &tunnel);
3591                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3592                                                            last_item,
3593                                                            ether_type, NULL,
3594                                                            error);
3595                         if (ret < 0)
3596                                 return ret;
3597                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3598                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3599                         if (items->mask != NULL &&
3600                             ((const struct rte_flow_item_ipv4 *)
3601                              items->mask)->hdr.next_proto_id) {
3602                                 next_protocol =
3603                                         ((const struct rte_flow_item_ipv4 *)
3604                                          (items->spec))->hdr.next_proto_id;
3605                                 next_protocol &=
3606                                         ((const struct rte_flow_item_ipv4 *)
3607                                          (items->mask))->hdr.next_proto_id;
3608                         } else {
3609                                 /* Reset for inner layer. */
3610                                 next_protocol = 0xff;
3611                         }
3612                         break;
3613                 case RTE_FLOW_ITEM_TYPE_IPV6:
3614                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3615                                                   &item_flags, &tunnel);
3616                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3617                                                            last_item,
3618                                                            ether_type, NULL,
3619                                                            error);
3620                         if (ret < 0)
3621                                 return ret;
3622                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3623                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3624                         if (items->mask != NULL &&
3625                             ((const struct rte_flow_item_ipv6 *)
3626                              items->mask)->hdr.proto) {
3627                                 next_protocol =
3628                                         ((const struct rte_flow_item_ipv6 *)
3629                                          items->spec)->hdr.proto;
3630                                 next_protocol &=
3631                                         ((const struct rte_flow_item_ipv6 *)
3632                                          items->mask)->hdr.proto;
3633                         } else {
3634                                 /* Reset for inner layer. */
3635                                 next_protocol = 0xff;
3636                         }
3637                         break;
3638                 case RTE_FLOW_ITEM_TYPE_TCP:
3639                         ret = mlx5_flow_validate_item_tcp
3640                                                 (items, item_flags,
3641                                                  next_protocol,
3642                                                  &nic_tcp_mask,
3643                                                  error);
3644                         if (ret < 0)
3645                                 return ret;
3646                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3647                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3648                         break;
3649                 case RTE_FLOW_ITEM_TYPE_UDP:
3650                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3651                                                           next_protocol,
3652                                                           error);
3653                         if (ret < 0)
3654                                 return ret;
3655                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3656                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3657                         break;
3658                 case RTE_FLOW_ITEM_TYPE_GRE:
3659                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3660                                                           next_protocol, error);
3661                         if (ret < 0)
3662                                 return ret;
3663                         gre_item = items;
3664                         last_item = MLX5_FLOW_LAYER_GRE;
3665                         break;
3666                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3667                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3668                                                             next_protocol,
3669                                                             error);
3670                         if (ret < 0)
3671                                 return ret;
3672                         last_item = MLX5_FLOW_LAYER_NVGRE;
3673                         break;
3674                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3675                         ret = mlx5_flow_validate_item_gre_key
3676                                 (items, item_flags, gre_item, error);
3677                         if (ret < 0)
3678                                 return ret;
3679                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3680                         break;
3681                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3682                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3683                                                             error);
3684                         if (ret < 0)
3685                                 return ret;
3686                         last_item = MLX5_FLOW_LAYER_VXLAN;
3687                         break;
3688                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3689                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3690                                                                 item_flags, dev,
3691                                                                 error);
3692                         if (ret < 0)
3693                                 return ret;
3694                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3695                         break;
3696                 case RTE_FLOW_ITEM_TYPE_GENEVE:
3697                         ret = mlx5_flow_validate_item_geneve(items,
3698                                                              item_flags, dev,
3699                                                              error);
3700                         if (ret < 0)
3701                                 return ret;
3702                         last_item = MLX5_FLOW_LAYER_GENEVE;
3703                         break;
3704                 case RTE_FLOW_ITEM_TYPE_MPLS:
3705                         ret = mlx5_flow_validate_item_mpls(dev, items,
3706                                                            item_flags,
3707                                                            last_item, error);
3708                         if (ret < 0)
3709                                 return ret;
3710                         last_item = MLX5_FLOW_LAYER_MPLS;
3711                         break;
3712                 case RTE_FLOW_ITEM_TYPE_META:
3713                         ret = flow_dv_validate_item_meta(dev, items, attr,
3714                                                          error);
3715                         if (ret < 0)
3716                                 return ret;
3717                         last_item = MLX5_FLOW_ITEM_METADATA;
3718                         break;
3719                 case RTE_FLOW_ITEM_TYPE_ICMP:
3720                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3721                                                            next_protocol,
3722                                                            error);
3723                         if (ret < 0)
3724                                 return ret;
3725                         last_item = MLX5_FLOW_LAYER_ICMP;
3726                         break;
3727                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3728                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3729                                                             next_protocol,
3730                                                             error);
3731                         if (ret < 0)
3732                                 return ret;
3733                         last_item = MLX5_FLOW_LAYER_ICMP6;
3734                         break;
3735                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3736                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3737                         break;
3738                 default:
3739                         return rte_flow_error_set(error, ENOTSUP,
3740                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3741                                                   NULL, "item not supported");
3742                 }
3743                 item_flags |= last_item;
3744         }
3745         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3746                 int type = actions->type;
3747                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3748                         return rte_flow_error_set(error, ENOTSUP,
3749                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3750                                                   actions, "too many actions");
3751                 switch (type) {
3752                 case RTE_FLOW_ACTION_TYPE_VOID:
3753                         break;
3754                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3755                         ret = flow_dv_validate_action_port_id(dev,
3756                                                               action_flags,
3757                                                               actions,
3758                                                               attr,
3759                                                               error);
3760                         if (ret)
3761                                 return ret;
3762                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3763                         ++actions_n;
3764                         break;
3765                 case RTE_FLOW_ACTION_TYPE_FLAG:
3766                         ret = mlx5_flow_validate_action_flag(action_flags,
3767                                                              attr, error);
3768                         if (ret < 0)
3769                                 return ret;
3770                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3771                         ++actions_n;
3772                         break;
3773                 case RTE_FLOW_ACTION_TYPE_MARK:
3774                         ret = mlx5_flow_validate_action_mark(actions,
3775                                                              action_flags,
3776                                                              attr, error);
3777                         if (ret < 0)
3778                                 return ret;
3779                         action_flags |= MLX5_FLOW_ACTION_MARK;
3780                         ++actions_n;
3781                         break;
3782                 case RTE_FLOW_ACTION_TYPE_DROP:
3783                         ret = mlx5_flow_validate_action_drop(action_flags,
3784                                                              attr, error);
3785                         if (ret < 0)
3786                                 return ret;
3787                         action_flags |= MLX5_FLOW_ACTION_DROP;
3788                         ++actions_n;
3789                         break;
3790                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3791                         ret = mlx5_flow_validate_action_queue(actions,
3792                                                               action_flags, dev,
3793                                                               attr, error);
3794                         if (ret < 0)
3795                                 return ret;
3796                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3797                         ++actions_n;
3798                         break;
3799                 case RTE_FLOW_ACTION_TYPE_RSS:
3800                         ret = mlx5_flow_validate_action_rss(actions,
3801                                                             action_flags, dev,
3802                                                             attr, item_flags,
3803                                                             error);
3804                         if (ret < 0)
3805                                 return ret;
3806                         action_flags |= MLX5_FLOW_ACTION_RSS;
3807                         ++actions_n;
3808                         break;
3809                 case RTE_FLOW_ACTION_TYPE_COUNT:
3810                         ret = flow_dv_validate_action_count(dev, error);
3811                         if (ret < 0)
3812                                 return ret;
3813                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3814                         ++actions_n;
3815                         break;
3816                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3817                         if (flow_dv_validate_action_pop_vlan(dev,
3818                                                              action_flags,
3819                                                              actions,
3820                                                              item_flags, attr,
3821                                                              error))
3822                                 return -rte_errno;
3823                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3824                         ++actions_n;
3825                         break;
3826                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3827                         ret = flow_dv_validate_action_push_vlan(action_flags,
3828                                                                 item_flags,
3829                                                                 actions, attr,
3830                                                                 error);
3831                         if (ret < 0)
3832                                 return ret;
3833                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3834                         ++actions_n;
3835                         break;
3836                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3837                         ret = flow_dv_validate_action_set_vlan_pcp
3838                                                 (action_flags, actions, error);
3839                         if (ret < 0)
3840                                 return ret;
                        /* PCP is counted together with the push VLAN action. */
3842                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
3843                         break;
3844                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3845                         ret = flow_dv_validate_action_set_vlan_vid
3846                                                 (item_flags, action_flags,
3847                                                  actions, error);
3848                         if (ret < 0)
3849                                 return ret;
                        /* VID is counted together with the push VLAN action. */
3851                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
3852                         break;
3853                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3854                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3855                         ret = flow_dv_validate_action_l2_encap(action_flags,
3856                                                                actions, attr,
3857                                                                error);
3858                         if (ret < 0)
3859                                 return ret;
3860                         action_flags |= actions->type ==
3861                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3862                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3863                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3864                         ++actions_n;
3865                         break;
3866                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3867                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3868                         ret = flow_dv_validate_action_l2_decap(action_flags,
3869                                                                attr, error);
3870                         if (ret < 0)
3871                                 return ret;
3872                         action_flags |= actions->type ==
3873                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3874                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3875                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3876                         ++actions_n;
3877                         break;
3878                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3879                         ret = flow_dv_validate_action_raw_encap(action_flags,
3880                                                                 actions, attr,
3881                                                                 error);
3882                         if (ret < 0)
3883                                 return ret;
3884                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3885                         ++actions_n;
3886                         break;
3887                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3888                         ret = flow_dv_validate_action_raw_decap(action_flags,
3889                                                                 actions, attr,
3890                                                                 error);
3891                         if (ret < 0)
3892                                 return ret;
3893                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3894                         ++actions_n;
3895                         break;
3896                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3897                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3898                         ret = flow_dv_validate_action_modify_mac(action_flags,
3899                                                                  actions,
3900                                                                  item_flags,
3901                                                                  error);
3902                         if (ret < 0)
3903                                 return ret;
3904                         /* Count all modify-header actions as one action. */
3905                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3906                                 ++actions_n;
3907                         action_flags |= actions->type ==
3908                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3909                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3910                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3911                         break;
3913                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3914                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3915                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3916                                                                   actions,
3917                                                                   item_flags,
3918                                                                   error);
3919                         if (ret < 0)
3920                                 return ret;
3921                         /* Count all modify-header actions as one action. */
3922                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3923                                 ++actions_n;
3924                         action_flags |= actions->type ==
3925                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3926                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3927                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3928                         break;
3929                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3930                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3931                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3932                                                                   actions,
3933                                                                   item_flags,
3934                                                                   error);
3935                         if (ret < 0)
3936                                 return ret;
3937                         /* Count all modify-header actions as one action. */
3938                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3939                                 ++actions_n;
3940                         action_flags |= actions->type ==
3941                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3942                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3943                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3944                         break;
3945                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3946                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3947                         ret = flow_dv_validate_action_modify_tp(action_flags,
3948                                                                 actions,
3949                                                                 item_flags,
3950                                                                 error);
3951                         if (ret < 0)
3952                                 return ret;
3953                         /* Count all modify-header actions as one action. */
3954                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3955                                 ++actions_n;
3956                         action_flags |= actions->type ==
3957                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3958                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3959                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3960                         break;
3961                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3962                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3963                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3964                                                                  actions,
3965                                                                  item_flags,
3966                                                                  error);
3967                         if (ret < 0)
3968                                 return ret;
3969                         /* Count all modify-header actions as one action. */
3970                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3971                                 ++actions_n;
3972                         action_flags |= actions->type ==
3973                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3974                                                 MLX5_FLOW_ACTION_SET_TTL :
3975                                                 MLX5_FLOW_ACTION_DEC_TTL;
3976                         break;
3977                 case RTE_FLOW_ACTION_TYPE_JUMP:
3978                         ret = flow_dv_validate_action_jump(actions,
3979                                                            action_flags,
3980                                                            attr, external,
3981                                                            error);
3982                         if (ret)
3983                                 return ret;
3984                         ++actions_n;
3985                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3986                         break;
3987                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3988                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3989                         ret = flow_dv_validate_action_modify_tcp_seq
3990                                                                 (action_flags,
3991                                                                  actions,
3992                                                                  item_flags,
3993                                                                  error);
3994                         if (ret < 0)
3995                                 return ret;
3996                         /* Count all modify-header actions as one action. */
3997                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3998                                 ++actions_n;
3999                         action_flags |= actions->type ==
4000                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4001                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4002                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4003                         break;
4004                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4005                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4006                         ret = flow_dv_validate_action_modify_tcp_ack
4007                                                                 (action_flags,
4008                                                                  actions,
4009                                                                  item_flags,
4010                                                                  error);
4011                         if (ret < 0)
4012                                 return ret;
4013                         /* Count all modify-header actions as one action. */
4014                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4015                                 ++actions_n;
4016                         action_flags |= actions->type ==
4017                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4018                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
4019                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4020                         break;
4021                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
4022                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
4023                         break;
4024                 default:
4025                         return rte_flow_error_set(error, ENOTSUP,
4026                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4027                                                   actions,
4028                                                   "action not supported");
4029                 }
4030         }
        if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
            (action_flags & MLX5_FLOW_VLAN_ACTIONS))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          actions,
                                          "can't combine VLAN actions"
                                          " with a tunnel pattern");
        /* The E-Switch places a few restrictions on the usable items and actions. */
4039         if (attr->transfer) {
4040                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
4041                         return rte_flow_error_set(error, ENOTSUP,
4042                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4043                                                   NULL,
4044                                                   "unsupported action FLAG");
4045                 if (action_flags & MLX5_FLOW_ACTION_MARK)
4046                         return rte_flow_error_set(error, ENOTSUP,
4047                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4048                                                   NULL,
4049                                                   "unsupported action MARK");
4050                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
4051                         return rte_flow_error_set(error, ENOTSUP,
4052                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4053                                                   NULL,
4054                                                   "unsupported action QUEUE");
4055                 if (action_flags & MLX5_FLOW_ACTION_RSS)
4056                         return rte_flow_error_set(error, ENOTSUP,
4057                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4058                                                   NULL,
4059                                                   "unsupported action RSS");
4060                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4061                         return rte_flow_error_set(error, EINVAL,
4062                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4063                                                   actions,
4064                                                   "no fate action is found");
4065         } else {
4066                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
4067                         return rte_flow_error_set(error, EINVAL,
4068                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4069                                                   actions,
4070                                                   "no fate action is found");
4071         }
4072         return 0;
4073 }
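
/**
 * Illustrative sketch (not part of the driver): a minimal action list
 * that the validation loop above accepts for an ingress NIC rule. MARK
 * and QUEUE each increment actions_n once and set their
 * MLX5_FLOW_ACTION_* bit; QUEUE also supplies the fate action required
 * on the ingress path.
 *
 * @code
 *      struct rte_flow_action_mark mark = { .id = 42 };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      const struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL },
 *      };
 * @endcode
 */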
4074
4075 /**
 * Internal preparation function. Allocates the DV flow structure,
 * whose size is constant.
4078  *
4079  * @param[in] attr
4080  *   Pointer to the flow attributes.
4081  * @param[in] items
4082  *   Pointer to the list of items.
4083  * @param[in] actions
4084  *   Pointer to the list of actions.
4085  * @param[out] error
4086  *   Pointer to the error structure.
4087  *
4088  * @return
4089  *   Pointer to mlx5_flow object on success,
4090  *   otherwise NULL and rte_errno is set.
4091  */
4092 static struct mlx5_flow *
4093 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
4094                 const struct rte_flow_item items[] __rte_unused,
4095                 const struct rte_flow_action actions[] __rte_unused,
4096                 struct rte_flow_error *error)
4097 {
4098         uint32_t size = sizeof(struct mlx5_flow);
4099         struct mlx5_flow *flow;
4100
4101         flow = rte_calloc(__func__, 1, size, 0);
4102         if (!flow) {
4103                 rte_flow_error_set(error, ENOMEM,
4104                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4105                                    "not enough memory to create flow");
4106                 return NULL;
4107         }
4108         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
4109         return flow;
4110 }
4111
4112 #ifndef NDEBUG
4113 /**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * the kernel driver. If an unmasked bit is present in the value, return
 * failure.
4116  *
 * @param match_mask
 *   Pointer to the match mask buffer.
 * @param match_value
 *   Pointer to the match value buffer.
4121  *
4122  * @return
4123  *   0 if valid, -EINVAL otherwise.
4124  */
4125 static int
4126 flow_dv_check_valid_spec(void *match_mask, void *match_value)
4127 {
4128         uint8_t *m = match_mask;
4129         uint8_t *v = match_value;
4130         unsigned int i;
4131
4132         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
4133                 if (v[i] & ~m[i]) {
4134                         DRV_LOG(ERR,
4135                                 "match_value differs from match_criteria"
4136                                 " %p[%u] != %p[%u]",
4137                                 match_value, i, match_mask, i);
4138                         return -EINVAL;
4139                 }
4140         }
4141         return 0;
4142 }
4143 #endif
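
/**
 * A minimal sketch of the invariant flow_dv_check_valid_spec() enforces:
 * every bit set in the value must also be set in the mask. The buffers
 * below are hypothetical.
 *
 * @code
 *      uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
 *      uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
 *
 *      mask[0] = 0x0f;
 *      value[0] = 0x03;        // within the mask: returns 0
 *      value[0] = 0x13;        // bit 4 is not masked: returns -EINVAL
 * @endcode
 */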
4144
4145 /**
4146  * Add Ethernet item to matcher and to the value.
4147  *
4148  * @param[in, out] matcher
4149  *   Flow matcher.
4150  * @param[in, out] key
4151  *   Flow matcher value.
4152  * @param[in] item
4153  *   Flow pattern to translate.
4154  * @param[in] inner
4155  *   Item is inner pattern.
4156  */
4157 static void
4158 flow_dv_translate_item_eth(void *matcher, void *key,
4159                            const struct rte_flow_item *item, int inner)
4160 {
4161         const struct rte_flow_item_eth *eth_m = item->mask;
4162         const struct rte_flow_item_eth *eth_v = item->spec;
4163         const struct rte_flow_item_eth nic_mask = {
4164                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4165                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4166                 .type = RTE_BE16(0xffff),
4167         };
4168         void *headers_m;
4169         void *headers_v;
4170         char *l24_v;
4171         unsigned int i;
4172
4173         if (!eth_v)
4174                 return;
4175         if (!eth_m)
4176                 eth_m = &nic_mask;
4177         if (inner) {
4178                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4179                                          inner_headers);
4180                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4181         } else {
4182                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4183                                          outer_headers);
4184                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4185         }
4186         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
4187                &eth_m->dst, sizeof(eth_m->dst));
4188         /* The value must be in the range of the mask. */
4189         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
4190         for (i = 0; i < sizeof(eth_m->dst); ++i)
4191                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
4192         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
4193                &eth_m->src, sizeof(eth_m->src));
4194         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
4195         /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
4197                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
4198         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4199                  rte_be_to_cpu_16(eth_m->type));
4200         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
4201         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
4202 }
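
/**
 * Illustrative sketch: the spec & mask rule applied by the translation
 * above. With a partially masked destination MAC, only the masked
 * bytes participate in the match. The addresses below are examples.
 *
 * @code
 *      struct rte_flow_item_eth spec = {
 *              .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *      };
 *      struct rte_flow_item_eth mask = {
 *              .dst.addr_bytes = "\xff\xff\xff\x00\x00\x00",
 *      };
 *      // Matcher dmac becomes ff:ff:ff:00:00:00 and the value
 *      // dmac becomes 00:11:22:00:00:00 (spec & mask, per byte).
 * @endcode
 */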
4203
4204 /**
4205  * Add VLAN item to matcher and to the value.
4206  *
4207  * @param[in, out] dev_flow
4208  *   Flow descriptor.
4209  * @param[in, out] matcher
4210  *   Flow matcher.
4211  * @param[in, out] key
4212  *   Flow matcher value.
4213  * @param[in] item
4214  *   Flow pattern to translate.
4215  * @param[in] inner
4216  *   Item is inner pattern.
4217  */
4218 static void
4219 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
4220                             void *matcher, void *key,
4221                             const struct rte_flow_item *item,
4222                             int inner)
4223 {
4224         const struct rte_flow_item_vlan *vlan_m = item->mask;
4225         const struct rte_flow_item_vlan *vlan_v = item->spec;
4226         void *headers_m;
4227         void *headers_v;
4228         uint16_t tci_m;
4229         uint16_t tci_v;
4230
4231         if (!vlan_v)
4232                 return;
4233         if (!vlan_m)
4234                 vlan_m = &rte_flow_item_vlan_mask;
4235         if (inner) {
4236                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4237                                          inner_headers);
4238                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4239         } else {
4240                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4241                                          outer_headers);
4242                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4243                 /*
                 * This is a workaround: masks are not supported,
                 * and the value has been pre-validated.
4246                  */
4247                 dev_flow->dv.vf_vlan.tag =
4248                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
4249         }
4250         tci_m = rte_be_to_cpu_16(vlan_m->tci);
4251         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
4252         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
4253         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
4254         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
4255         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
4256         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
4257         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
4258         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
4259         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
4260         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4261                  rte_be_to_cpu_16(vlan_m->inner_type));
4262         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
4263                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
4264 }
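
/**
 * Worked example of the TCI decomposition above (illustrative value):
 * PCP occupies bits 15-13, CFI/DEI bit 12, and the VID bits 11-0.
 *
 * @code
 *      uint16_t tci = 0x6064;          // PCP = 3, CFI = 0, VID = 0x064
 *      uint16_t vid = tci & 0x0fff;    // 0x064 -> first_vid
 *      uint16_t cfi = (tci >> 12) & 1; // 0     -> first_cfi
 *      uint16_t pcp = tci >> 13;       // 3     -> first_prio
 * @endcode
 */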
4265
4266 /**
4267  * Add IPV4 item to matcher and to the value.
4268  *
4269  * @param[in, out] matcher
4270  *   Flow matcher.
4271  * @param[in, out] key
4272  *   Flow matcher value.
4273  * @param[in] item
4274  *   Flow pattern to translate.
4275  * @param[in] inner
4276  *   Item is inner pattern.
4277  * @param[in] group
4278  *   The group to insert the rule.
4279  */
4280 static void
4281 flow_dv_translate_item_ipv4(void *matcher, void *key,
4282                             const struct rte_flow_item *item,
4283                             int inner, uint32_t group)
4284 {
4285         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4286         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4287         const struct rte_flow_item_ipv4 nic_mask = {
4288                 .hdr = {
4289                         .src_addr = RTE_BE32(0xffffffff),
4290                         .dst_addr = RTE_BE32(0xffffffff),
4291                         .type_of_service = 0xff,
4292                         .next_proto_id = 0xff,
4293                 },
4294         };
4295         void *headers_m;
4296         void *headers_v;
4297         char *l24_m;
4298         char *l24_v;
4299         uint8_t tos;
4300
4301         if (inner) {
4302                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4303                                          inner_headers);
4304                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4305         } else {
4306                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4307                                          outer_headers);
4308                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4309         }
4310         if (group == 0)
4311                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4312         else
4313                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4314         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4315         if (!ipv4_v)
4316                 return;
4317         if (!ipv4_m)
4318                 ipv4_m = &nic_mask;
4319         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4320                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4321         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4322                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4323         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4324         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4325         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4326                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
4327         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4328                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
4329         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4330         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4331         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4332         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4333                  ipv4_m->hdr.type_of_service);
4334         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4335         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4336                  ipv4_m->hdr.type_of_service >> 2);
4337         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4338         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4339                  ipv4_m->hdr.next_proto_id);
4340         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4341                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4342 }
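
/**
 * Illustrative sketch: matching an IPv4 destination /24 through the
 * translation above. Since the value is stored as spec & mask, any
 * host in 192.168.1.0/24 hits the rule. The addresses are examples.
 *
 * @code
 *      struct rte_flow_item_ipv4 spec = {
 *              .hdr.dst_addr = RTE_BE32(0xc0a80105), // 192.168.1.5
 *      };
 *      struct rte_flow_item_ipv4 mask = {
 *              .hdr.dst_addr = RTE_BE32(0xffffff00), // 255.255.255.0
 *      };
 *      // Matcher gets 255.255.255.0, value gets 192.168.1.0.
 * @endcode
 */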
4343
4344 /**
4345  * Add IPV6 item to matcher and to the value.
4346  *
4347  * @param[in, out] matcher
4348  *   Flow matcher.
4349  * @param[in, out] key
4350  *   Flow matcher value.
4351  * @param[in] item
4352  *   Flow pattern to translate.
4353  * @param[in] inner
4354  *   Item is inner pattern.
4355  * @param[in] group
4356  *   The group to insert the rule.
4357  */
4358 static void
4359 flow_dv_translate_item_ipv6(void *matcher, void *key,
4360                             const struct rte_flow_item *item,
4361                             int inner, uint32_t group)
4362 {
4363         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4364         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4365         const struct rte_flow_item_ipv6 nic_mask = {
4366                 .hdr = {
4367                         .src_addr =
4368                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4369                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4370                         .dst_addr =
4371                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4372                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4373                         .vtc_flow = RTE_BE32(0xffffffff),
4374                         .proto = 0xff,
4375                         .hop_limits = 0xff,
4376                 },
4377         };
4378         void *headers_m;
4379         void *headers_v;
4380         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4381         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4382         char *l24_m;
4383         char *l24_v;
4384         uint32_t vtc_m;
4385         uint32_t vtc_v;
4386         int i;
4387         int size;
4388
4389         if (inner) {
4390                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4391                                          inner_headers);
4392                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4393         } else {
4394                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4395                                          outer_headers);
4396                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4397         }
4398         if (group == 0)
4399                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4400         else
4401                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4402         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4403         if (!ipv6_v)
4404                 return;
4405         if (!ipv6_m)
4406                 ipv6_m = &nic_mask;
4407         size = sizeof(ipv6_m->hdr.dst_addr);
4408         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4409                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4410         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4411                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4412         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4413         for (i = 0; i < size; ++i)
4414                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4415         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4416                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4417         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4418                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4419         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4420         for (i = 0; i < size; ++i)
4421                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4422         /* TOS. */
4423         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4424         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
4425         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4426         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4427         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4428         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4429         /* Label. */
4430         if (inner) {
4431                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4432                          vtc_m);
4433                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4434                          vtc_v);
4435         } else {
4436                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4437                          vtc_m);
4438                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4439                          vtc_v);
4440         }
4441         /* Protocol. */
4442         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4443                  ipv6_m->hdr.proto);
4444         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4445                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4446 }
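
/**
 * Worked example of the vtc_flow decomposition above (illustrative
 * value). vtc_flow packs version(4) | traffic class(8) | flow
 * label(20); ECN is the low two bits of the traffic class, DSCP the
 * upper six, and MLX5_SET() truncates to each field's width.
 *
 * @code
 *      uint32_t vtc = 0x60b00123; // version 6, TC 0x0b, label 0x123
 *      uint32_t ecn = (vtc >> 20) & 0x3;   // 0x3
 *      uint32_t dscp = (vtc >> 22) & 0x3f; // 0x02
 *      uint32_t label = vtc & 0xfffff;     // 0x00123
 * @endcode
 */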
4447
4448 /**
4449  * Add TCP item to matcher and to the value.
4450  *
4451  * @param[in, out] matcher
4452  *   Flow matcher.
4453  * @param[in, out] key
4454  *   Flow matcher value.
4455  * @param[in] item
4456  *   Flow pattern to translate.
4457  * @param[in] inner
4458  *   Item is inner pattern.
4459  */
4460 static void
4461 flow_dv_translate_item_tcp(void *matcher, void *key,
4462                            const struct rte_flow_item *item,
4463                            int inner)
4464 {
4465         const struct rte_flow_item_tcp *tcp_m = item->mask;
4466         const struct rte_flow_item_tcp *tcp_v = item->spec;
4467         void *headers_m;
4468         void *headers_v;
4469
4470         if (inner) {
4471                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4472                                          inner_headers);
4473                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4474         } else {
4475                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4476                                          outer_headers);
4477                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4478         }
4479         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4480         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4481         if (!tcp_v)
4482                 return;
4483         if (!tcp_m)
4484                 tcp_m = &rte_flow_item_tcp_mask;
4485         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4486                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4487         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4488                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4489         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4490                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4491         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4492                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4493         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4494                  tcp_m->hdr.tcp_flags);
4495         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4496                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4497 }
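
/**
 * Illustrative sketch: a TCP item that, through the translation above,
 * matches only SYN segments to port 80 (spec & mask semantics as
 * usual; RTE_TCP_SYN_FLAG comes from rte_tcp.h).
 *
 * @code
 *      struct rte_flow_item_tcp spec = {
 *              .hdr.dst_port = RTE_BE16(80),
 *              .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *      };
 *      struct rte_flow_item_tcp mask = {
 *              .hdr.dst_port = RTE_BE16(0xffff),
 *              .hdr.tcp_flags = 0xff,
 *      };
 * @endcode
 */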
4498
4499 /**
4500  * Add UDP item to matcher and to the value.
4501  *
4502  * @param[in, out] matcher
4503  *   Flow matcher.
4504  * @param[in, out] key
4505  *   Flow matcher value.
4506  * @param[in] item
4507  *   Flow pattern to translate.
4508  * @param[in] inner
4509  *   Item is inner pattern.
4510  */
4511 static void
4512 flow_dv_translate_item_udp(void *matcher, void *key,
4513                            const struct rte_flow_item *item,
4514                            int inner)
4515 {
4516         const struct rte_flow_item_udp *udp_m = item->mask;
4517         const struct rte_flow_item_udp *udp_v = item->spec;
4518         void *headers_m;
4519         void *headers_v;
4520
4521         if (inner) {
4522                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4523                                          inner_headers);
4524                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4525         } else {
4526                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4527                                          outer_headers);
4528                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4529         }
4530         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4531         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4532         if (!udp_v)
4533                 return;
4534         if (!udp_m)
4535                 udp_m = &rte_flow_item_udp_mask;
4536         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4537                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4538         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4539                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4540         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4541                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4542         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4543                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4544 }
4545
4546 /**
4547  * Add GRE optional Key item to matcher and to the value.
4548  *
4549  * @param[in, out] matcher
4550  *   Flow matcher.
4551  * @param[in, out] key
4552  *   Flow matcher value.
4553  * @param[in] item
4554  *   Flow pattern to translate.
4557  */
4558 static void
4559 flow_dv_translate_item_gre_key(void *matcher, void *key,
                               const struct rte_flow_item *item)
4561 {
4562         const rte_be32_t *key_m = item->mask;
4563         const rte_be32_t *key_v = item->spec;
4564         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4565         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4566         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4567
4568         if (!key_v)
4569                 return;
4570         if (!key_m)
4571                 key_m = &gre_key_default_mask;
        /* The GRE K bit must be set and should already have been validated. */
4573         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4574         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
4575         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4576                  rte_be_to_cpu_32(*key_m) >> 8);
4577         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4578                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4579         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4580                  rte_be_to_cpu_32(*key_m) & 0xFF);
4581         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4582                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4583 }
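
/**
 * Worked example of the GRE key split above: the device matches the
 * 32-bit key as a 24-bit high part and an 8-bit low part.
 *
 * @code
 *      uint32_t gre_key = 0x00abcdef;   // CPU order, illustrative
 *      uint32_t key_h = gre_key >> 8;   // 0x00abcd -> gre_key_h
 *      uint32_t key_l = gre_key & 0xff; // 0xef     -> gre_key_l
 * @endcode
 */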
4584
4585 /**
4586  * Add GRE item to matcher and to the value.
4587  *
4588  * @param[in, out] matcher
4589  *   Flow matcher.
4590  * @param[in, out] key
4591  *   Flow matcher value.
4592  * @param[in] item
4593  *   Flow pattern to translate.
4594  * @param[in] inner
4595  *   Item is inner pattern.
4596  */
4597 static void
4598 flow_dv_translate_item_gre(void *matcher, void *key,
4599                            const struct rte_flow_item *item,
4600                            int inner)
4601 {
4602         const struct rte_flow_item_gre *gre_m = item->mask;
4603         const struct rte_flow_item_gre *gre_v = item->spec;
4604         void *headers_m;
4605         void *headers_v;
4606         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4607         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4608         struct {
4609                 union {
4610                         __extension__
4611                         struct {
4612                                 uint16_t version:3;
4613                                 uint16_t rsvd0:9;
4614                                 uint16_t s_present:1;
4615                                 uint16_t k_present:1;
4616                                 uint16_t rsvd_bit1:1;
4617                                 uint16_t c_present:1;
4618                         };
4619                         uint16_t value;
4620                 };
4621         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4622
4623         if (inner) {
4624                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4625                                          inner_headers);
4626                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4627         } else {
4628                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4629                                          outer_headers);
4630                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4631         }
4632         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4633         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4634         if (!gre_v)
4635                 return;
4636         if (!gre_m)
4637                 gre_m = &rte_flow_item_gre_mask;
4638         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4639                  rte_be_to_cpu_16(gre_m->protocol));
4640         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4641                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4642         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4643         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4644         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4645                  gre_crks_rsvd0_ver_m.c_present);
4646         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4647                  gre_crks_rsvd0_ver_v.c_present &
4648                  gre_crks_rsvd0_ver_m.c_present);
4649         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4650                  gre_crks_rsvd0_ver_m.k_present);
4651         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4652                  gre_crks_rsvd0_ver_v.k_present &
4653                  gre_crks_rsvd0_ver_m.k_present);
4654         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4655                  gre_crks_rsvd0_ver_m.s_present);
4656         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4657                  gre_crks_rsvd0_ver_v.s_present &
4658                  gre_crks_rsvd0_ver_m.s_present);
4659 }
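
/**
 * Worked decode of c_rsvd0_ver per the bit-field overlay above (which
 * assumes little-endian bit allocation): in CPU order, bit 15 is C,
 * bit 13 is K, bit 12 is S and bits 2-0 hold the version.
 *
 * @code
 *      uint16_t v = 0x2000;   // GRE flags with only the K bit set
 *      int c = (v >> 15) & 1; // 0
 *      int k = (v >> 13) & 1; // 1
 *      int s = (v >> 12) & 1; // 0
 * @endcode
 */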
4660
4661 /**
4662  * Add NVGRE item to matcher and to the value.
4663  *
4664  * @param[in, out] matcher
4665  *   Flow matcher.
4666  * @param[in, out] key
4667  *   Flow matcher value.
4668  * @param[in] item
4669  *   Flow pattern to translate.
4670  * @param[in] inner
4671  *   Item is inner pattern.
4672  */
4673 static void
4674 flow_dv_translate_item_nvgre(void *matcher, void *key,
4675                              const struct rte_flow_item *item,
4676                              int inner)
4677 {
4678         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4679         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4680         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4681         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        const char *tni_flow_id_m;
        const char *tni_flow_id_v;
4684         char *gre_key_m;
4685         char *gre_key_v;
4686         int size;
4687         int i;
4688
4689         /* For NVGRE, GRE header fields must be set with defined values. */
4690         const struct rte_flow_item_gre gre_spec = {
4691                 .c_rsvd0_ver = RTE_BE16(0x2000),
4692                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4693         };
4694         const struct rte_flow_item_gre gre_mask = {
4695                 .c_rsvd0_ver = RTE_BE16(0xB000),
4696                 .protocol = RTE_BE16(UINT16_MAX),
4697         };
4698         const struct rte_flow_item gre_item = {
4699                 .spec = &gre_spec,
4700                 .mask = &gre_mask,
4701                 .last = NULL,
4702         };
4703         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4704         if (!nvgre_v)
4705                 return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
        /* Dereference spec/mask only after the NULL checks above. */
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4709         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4710         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4711         memcpy(gre_key_m, tni_flow_id_m, size);
4712         for (i = 0; i < size; ++i)
4713                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4714 }
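
/**
 * Note on the constants above: with the decoding used by
 * flow_dv_translate_item_gre(), spec 0x2000 sets only the K bit and
 * mask 0xB000 covers the C, K and S bits, i.e. NVGRE requires K=1 with
 * C=0 and S=0. An illustrative item (the TNI value is an example):
 *
 * @code
 *      struct rte_flow_item_nvgre spec = {
 *              .tni = "\x00\x12\x34", // 24-bit virtual subnet ID
 *      };
 *      // TNI plus flow_id overlay the 32-bit GRE key: gre_key_h
 *      // receives the TNI, gre_key_l the flow_id.
 * @endcode
 */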
4715
4716 /**
4717  * Add VXLAN item to matcher and to the value.
4718  *
4719  * @param[in, out] matcher
4720  *   Flow matcher.
4721  * @param[in, out] key
4722  *   Flow matcher value.
4723  * @param[in] item
4724  *   Flow pattern to translate.
4725  * @param[in] inner
4726  *   Item is inner pattern.
4727  */
4728 static void
4729 flow_dv_translate_item_vxlan(void *matcher, void *key,
4730                              const struct rte_flow_item *item,
4731                              int inner)
4732 {
4733         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4734         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4735         void *headers_m;
4736         void *headers_v;
4737         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4738         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4739         char *vni_m;
4740         char *vni_v;
4741         uint16_t dport;
4742         int size;
4743         int i;
4744
4745         if (inner) {
4746                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4747                                          inner_headers);
4748                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4749         } else {
4750                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4751                                          outer_headers);
4752                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4753         }
4754         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4755                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4756         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4757                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4758                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4759         }
4760         if (!vxlan_v)
4761                 return;
4762         if (!vxlan_m)
4763                 vxlan_m = &rte_flow_item_vxlan_mask;
4764         size = sizeof(vxlan_m->vni);
4765         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4766         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4767         memcpy(vni_m, vxlan_m->vni, size);
4768         for (i = 0; i < size; ++i)
4769                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4770 }
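
/**
 * Illustrative sketch: matching VNI 0x001234 through the translation
 * above. If the preceding UDP item left the destination port
 * unmasked, the function pins it to 4789 (VXLAN) or 4790 (VXLAN-GPE)
 * on its own.
 *
 * @code
 *      struct rte_flow_item_vxlan spec = { .vni = "\x00\x12\x34" };
 *      struct rte_flow_item_vxlan mask = { .vni = "\xff\xff\xff" };
 * @endcode
 */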
4771
4772 /**
4773  * Add Geneve item to matcher and to the value.
4774  *
4775  * @param[in, out] matcher
4776  *   Flow matcher.
4777  * @param[in, out] key
4778  *   Flow matcher value.
4779  * @param[in] item
4780  *   Flow pattern to translate.
4781  * @param[in] inner
4782  *   Item is inner pattern.
4783  */
4785 static void
4786 flow_dv_translate_item_geneve(void *matcher, void *key,
4787                               const struct rte_flow_item *item, int inner)
4788 {
4789         const struct rte_flow_item_geneve *geneve_m = item->mask;
4790         const struct rte_flow_item_geneve *geneve_v = item->spec;
4791         void *headers_m;
4792         void *headers_v;
4793         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4794         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4795         uint16_t dport;
4796         uint16_t gbhdr_m;
4797         uint16_t gbhdr_v;
4798         char *vni_m;
4799         char *vni_v;
4800         size_t size, i;
4801
4802         if (inner) {
4803                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4804                                          inner_headers);
4805                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4806         } else {
4807                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4808                                          outer_headers);
4809                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4810         }
4811         dport = MLX5_UDP_PORT_GENEVE;
4812         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4813                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4814                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4815         }
4816         if (!geneve_v)
4817                 return;
4818         if (!geneve_m)
4819                 geneve_m = &rte_flow_item_geneve_mask;
4820         size = sizeof(geneve_m->vni);
4821         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
4822         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
4823         memcpy(vni_m, geneve_m->vni, size);
4824         for (i = 0; i < size; ++i)
4825                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
4826         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
4827                  rte_be_to_cpu_16(geneve_m->protocol));
4828         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
4829                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
4830         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
4831         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
4832         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
4833                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4834         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
4835                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4836         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
4837                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4838         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
4839                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
4840                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4841 }
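
/**
 * Layout assumed by the helpers above (per the GENEVE specification):
 * the first 16 bits pack Ver(2) | OptLen(6) | O(1) | C(1) | Rsvd(6),
 * with MLX5_GENEVE_OAMF_VAL() extracting the O (OAM) bit and
 * MLX5_GENEVE_OPTLEN_VAL() the option length in 4-byte words. A worked
 * value:
 *
 * @code
 *      uint16_t gbhdr = 0x0180; // OptLen = 1, O = 1, C = 0
 *      // MLX5_GENEVE_OAMF_VAL(gbhdr) == 1
 *      // MLX5_GENEVE_OPTLEN_VAL(gbhdr) == 1
 * @endcode
 */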
4842
4843 /**
4844  * Add MPLS item to matcher and to the value.
4845  *
4846  * @param[in, out] matcher
4847  *   Flow matcher.
4848  * @param[in, out] key
4849  *   Flow matcher value.
4850  * @param[in] item
4851  *   Flow pattern to translate.
4852  * @param[in] prev_layer
4853  *   The protocol layer indicated in previous item.
4854  * @param[in] inner
4855  *   Item is inner pattern.
4856  */
4857 static void
4858 flow_dv_translate_item_mpls(void *matcher, void *key,
4859                             const struct rte_flow_item *item,
4860                             uint64_t prev_layer,
4861                             int inner)
4862 {
4863         const uint32_t *in_mpls_m = item->mask;
4864         const uint32_t *in_mpls_v = item->spec;
        uint32_t *out_mpls_m = NULL;
        uint32_t *out_mpls_v = NULL;
4867         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4868         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4869         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4870                                      misc_parameters_2);
4871         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4872         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4873         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4874
4875         switch (prev_layer) {
4876         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4877                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4878                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4879                          MLX5_UDP_PORT_MPLS);
4880                 break;
4881         case MLX5_FLOW_LAYER_GRE:
4882                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4883                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4884                          RTE_ETHER_TYPE_MPLS);
4885                 break;
4886         default:
4887                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4888                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4889                          IPPROTO_MPLS);
4890                 break;
4891         }
4892         if (!in_mpls_v)
4893                 return;
4894         if (!in_mpls_m)
4895                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4896         switch (prev_layer) {
4897         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4898                 out_mpls_m =
4899                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4900                                                  outer_first_mpls_over_udp);
4901                 out_mpls_v =
4902                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4903                                                  outer_first_mpls_over_udp);
4904                 break;
4905         case MLX5_FLOW_LAYER_GRE:
4906                 out_mpls_m =
4907                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4908                                                  outer_first_mpls_over_gre);
4909                 out_mpls_v =
4910                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4911                                                  outer_first_mpls_over_gre);
4912                 break;
4913         default:
4914                 /* Inner MPLS not over GRE is not supported. */
4915                 if (!inner) {
4916                         out_mpls_m =
4917                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4918                                                          misc2_m,
4919                                                          outer_first_mpls);
4920                         out_mpls_v =
4921                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4922                                                          misc2_v,
4923                                                          outer_first_mpls);
4924                 }
4925                 break;
4926         }
4927         if (out_mpls_m && out_mpls_v) {
4928                 *out_mpls_m = *in_mpls_m;
4929                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4930         }
4931 }
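
/*
 * Editorial sketch (illustrative, not part of the driver): an
 * application-side pattern that reaches the translator above with
 * prev_layer == MLX5_FLOW_LAYER_OUTER_L4_UDP.  The 3-byte label_tc_s
 * encoding follows struct rte_flow_item_mpls; below, label 16, TC 0 and
 * the bottom-of-stack bit set.
 *
 *	struct rte_flow_item_mpls mpls_spec = {
 *		.label_tc_s = { 0x00, 0x01, 0x01 },
 *		.ttl = 64,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */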
4932
4933 /**
4934  * Add META item to matcher and to the value.
4935  *
4936  * @param[in, out] matcher
4937  *   Flow matcher.
4938  * @param[in, out] key
4939  *   Flow matcher value.
4940  * @param[in] item
4941  *   Flow pattern to translate.
4944  */
4945 static void
4946 flow_dv_translate_item_meta(void *matcher, void *key,
4947                             const struct rte_flow_item *item)
4948 {
4949         const struct rte_flow_item_meta *meta_m;
4950         const struct rte_flow_item_meta *meta_v;
4951         void *misc2_m =
4952                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4953         void *misc2_v =
4954                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4955
4956         meta_m = (const void *)item->mask;
4957         if (!meta_m)
4958                 meta_m = &rte_flow_item_meta_mask;
4959         meta_v = (const void *)item->spec;
4960         if (meta_v) {
4961                 MLX5_SET(fte_match_set_misc2, misc2_m,
4962                          metadata_reg_a, meta_m->data);
4963                 MLX5_SET(fte_match_set_misc2, misc2_v,
4964                          metadata_reg_a, meta_v->data & meta_m->data);
4965         }
4966 }
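
/*
 * Editorial sketch (illustrative values): a META item as an application
 * would pass it; the translator above reduces it to a masked match on
 * metadata register A.
 *
 *	struct rte_flow_item_meta meta_spec = { .data = 0xcafe };
 *	struct rte_flow_item_meta meta_mask = { .data = 0xffff };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_META,
 *		.spec = &meta_spec,
 *		.mask = &meta_mask,
 *	};
 */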
4967
4968 /**
4969  * Add vport metadata Reg C0 item to matcher and to the value.
4970  *
4971  * @param[in, out] matcher
4972  *   Flow matcher.
4973  * @param[in, out] key
4974  *   Flow matcher value.
4975  * @param[in] value
4976  *   Vport metadata value to match; @p mask selects the significant bits.
4977  */
4978 static void
4979 flow_dv_translate_item_meta_vport(void *matcher, void *key,
4980                                   uint32_t value, uint32_t mask)
4981 {
4982         void *misc2_m =
4983                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4984         void *misc2_v =
4985                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4986
4987         MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
4988         MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
4989 }
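
/*
 * Note on the matcher/key split used above and throughout this file:
 * the mask goes into the matcher blob, the value into the key blob, and
 * hardware compares the masked packet field against the value.  The
 * caller is expected to pass a value already confined to the mask, e.g.
 * (illustrative numbers):
 *
 *	flow_dv_translate_item_meta_vport(matcher, key,
 *					  0x000a0000, 0xffff0000);
 */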
4990
4991 /**
4992  * Add tag item to matcher and to the value.
4993  *
4994  * @param[in, out] matcher
4995  *   Flow matcher.
4996  * @param[in, out] key
4997  *   Flow matcher value.
4998  * @param[in] item
4999  *   Flow pattern to translate.
5000  */
5001 static void
5002 flow_dv_translate_item_tag(void *matcher, void *key,
5003                            const struct rte_flow_item *item)
5004 {
5005         void *misc2_m =
5006                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
5007         void *misc2_v =
5008                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
5009         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
5010         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
5011         enum modify_reg reg = tag_v->id;
5012         rte_be32_t value = tag_v->data;
5013         rte_be32_t mask = tag_m->data;
5014
5015         switch (reg) {
5016         case REG_A:
5017                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
5018                                 rte_be_to_cpu_32(mask));
5019                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
5020                                 rte_be_to_cpu_32(value));
5021                 break;
5022         case REG_B:
5023                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b,
5024                                 rte_be_to_cpu_32(mask));
5025                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b,
5026                                 rte_be_to_cpu_32(value));
5027                 break;
5028         case REG_C_0:
5029                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0,
5030                                 rte_be_to_cpu_32(mask));
5031                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0,
5032                                 rte_be_to_cpu_32(value));
5033                 break;
5034         case REG_C_1:
5035                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1,
5036                                 rte_be_to_cpu_32(mask));
5037                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1,
5038                                 rte_be_to_cpu_32(value));
5039                 break;
5040         case REG_C_2:
5041                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2,
5042                                 rte_be_to_cpu_32(mask));
5043                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2,
5044                                 rte_be_to_cpu_32(value));
5045                 break;
5046         case REG_C_3:
5047                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3,
5048                                 rte_be_to_cpu_32(mask));
5049                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3,
5050                                 rte_be_to_cpu_32(value));
5051                 break;
5052         case REG_C_4:
5053                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4,
5054                                 rte_be_to_cpu_32(mask));
5055                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4,
5056                                 rte_be_to_cpu_32(value));
5057                 break;
5058         case REG_C_5:
5059                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5,
5060                                 rte_be_to_cpu_32(mask));
5061                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5,
5062                                 rte_be_to_cpu_32(value));
5063                 break;
5064         case REG_C_6:
5065                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6,
5066                                 rte_be_to_cpu_32(mask));
5067                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6,
5068                                 rte_be_to_cpu_32(value));
5069                 break;
5070         case REG_C_7:
5071                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7,
5072                                 rte_be_to_cpu_32(mask));
5073                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7,
5074                                 rte_be_to_cpu_32(value));
5075                 break;
5076         }
5077 }
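
/*
 * Editorial sketch: MLX5_RTE_FLOW_ITEM_TYPE_TAG is internal, generated
 * by the PMD itself (e.g. when a flow is split across tables), which is
 * why the code above dereferences spec and mask unconditionally.  Data
 * is big-endian here, matching the rte_be_to_cpu_32() conversions
 * (illustrative values):
 *
 *	struct mlx5_rte_flow_item_tag tag_spec = {
 *		.id = REG_C_2,
 *		.data = RTE_BE32(0x1234),
 *	};
 *	struct mlx5_rte_flow_item_tag tag_mask = {
 *		.id = REG_C_2,
 *		.data = RTE_BE32(0xffff),
 *	};
 */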
5078
5079 /**
5080  * Add source vport match to the specified matcher.
5081  *
5082  * @param[in, out] matcher
5083  *   Flow matcher.
5084  * @param[in, out] key
5085  *   Flow matcher value.
5086  * @param[in] port
5087  *   Source vport value to match
5088  * @param[in] mask
5089  *   Mask
5090  */
5091 static void
5092 flow_dv_translate_item_source_vport(void *matcher, void *key,
5093                                     int16_t port, uint16_t mask)
5094 {
5095         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5096         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5097
5098         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
5099         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
5100 }
5101
5102 /**
5103  * Translate port-id item to E-Switch match on port-id.
5104  *
5105  * @param[in] dev
5106  *   The device to configure through.
5107  * @param[in, out] matcher
5108  *   Flow matcher.
5109  * @param[in, out] key
5110  *   Flow matcher value.
5111  * @param[in] item
5112  *   Flow pattern to translate.
5113  *
5114  * @return
5115  *   0 on success, a negative errno value otherwise.
5116  */
5117 static int
5118 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
5119                                void *key, const struct rte_flow_item *item)
5120 {
5121         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
5122         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
5123         struct mlx5_priv *priv;
5124         uint16_t mask, id;
5125
5126         mask = pid_m ? pid_m->id : 0xffff;
5127         id = pid_v ? pid_v->id : dev->data->port_id;
5128         priv = mlx5_port_to_eswitch_info(id);
5129         if (!priv)
5130                 return -rte_errno;
5131         /* Translate to vport field or to metadata, depending on mode. */
5132         if (priv->vport_meta_mask)
5133                 flow_dv_translate_item_meta_vport(matcher, key,
5134                                                   priv->vport_meta_tag,
5135                                                   priv->vport_meta_mask);
5136         else
5137                 flow_dv_translate_item_source_vport(matcher, key,
5138                                                     priv->vport_id, mask);
5139         return 0;
5140 }
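
/*
 * Editorial sketch (illustrative port id): a PORT_ID pattern item from
 * the application side; the resolver above turns the DPDK port id into
 * either a vport metadata match or a source vport match.
 *
 *	struct rte_flow_item_port_id pid_spec = { .id = 1 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid_spec,
 *	};
 */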
5141
5142 /**
5143  * Add ICMP6 item to matcher and to the value.
5144  *
5145  * @param[in, out] matcher
5146  *   Flow matcher.
5147  * @param[in, out] key
5148  *   Flow matcher value.
5149  * @param[in] item
5150  *   Flow pattern to translate.
5151  * @param[in] inner
5152  *   Item is inner pattern.
5153  */
5154 static void
5155 flow_dv_translate_item_icmp6(void *matcher, void *key,
5156                               const struct rte_flow_item *item,
5157                               int inner)
5158 {
5159         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
5160         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
5161         void *headers_m;
5162         void *headers_v;
5163         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5164                                      misc_parameters_3);
5165         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5166         if (inner) {
5167                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5168                                          inner_headers);
5169                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5170         } else {
5171                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5172                                          outer_headers);
5173                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5174         }
5175         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5176         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
5177         if (!icmp6_v)
5178                 return;
5179         if (!icmp6_m)
5180                 icmp6_m = &rte_flow_item_icmp6_mask;
5181         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
5182         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
5183                  icmp6_v->type & icmp6_m->type);
5184         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
5185         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
5186                  icmp6_v->code & icmp6_m->code);
5187 }
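
/*
 * Editorial sketch: matching ICMPv6 Neighbor Solicitation (type 135,
 * code 0); the translator above reduces this to misc3
 * icmpv6_type/icmpv6_code matches plus the IPv6 next-header check.
 *
 *	struct rte_flow_item_icmp6 icmp6_spec = { .type = 135, .code = 0 };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
 *		.spec = &icmp6_spec,
 *		.mask = &rte_flow_item_icmp6_mask,
 *	};
 */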
5188
5189 /**
5190  * Add ICMP item to matcher and to the value.
5191  *
5192  * @param[in, out] matcher
5193  *   Flow matcher.
5194  * @param[in, out] key
5195  *   Flow matcher value.
5196  * @param[in] item
5197  *   Flow pattern to translate.
5198  * @param[in] inner
5199  *   Item is inner pattern.
5200  */
5201 static void
5202 flow_dv_translate_item_icmp(void *matcher, void *key,
5203                             const struct rte_flow_item *item,
5204                             int inner)
5205 {
5206         const struct rte_flow_item_icmp *icmp_m = item->mask;
5207         const struct rte_flow_item_icmp *icmp_v = item->spec;
5208         void *headers_m;
5209         void *headers_v;
5210         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5211                                      misc_parameters_3);
5212         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5213         if (inner) {
5214                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5215                                          inner_headers);
5216                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5217         } else {
5218                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5219                                          outer_headers);
5220                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5221         }
5222         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5223         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
5224         if (!icmp_v)
5225                 return;
5226         if (!icmp_m)
5227                 icmp_m = &rte_flow_item_icmp_mask;
5228         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
5229                  icmp_m->hdr.icmp_type);
5230         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
5231                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
5232         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
5233                  icmp_m->hdr.icmp_code);
5234         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
5235                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
5236 }
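
/*
 * Editorial sketch: ICMP echo request (type 8, code 0) through the
 * public item, which wraps struct rte_icmp_hdr.
 *
 *	struct rte_flow_item_icmp icmp_spec = {
 *		.hdr = { .icmp_type = 8, .icmp_code = 0 },
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ICMP,
 *		.spec = &icmp_spec,
 *		.mask = &rte_flow_item_icmp_mask,
 *	};
 */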
5237
5238 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
5239
5240 #define HEADER_IS_ZERO(match_criteria, headers)                              \
5241         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
5242                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
5243
5244 /**
5245  * Calculate flow matcher enable bitmap.
5246  *
5247  * @param match_criteria
5248  *   Pointer to flow matcher criteria.
5249  *
5250  * @return
5251  *   Bitmap of enabled fields.
5252  */
5253 static uint8_t
5254 flow_dv_matcher_enable(uint32_t *match_criteria)
5255 {
5256         uint8_t match_criteria_enable;
5257
5258         match_criteria_enable =
5259                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
5260                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
5261         match_criteria_enable |=
5262                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
5263                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
5264         match_criteria_enable |=
5265                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
5266                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
5267         match_criteria_enable |=
5268                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
5269                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
5270         match_criteria_enable |=
5271                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
5272                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
5273         return match_criteria_enable;
5274 }
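
/*
 * Worked example: a matcher that populated only outer_headers and
 * misc_parameters_2 yields
 *
 *	match_criteria_enable =
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
 *
 * so only the criteria blocks a flow actually uses are enabled.
 */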
5275
5277 /**
5278  * Get a flow table.
5279  *
5280  * @param dev[in, out]
5281  *   Pointer to rte_eth_dev structure.
5282  * @param[in] table_id
5283  *   Table id to use.
5284  * @param[in] egress
5285  *   Direction of the table.
5286  * @param[in] transfer
5287  *   E-Switch or NIC flow.
5288  * @param[out] error
5289  *   pointer to error structure.
5290  *
5291  * @return
5292  *   Returns the table resource based on the index, NULL in case of failure.
5293  */
5294 static struct mlx5_flow_tbl_resource *
5295 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
5296                          uint32_t table_id, uint8_t egress,
5297                          uint8_t transfer,
5298                          struct rte_flow_error *error)
5299 {
5300         struct mlx5_priv *priv = dev->data->dev_private;
5301         struct mlx5_ibv_shared *sh = priv->sh;
5302         struct mlx5_flow_tbl_resource *tbl;
5303
5304 #ifdef HAVE_MLX5DV_DR
5305         if (transfer) {
5306                 tbl = &sh->fdb_tbl[table_id];
5307                 if (!tbl->obj)
5308                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5309                                 (sh->fdb_domain, table_id);
5310         } else if (egress) {
5311                 tbl = &sh->tx_tbl[table_id];
5312                 if (!tbl->obj)
5313                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5314                                 (sh->tx_domain, table_id);
5315         } else {
5316                 tbl = &sh->rx_tbl[table_id];
5317                 if (!tbl->obj)
5318                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5319                                 (sh->rx_domain, table_id);
5320         }
5321         if (!tbl->obj) {
5322                 rte_flow_error_set(error, ENOMEM,
5323                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5324                                    NULL, "cannot create table");
5325                 return NULL;
5326         }
5327         rte_atomic32_inc(&tbl->refcnt);
5328         return tbl;
5329 #else
5330         (void)error;
5331         (void)tbl;
5332         if (transfer)
5333                 return &sh->fdb_tbl[table_id];
5334         else if (egress)
5335                 return &sh->tx_tbl[table_id];
5336         else
5337                 return &sh->rx_tbl[table_id];
5338 #endif
5339 }
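
/*
 * Editorial sketch of the intended pairing (illustrative table id;
 * rte_errno is set by the callee on failure, and on non-DR builds the
 * static slot is returned with no reference taken):
 *
 *	struct mlx5_flow_tbl_resource *tbl =
 *		flow_dv_tbl_resource_get(dev, 1, 0, 0, error);
 *
 *	if (!tbl)
 *		return -rte_errno;
 *	...
 *	flow_dv_tbl_resource_release(tbl);
 */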
5340
5341 /**
5342  * Release a flow table.
5343  *
5344  * @param[in] tbl
5345  *   Table resource to be released.
5346  *
5347  * @return
5348  *   Returns 0 if the table was released, 1 otherwise.
5349  */
5350 static int
5351 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
5352 {
5353         if (!tbl)
5354                 return 0;
5355         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
5356                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
5357                 tbl->obj = NULL;
5358                 return 0;
5359         }
5360         return 1;
5361 }
5362
5363 /**
5364  * Register the flow matcher.
5365  *
5366  * @param dev[in, out]
5367  *   Pointer to rte_eth_dev structure.
5368  * @param[in, out] matcher
5369  *   Pointer to flow matcher.
5370  * @param[in, out] dev_flow
5371  *   Pointer to the dev_flow.
5372  * @param[out] error
5373  *   pointer to error structure.
5374  *
5375  * @return
5376  *   0 on success, a negative errno value otherwise and rte_errno is set.
5377  */
5378 static int
5379 flow_dv_matcher_register(struct rte_eth_dev *dev,
5380                          struct mlx5_flow_dv_matcher *matcher,
5381                          struct mlx5_flow *dev_flow,
5382                          struct rte_flow_error *error)
5383 {
5384         struct mlx5_priv *priv = dev->data->dev_private;
5385         struct mlx5_ibv_shared *sh = priv->sh;
5386         struct mlx5_flow_dv_matcher *cache_matcher;
5387         struct mlx5dv_flow_matcher_attr dv_attr = {
5388                 .type = IBV_FLOW_ATTR_NORMAL,
5389                 .match_mask = (void *)&matcher->mask,
5390         };
5391         struct mlx5_flow_tbl_resource *tbl = NULL;
5392
5393         /* Lookup from cache. */
5394         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
5395                 if (matcher->crc == cache_matcher->crc &&
5396                     matcher->priority == cache_matcher->priority &&
5397                     matcher->egress == cache_matcher->egress &&
5398                     matcher->group == cache_matcher->group &&
5399                     matcher->transfer == cache_matcher->transfer &&
5400                     !memcmp((const void *)matcher->mask.buf,
5401                             (const void *)cache_matcher->mask.buf,
5402                             cache_matcher->mask.size)) {
5403                         DRV_LOG(DEBUG,
5404                                 "priority %hd use %s matcher %p: refcnt %d++",
5405                                 cache_matcher->priority,
5406                                 cache_matcher->egress ? "tx" : "rx",
5407                                 (void *)cache_matcher,
5408                                 rte_atomic32_read(&cache_matcher->refcnt));
5409                         rte_atomic32_inc(&cache_matcher->refcnt);
5410                         dev_flow->dv.matcher = cache_matcher;
5411                         return 0;
5412                 }
5413         }
5414         /* Register new matcher. */
5415         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
5416         if (!cache_matcher)
5417                 return rte_flow_error_set(error, ENOMEM,
5418                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5419                                           "cannot allocate matcher memory");
5420         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
5421                                        matcher->egress, matcher->transfer,
5422                                        error);
5423         if (!tbl) {
5424                 rte_free(cache_matcher);
5425                 return rte_flow_error_set(error, ENOMEM,
5426                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5427                                           NULL, "cannot create table");
5428         }
5429         *cache_matcher = *matcher;
5430         dv_attr.match_criteria_enable =
5431                 flow_dv_matcher_enable(cache_matcher->mask.buf);
5432         dv_attr.priority = matcher->priority;
5433         if (matcher->egress)
5434                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
5435         cache_matcher->matcher_object =
5436                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
5437         if (!cache_matcher->matcher_object) {
5438                 rte_free(cache_matcher);
5439 #ifdef HAVE_MLX5DV_DR
5440                 flow_dv_tbl_resource_release(tbl);
5441 #endif
5442                 return rte_flow_error_set(error, ENOMEM,
5443                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5444                                           NULL, "cannot create matcher");
5445         }
5446         rte_atomic32_inc(&cache_matcher->refcnt);
5447         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
5448         dev_flow->dv.matcher = cache_matcher;
5449         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
5450                 cache_matcher->priority,
5451                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
5452                 rte_atomic32_read(&cache_matcher->refcnt));
5453         rte_atomic32_inc(&tbl->refcnt);
5454         return 0;
5455 }
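
/*
 * Editorial sketch: callers build a template matcher, fill its mask via
 * the flow_dv_translate_item_*() helpers, then register it; this mirrors
 * what flow_dv_translate() below does (illustrative values):
 *
 *	struct mlx5_flow_dv_matcher matcher = {
 *		.mask = { .size = sizeof(matcher.mask.buf), },
 *		.priority = MLX5_PRIORITY_MAP_L2,
 *	};
 *	...
 *	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 *				    matcher.mask.size);
 *	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
 *		return -rte_errno;
 */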
5456
5457 /**
5458  * Find existing tag resource or create and register a new one.
5459  *
5460  * @param dev[in, out]
5461  *   Pointer to rte_eth_dev structure.
5462  * @param[in, out] resource
5463  *   Pointer to tag resource.
5464  * @param[in, out] dev_flow
5465  *   Pointer to the dev_flow.
5466  * @param[out] error
5467  *   pointer to error structure.
5468  *
5469  * @return
5470  *   0 on success, a negative errno value otherwise and rte_errno is set.
5471  */
5472 static int
5473 flow_dv_tag_resource_register
5474                         (struct rte_eth_dev *dev,
5475                          struct mlx5_flow_dv_tag_resource *resource,
5476                          struct mlx5_flow *dev_flow,
5477                          struct rte_flow_error *error)
5478 {
5479         struct mlx5_priv *priv = dev->data->dev_private;
5480         struct mlx5_ibv_shared *sh = priv->sh;
5481         struct mlx5_flow_dv_tag_resource *cache_resource;
5482
5483         /* Lookup a matching resource from cache. */
5484         LIST_FOREACH(cache_resource, &sh->tags, next) {
5485                 if (resource->tag == cache_resource->tag) {
5486                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5487                                 (void *)cache_resource,
5488                                 rte_atomic32_read(&cache_resource->refcnt));
5489                         rte_atomic32_inc(&cache_resource->refcnt);
5490                         dev_flow->flow->tag_resource = cache_resource;
5491                         return 0;
5492                 }
5493         }
5494         /* Register new resource. */
5495         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5496         if (!cache_resource)
5497                 return rte_flow_error_set(error, ENOMEM,
5498                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5499                                           "cannot allocate resource memory");
5500         *cache_resource = *resource;
5501         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5502                 (resource->tag);
5503         if (!cache_resource->action) {
5504                 rte_free(cache_resource);
5505                 return rte_flow_error_set(error, ENOMEM,
5506                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5507                                           NULL, "cannot create action");
5508         }
5509         rte_atomic32_init(&cache_resource->refcnt);
5510         rte_atomic32_inc(&cache_resource->refcnt);
5511         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5512         dev_flow->flow->tag_resource = cache_resource;
5513         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5514                 (void *)cache_resource,
5515                 rte_atomic32_read(&cache_resource->refcnt));
5516         return 0;
5517 }
5518
5519 /**
5520  * Release the tag.
5521  *
5522  * @param dev
5523  *   Pointer to Ethernet device.
5524  * @param tag
5525  *   Pointer to the tag resource.
5526  *
5527  * @return
5528  *   1 while a reference on it exists, 0 when freed.
5529  */
5530 static int
5531 flow_dv_tag_release(struct rte_eth_dev *dev,
5532                     struct mlx5_flow_dv_tag_resource *tag)
5533 {
5534         assert(tag);
5535         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5536                 dev->data->port_id, (void *)tag,
5537                 rte_atomic32_read(&tag->refcnt));
5538         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5539                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5540                 LIST_REMOVE(tag, next);
5541                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5542                         dev->data->port_id, (void *)tag);
5543                 rte_free(tag);
5544                 return 0;
5545         }
5546         return 1;
5547 }
5548
5549 /**
5550  * Translate port ID action to vport.
5551  *
5552  * @param[in] dev
5553  *   Pointer to rte_eth_dev structure.
5554  * @param[in] action
5555  *   Pointer to the port ID action.
5556  * @param[out] dst_port_id
5557  *   The target port ID.
5558  * @param[out] error
5559  *   Pointer to the error structure.
5560  *
5561  * @return
5562  *   0 on success, a negative errno value otherwise and rte_errno is set.
5563  */
5564 static int
5565 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5566                                  const struct rte_flow_action *action,
5567                                  uint32_t *dst_port_id,
5568                                  struct rte_flow_error *error)
5569 {
5570         uint32_t port;
5571         struct mlx5_priv *priv;
5572         const struct rte_flow_action_port_id *conf =
5573                         (const struct rte_flow_action_port_id *)action->conf;
5574
5575         port = conf->original ? dev->data->port_id : conf->id;
5576         priv = mlx5_port_to_eswitch_info(port);
5577         if (!priv)
5578                 return rte_flow_error_set(error, -rte_errno,
5579                                           RTE_FLOW_ERROR_TYPE_ACTION,
5580                                           NULL,
5581                                           "No eswitch info was found for port");
5582         if (priv->vport_meta_mask)
5583                 *dst_port_id = priv->vport_meta_tag;
5584         else
5585                 *dst_port_id = priv->vport_id;
5586         return 0;
5587 }
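
/*
 * Editorial sketch (illustrative id): the PORT_ID action conf resolved
 * above; with .original = 1 the flow-creating device's own port id is
 * used instead of .id.
 *
 *	struct rte_flow_action_port_id conf = { .original = 0, .id = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &conf,
 *	};
 */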
5588
5589 /**
5590  * Add Tx queue item to matcher and to the value.
5591  *
5592  * @param[in] dev
5593  *   Pointer to the dev struct.
5594  * @param[in, out] matcher
5595  *   Flow matcher.
5596  * @param[in, out] key
5597  *   Flow matcher value.
5598  * @param[in] item
5599  *   Flow pattern to translate.
5602  */
5603 static void
5604 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
5605                                 void *matcher, void *key,
5606                                 const struct rte_flow_item *item)
5607 {
5608         const struct mlx5_rte_flow_item_tx_queue *queue_m;
5609         const struct mlx5_rte_flow_item_tx_queue *queue_v;
5610         void *misc_m =
5611                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5612         void *misc_v =
5613                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5614         struct mlx5_txq_ctrl *txq;
5615         uint32_t queue;
5616
5618         queue_m = (const void *)item->mask;
5619         if (!queue_m)
5620                 return;
5621         queue_v = (const void *)item->spec;
5622         if (!queue_v)
5623                 return;
5624         txq = mlx5_txq_get(dev, queue_v->queue);
5625         if (!txq)
5626                 return;
5627         queue = txq->obj->sq->id;
5628         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
5629         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
5630                  queue & queue_m->queue);
5631         mlx5_txq_release(dev, queue_v->queue);
5632 }
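
/*
 * Editorial sketch: MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE is internal,
 * emitted by the PMD for Tx-side flows; the translator above converts
 * the queue index to the underlying SQ number (illustrative values):
 *
 *	struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 0 };
 *	struct mlx5_rte_flow_item_tx_queue queue_mask = {
 *		.queue = UINT32_MAX,
 *	};
 */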
5633
5634 /**
5635  * Fill the flow with DV spec.
5636  *
5637  * @param[in] dev
5638  *   Pointer to rte_eth_dev structure.
5639  * @param[in, out] dev_flow
5640  *   Pointer to the sub flow.
5641  * @param[in] attr
5642  *   Pointer to the flow attributes.
5643  * @param[in] items
5644  *   Pointer to the list of items.
5645  * @param[in] actions
5646  *   Pointer to the list of actions.
5647  * @param[out] error
5648  *   Pointer to the error structure.
5649  *
5650  * @return
5651  *   0 on success, a negative errno value otherwise and rte_errno is set.
5652  */
5653 static int
5654 flow_dv_translate(struct rte_eth_dev *dev,
5655                   struct mlx5_flow *dev_flow,
5656                   const struct rte_flow_attr *attr,
5657                   const struct rte_flow_item items[],
5658                   const struct rte_flow_action actions[],
5659                   struct rte_flow_error *error)
5660 {
5661         struct mlx5_priv *priv = dev->data->dev_private;
5662         struct rte_flow *flow = dev_flow->flow;
5663         uint64_t item_flags = 0;
5664         uint64_t last_item = 0;
5665         uint64_t action_flags = 0;
5666         uint64_t priority = attr->priority;
5667         struct mlx5_flow_dv_matcher matcher = {
5668                 .mask = {
5669                         .size = sizeof(matcher.mask.buf),
5670                 },
5671         };
5672         int actions_n = 0;
5673         bool actions_end = false;
5674         struct mlx5_flow_dv_modify_hdr_resource res = {
5675                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5676                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5677         };
5678         union flow_dv_attr flow_attr = { .attr = 0 };
5679         struct mlx5_flow_dv_tag_resource tag_resource;
5680         uint32_t modify_action_position = UINT32_MAX;
5681         void *match_mask = matcher.mask.buf;
5682         void *match_value = dev_flow->dv.value.buf;
5683         uint8_t next_protocol = 0xff;
5684         struct rte_vlan_hdr vlan = { 0 };
5685         uint32_t table;
5686         int ret = 0;
5687
5688         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5689                                        &table, error);
5690         if (ret)
5691                 return ret;
5692         flow->group = table;
5693         if (attr->transfer)
5694                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5695         if (priority == MLX5_FLOW_PRIO_RSVD)
5696                 priority = priv->config.flow_prio - 1;
5697         for (; !actions_end; actions++) {
5698                 const struct rte_flow_action_queue *queue;
5699                 const struct rte_flow_action_rss *rss;
5700                 const struct rte_flow_action *action = actions;
5701                 const struct rte_flow_action_count *count = action->conf;
5702                 const uint8_t *rss_key;
5703                 const struct rte_flow_action_jump *jump_data;
5704                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5705                 struct mlx5_flow_tbl_resource *tbl;
5706                 uint32_t port_id = 0;
5707                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5708                 int action_type = actions->type;
5709                 const struct rte_flow_action *found_action = NULL;
5710
5711                 switch (action_type) {
5712                 case RTE_FLOW_ACTION_TYPE_VOID:
5713                         break;
5714                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5715                         if (flow_dv_translate_action_port_id(dev, action,
5716                                                              &port_id, error))
5717                                 return -rte_errno;
5718                         port_id_resource.port_id = port_id;
5719                         if (flow_dv_port_id_action_resource_register
5720                             (dev, &port_id_resource, dev_flow, error))
5721                                 return -rte_errno;
5722                         dev_flow->dv.actions[actions_n++] =
5723                                 dev_flow->dv.port_id_action->action;
5724                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5725                         break;
5726                 case RTE_FLOW_ACTION_TYPE_FLAG:
5727                         tag_resource.tag =
5728                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5729                         if (!flow->tag_resource)
5730                                 if (flow_dv_tag_resource_register
5731                                     (dev, &tag_resource, dev_flow, error))
5732                                         return -rte_errno;
5733                         dev_flow->dv.actions[actions_n++] =
5734                                 flow->tag_resource->action;
5735                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5736                         break;
5737                 case RTE_FLOW_ACTION_TYPE_MARK:
5738                         tag_resource.tag = mlx5_flow_mark_set
5739                               (((const struct rte_flow_action_mark *)
5740                                (actions->conf))->id);
5741                         if (!flow->tag_resource)
5742                                 if (flow_dv_tag_resource_register
5743                                     (dev, &tag_resource, dev_flow, error))
5744                                         return -rte_errno;
5745                         dev_flow->dv.actions[actions_n++] =
5746                                 flow->tag_resource->action;
5747                         action_flags |= MLX5_FLOW_ACTION_MARK;
5748                         break;
5749                 case RTE_FLOW_ACTION_TYPE_DROP:
5750                         action_flags |= MLX5_FLOW_ACTION_DROP;
5751                         break;
5752                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5753                         queue = actions->conf;
5754                         flow->rss.queue_num = 1;
5755                         (*flow->queue)[0] = queue->index;
5756                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5757                         break;
5758                 case RTE_FLOW_ACTION_TYPE_RSS:
5759                         rss = actions->conf;
5760                         if (flow->queue)
5761                                 memcpy((*flow->queue), rss->queue,
5762                                        rss->queue_num * sizeof(uint16_t));
5763                         flow->rss.queue_num = rss->queue_num;
5764                         /* NULL RSS key indicates default RSS key. */
5765                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5766                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5767                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5768                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5769                         flow->rss.level = rss->level;
5770                         action_flags |= MLX5_FLOW_ACTION_RSS;
5771                         break;
5772                 case RTE_FLOW_ACTION_TYPE_COUNT:
5773                         if (!priv->config.devx) {
5774                                 rte_errno = ENOTSUP;
5775                                 goto cnt_err;
5776                         }
5777                         flow->counter = flow_dv_counter_alloc(dev,
5778                                                               count->shared,
5779                                                               count->id,
5780                                                               flow->group);
5781                         if (flow->counter == NULL)
5782                                 goto cnt_err;
5783                         dev_flow->dv.actions[actions_n++] =
5784                                 flow->counter->action;
5785                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5786                         break;
5787 cnt_err:
5788                         if (rte_errno == ENOTSUP)
5789                                 return rte_flow_error_set
5790                                               (error, ENOTSUP,
5791                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5792                                                NULL,
5793                                                "count action not supported");
5794                         else
5795                                 return rte_flow_error_set
5796                                                 (error, rte_errno,
5797                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5798                                                  action,
5799                                                  "cannot create counter"
5800                                                   " object.");
5801                         break;
5802                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5803                         dev_flow->dv.actions[actions_n++] =
5804                                                 priv->sh->pop_vlan_action;
5805                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5806                         break;
5807                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5808                         flow_dev_get_vlan_info_from_items(items, &vlan);
5809                         vlan.eth_proto = rte_be_to_cpu_16
5810                              ((((const struct rte_flow_action_of_push_vlan *)
5811                                                    actions->conf)->ethertype));
5812                         found_action = mlx5_flow_find_action
5813                                         (actions + 1,
5814                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
5815                         if (found_action)
5816                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5817                         found_action = mlx5_flow_find_action
5818                                         (actions + 1,
5819                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
5820                         if (found_action)
5821                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5822                         if (flow_dv_create_action_push_vlan
5823                                             (dev, attr, &vlan, dev_flow, error))
5824                                 return -rte_errno;
5825                         dev_flow->dv.actions[actions_n++] =
5826                                            dev_flow->dv.push_vlan_res->action;
5827                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5830                         /* Handled by the OF_PUSH_VLAN action above. */
5831                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
5832                         break;
5833                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5834                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5835                                 break;
5836                         flow_dev_get_vlan_info_from_items(items, &vlan);
5837                         mlx5_update_vlan_vid_pcp(actions, &vlan);
5838                         /* If no VLAN push - this is a modify header action */
5839                         if (flow_dv_convert_action_modify_vlan_vid
5840                                                         (&res, actions, error))
5841                                 return -rte_errno;
5842                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5843                         break;
5844                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5845                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5846                         if (flow_dv_create_action_l2_encap(dev, actions,
5847                                                            dev_flow,
5848                                                            attr->transfer,
5849                                                            error))
5850                                 return -rte_errno;
5851                         dev_flow->dv.actions[actions_n++] =
5852                                 dev_flow->dv.encap_decap->verbs_action;
5853                         action_flags |= actions->type ==
5854                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5855                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5856                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5857                         break;
5858                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5859                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5860                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5861                                                            attr->transfer,
5862                                                            error))
5863                                 return -rte_errno;
5864                         dev_flow->dv.actions[actions_n++] =
5865                                 dev_flow->dv.encap_decap->verbs_action;
5866                         action_flags |= actions->type ==
5867                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5868                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5869                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5870                         break;
5871                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5872                         /* Handle encap with preceding decap. */
5873                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5874                                 if (flow_dv_create_action_raw_encap
5875                                         (dev, actions, dev_flow, attr, error))
5876                                         return -rte_errno;
5877                                 dev_flow->dv.actions[actions_n++] =
5878                                         dev_flow->dv.encap_decap->verbs_action;
5879                         } else {
5880                                 /* Handle encap without preceding decap. */
5881                                 if (flow_dv_create_action_l2_encap
5882                                     (dev, actions, dev_flow, attr->transfer,
5883                                      error))
5884                                         return -rte_errno;
5885                                 dev_flow->dv.actions[actions_n++] =
5886                                         dev_flow->dv.encap_decap->verbs_action;
5887                         }
5888                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5889                         break;
5890                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5891                         /* Check if this decap is followed by encap. */
5892                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5893                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5894                                action++) {
5895                         }
5896                         /* Handle decap only if it isn't followed by encap. */
5897                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5898                                 if (flow_dv_create_action_l2_decap
5899                                     (dev, dev_flow, attr->transfer, error))
5900                                         return -rte_errno;
5901                                 dev_flow->dv.actions[actions_n++] =
5902                                         dev_flow->dv.encap_decap->verbs_action;
5903                         }
5904                         /* If decap is followed by encap, handle it at encap. */
5905                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5906                         break;
5907                 case RTE_FLOW_ACTION_TYPE_JUMP:
5908                         jump_data = action->conf;
5909                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5910                                                        jump_data->group, &table,
5911                                                        error);
5912                         if (ret)
5913                                 return ret;
5914                         tbl = flow_dv_tbl_resource_get(dev, table,
5915                                                        attr->egress,
5916                                                        attr->transfer, error);
5917                         if (!tbl)
5918                                 return rte_flow_error_set
5919                                                 (error, errno,
5920                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5921                                                  NULL,
5922                                                  "cannot create jump action.");
5923                         jump_tbl_resource.tbl = tbl;
5924                         if (flow_dv_jump_tbl_resource_register
5925                             (dev, &jump_tbl_resource, dev_flow, error)) {
5926                                 flow_dv_tbl_resource_release(tbl);
5927                                 return rte_flow_error_set
5928                                                 (error, errno,
5929                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5930                                                  NULL,
5931                                                  "cannot create jump action.");
5932                         }
5933                         dev_flow->dv.actions[actions_n++] =
5934                                 dev_flow->dv.jump->action;
5935                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5936                         break;
5937                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5938                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5939                         if (flow_dv_convert_action_modify_mac(&res, actions,
5940                                                               error))
5941                                 return -rte_errno;
5942                         action_flags |= actions->type ==
5943                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5944                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5945                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5946                         break;
5947                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5948                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5949                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5950                                                                error))
5951                                 return -rte_errno;
5952                         action_flags |= actions->type ==
5953                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5954                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5955                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5956                         break;
5957                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5958                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5959                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5960                                                                error))
5961                                 return -rte_errno;
5962                         action_flags |= actions->type ==
5963                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5964                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5965                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5966                         break;
5967                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5968                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5969                         if (flow_dv_convert_action_modify_tp(&res, actions,
5970                                                              items, &flow_attr,
5971                                                              error))
5972                                 return -rte_errno;
5973                         action_flags |= actions->type ==
5974                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5975                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5976                                         MLX5_FLOW_ACTION_SET_TP_DST;
5977                         break;
5978                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5979                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5980                                                                   &flow_attr,
5981                                                                   error))
5982                                 return -rte_errno;
5983                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5984                         break;
5985                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5986                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5987                                                              items, &flow_attr,
5988                                                              error))
5989                                 return -rte_errno;
5990                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5991                         break;
5992                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5993                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5994                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5995                                                                   error))
5996                                 return -rte_errno;
5997                         action_flags |= actions->type ==
5998                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5999                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
6000                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
6001                         break;
6003                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6004                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6005                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
6006                                                                   error))
6007                                 return -rte_errno;
6008                         action_flags |= actions->type ==
6009                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6010                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
6011                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
6012                         break;
6013                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6014                         if (flow_dv_convert_action_set_reg(&res, actions,
6015                                                            error))
6016                                 return -rte_errno;
6017                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6018                         break;
6019                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6020                         if (flow_dv_convert_action_copy_mreg(dev, &res,
6021                                                              actions, error))
6022                                 return -rte_errno;
6023                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6024                         break;
6025                 case RTE_FLOW_ACTION_TYPE_END:
6026                         actions_end = true;
6027                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
6028                                 /* create modify action if needed. */
6029                                 if (flow_dv_modify_hdr_resource_register
6030                                                                 (dev, &res,
6031                                                                  dev_flow,
6032                                                                  error))
6033                                         return -rte_errno;
6034                                 dev_flow->dv.actions[modify_action_position] =
6035                                         dev_flow->dv.modify_hdr->verbs_action;
6036                         }
6037                         break;
6038                 default:
6039                         break;
6040                 }
6041                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
6042                     modify_action_position == UINT32_MAX)
6043                         modify_action_position = actions_n++;
6044         }
6045         dev_flow->dv.actions_n = actions_n;
6046         dev_flow->actions = action_flags;
6047         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6048                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6049                 int item_type = items->type;
6050
6051                 switch (item_type) {
6052                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6053                         flow_dv_translate_item_port_id(dev, match_mask,
6054                                                        match_value, items);
6055                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6056                         break;
6057                 case RTE_FLOW_ITEM_TYPE_ETH:
6058                         flow_dv_translate_item_eth(match_mask, match_value,
6059                                                    items, tunnel);
6060                         matcher.priority = MLX5_PRIORITY_MAP_L2;
6061                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6062                                              MLX5_FLOW_LAYER_OUTER_L2;
6063                         break;
6064                 case RTE_FLOW_ITEM_TYPE_VLAN:
6065                         flow_dv_translate_item_vlan(dev_flow,
6066                                                     match_mask, match_value,
6067                                                     items, tunnel);
6068                         matcher.priority = MLX5_PRIORITY_MAP_L2;
6069                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
6070                                               MLX5_FLOW_LAYER_INNER_VLAN) :
6071                                              (MLX5_FLOW_LAYER_OUTER_L2 |
6072                                               MLX5_FLOW_LAYER_OUTER_VLAN);
6073                         break;
6074                 case RTE_FLOW_ITEM_TYPE_IPV4:
6075                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6076                                                   &item_flags, &tunnel);
6077                         flow_dv_translate_item_ipv4(match_mask, match_value,
6078                                                     items, tunnel, flow->group);
6079                         matcher.priority = MLX5_PRIORITY_MAP_L3;
6080                         dev_flow->dv.hash_fields |=
6081                                 mlx5_flow_hashfields_adjust
6082                                         (dev_flow, tunnel,
6083                                          MLX5_IPV4_LAYER_TYPES,
6084                                          MLX5_IPV4_IBV_RX_HASH);
6085                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6086                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6087                         if (items->mask != NULL &&
6088                             ((const struct rte_flow_item_ipv4 *)
6089                              items->mask)->hdr.next_proto_id) {
6090                                 next_protocol =
6091                                         ((const struct rte_flow_item_ipv4 *)
6092                                          (items->spec))->hdr.next_proto_id;
6093                                 next_protocol &=
6094                                         ((const struct rte_flow_item_ipv4 *)
6095                                          (items->mask))->hdr.next_proto_id;
6096                         } else {
6097                                 /* Reset for inner layer. */
6098                                 next_protocol = 0xff;
6099                         }
6100                         break;
6101                 case RTE_FLOW_ITEM_TYPE_IPV6:
6102                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6103                                                   &item_flags, &tunnel);
6104                         flow_dv_translate_item_ipv6(match_mask, match_value,
6105                                                     items, tunnel, flow->group);
6106                         matcher.priority = MLX5_PRIORITY_MAP_L3;
6107                         dev_flow->dv.hash_fields |=
6108                                 mlx5_flow_hashfields_adjust
6109                                         (dev_flow, tunnel,
6110                                          MLX5_IPV6_LAYER_TYPES,
6111                                          MLX5_IPV6_IBV_RX_HASH);
6112                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6113                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6114                         if (items->mask != NULL &&
6115                             ((const struct rte_flow_item_ipv6 *)
6116                              items->mask)->hdr.proto) {
6117                                 next_protocol =
6118                                         ((const struct rte_flow_item_ipv6 *)
6119                                          items->spec)->hdr.proto;
6120                                 next_protocol &=
6121                                         ((const struct rte_flow_item_ipv6 *)
6122                                          items->mask)->hdr.proto;
6123                         } else {
6124                                 /* Reset for inner layer. */
6125                                 next_protocol = 0xff;
6126                         }
6127                         break;
6128                 case RTE_FLOW_ITEM_TYPE_TCP:
6129                         flow_dv_translate_item_tcp(match_mask, match_value,
6130                                                    items, tunnel);
6131                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6132                         dev_flow->dv.hash_fields |=
6133                                 mlx5_flow_hashfields_adjust
6134                                         (dev_flow, tunnel, ETH_RSS_TCP,
6135                                          IBV_RX_HASH_SRC_PORT_TCP |
6136                                          IBV_RX_HASH_DST_PORT_TCP);
6137                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6138                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6139                         break;
6140                 case RTE_FLOW_ITEM_TYPE_UDP:
6141                         flow_dv_translate_item_udp(match_mask, match_value,
6142                                                    items, tunnel);
6143                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6144                         dev_flow->dv.hash_fields |=
6145                                 mlx5_flow_hashfields_adjust
6146                                         (dev_flow, tunnel, ETH_RSS_UDP,
6147                                          IBV_RX_HASH_SRC_PORT_UDP |
6148                                          IBV_RX_HASH_DST_PORT_UDP);
6149                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6150                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6151                         break;
6152                 case RTE_FLOW_ITEM_TYPE_GRE:
6153                         flow_dv_translate_item_gre(match_mask, match_value,
6154                                                    items, tunnel);
6155                         last_item = MLX5_FLOW_LAYER_GRE;
6156                         break;
6157                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6158                         flow_dv_translate_item_gre_key(match_mask,
6159                                                        match_value, items);
6160                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6161                         break;
6162                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6163                         flow_dv_translate_item_nvgre(match_mask, match_value,
6164                                                      items, tunnel);
6165                         last_item = MLX5_FLOW_LAYER_GRE;
6166                         break;
6167                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6168                         flow_dv_translate_item_vxlan(match_mask, match_value,
6169                                                      items, tunnel);
6170                         last_item = MLX5_FLOW_LAYER_VXLAN;
6171                         break;
6172                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6173                         flow_dv_translate_item_vxlan(match_mask, match_value,
6174                                                      items, tunnel);
6175                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6176                         break;
6177                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6178                         flow_dv_translate_item_geneve(match_mask, match_value,
6179                                                       items, tunnel);
6180                         last_item = MLX5_FLOW_LAYER_GENEVE;
6181                         break;
6182                 case RTE_FLOW_ITEM_TYPE_MPLS:
6183                         flow_dv_translate_item_mpls(match_mask, match_value,
6184                                                     items, last_item, tunnel);
6185                         last_item = MLX5_FLOW_LAYER_MPLS;
6186                         break;
6187                 case RTE_FLOW_ITEM_TYPE_META:
6188                         flow_dv_translate_item_meta(match_mask, match_value,
6189                                                     items);
6190                         last_item = MLX5_FLOW_ITEM_METADATA;
6191                         break;
6192                 case RTE_FLOW_ITEM_TYPE_ICMP:
6193                         flow_dv_translate_item_icmp(match_mask, match_value,
6194                                                     items, tunnel);
6195                         last_item = MLX5_FLOW_LAYER_ICMP;
6196                         break;
6197                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6198                         flow_dv_translate_item_icmp6(match_mask, match_value,
6199                                                       items, tunnel);
6200                         last_item = MLX5_FLOW_LAYER_ICMP6;
6201                         break;
6202                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6203                         flow_dv_translate_item_tag(match_mask, match_value,
6204                                                    items);
6205                         last_item = MLX5_FLOW_ITEM_TAG;
6206                         break;
6207                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6208                         flow_dv_translate_item_tx_queue(dev, match_mask,
6209                                                         match_value,
6210                                                         items);
6211                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
6212                         break;
6213                 default:
6214                         break;
6215                 }
6216                 item_flags |= last_item;
6217         }
6218         /*
6219          * When E-Switch mode is enabled, there are two ingress cases
6220          * where the source port must be set manually: a NIC steering
6221          * rule, and an E-Switch rule in which no port_id item was found.
6222          * In both cases the source port is set according to the port
6223          * currently in use.
6224          */
6225         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
6226             (priv->representor || priv->master)) {
6227                 if (flow_dv_translate_item_port_id(dev, match_mask,
6228                                                    match_value, NULL))
6229                         return -rte_errno;
6230         }
6231         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
6232                                          dev_flow->dv.value.buf));
6233         dev_flow->layers = item_flags;
6234         /* Register matcher. */
6235         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
6236                                     matcher.mask.size);
6237         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
6238                                                      matcher.priority);
6239         matcher.egress = attr->egress;
6240         matcher.group = flow->group;
6241         matcher.transfer = attr->transfer;
6242         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
6243                 return -rte_errno;
6244         return 0;
6245 }
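
/*
 * Illustrative only, not driver code: a minimal rte_flow pattern that the
 * item-translation loop above would walk. Each entry selects one case of
 * the switch; once the VXLAN item is reached, MLX5_FLOW_LAYER_VXLAN is
 * accumulated into item_flags, so any following items are translated as
 * inner headers (tunnel == 1). The VNI value is a made-up example.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x00, 0x2a },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */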
6246
6247 /**
6248  * Apply the flow to the NIC.
6249  *
6250  * @param[in] dev
6251  *   Pointer to the Ethernet device structure.
6252  * @param[in, out] flow
6253  *   Pointer to flow structure.
6254  * @param[out] error
6255  *   Pointer to error structure.
6256  *
6257  * @return
6258  *   0 on success, a negative errno value otherwise and rte_errno is set.
6259  */
6260 static int
6261 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
6262               struct rte_flow_error *error)
6263 {
6264         struct mlx5_flow_dv *dv;
6265         struct mlx5_flow *dev_flow;
6266         struct mlx5_priv *priv = dev->data->dev_private;
6267         int n;
6268         int err;
6269
6270         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6271                 dv = &dev_flow->dv;
6272                 n = dv->actions_n;
6273                 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
6274                         if (flow->transfer) {
6275                                 dv->actions[n++] = priv->sh->esw_drop_action;
6276                         } else {
6277                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
6278                                 if (!dv->hrxq) {
6279                                         rte_flow_error_set
6280                                                 (error, errno,
6281                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6282                                                  NULL,
6283                                                  "cannot get drop hash queue");
6284                                         goto error;
6285                                 }
6286                                 dv->actions[n++] = dv->hrxq->action;
6287                         }
6288                 } else if (dev_flow->actions &
6289                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
6290                         struct mlx5_hrxq *hrxq;
6291
6292                         hrxq = mlx5_hrxq_get(dev, flow->key,
6293                                              MLX5_RSS_HASH_KEY_LEN,
6294                                              dv->hash_fields,
6295                                              (*flow->queue),
6296                                              flow->rss.queue_num);
6297                         if (!hrxq) {
6298                                 hrxq = mlx5_hrxq_new
6299                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
6300                                          dv->hash_fields, (*flow->queue),
6301                                          flow->rss.queue_num,
6302                                          !!(dev_flow->layers &
6303                                             MLX5_FLOW_LAYER_TUNNEL));
6304                         }
6305                         if (!hrxq) {
6306                                 rte_flow_error_set
6307                                         (error, rte_errno,
6308                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6309                                          "cannot get hash queue");
6310                                 goto error;
6311                         }
6312                         dv->hrxq = hrxq;
6313                         dv->actions[n++] = dv->hrxq->action;
6314                 }
6315                 dv->flow =
6316                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
6317                                                   (void *)&dv->value, n,
6318                                                   dv->actions);
6319                 if (!dv->flow) {
6320                         rte_flow_error_set(error, errno,
6321                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6322                                            NULL,
6323                                            "hardware refuses to create flow");
6324                         goto error;
6325                 }
6326                 if (priv->vmwa_context &&
6327                     dev_flow->dv.vf_vlan.tag &&
6328                     !dev_flow->dv.vf_vlan.created) {
6329                         /*
6330                          * The rule contains the VLAN pattern.
6331                          * For a VF we create a VLAN interface
6332                          * to make the hypervisor set the correct
6333                          * e-Switch vport context.
6334                          */
6335                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
6336                 }
6337         }
6338         return 0;
6339 error:
6340         err = rte_errno; /* Save rte_errno before cleanup. */
6341         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6342                 struct mlx5_flow_dv *dv = &dev_flow->dv;
6343                 if (dv->hrxq) {
6344                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6345                                 mlx5_hrxq_drop_release(dev);
6346                         else
6347                                 mlx5_hrxq_release(dev, dv->hrxq);
6348                         dv->hrxq = NULL;
6349                 }
6350                 if (dev_flow->dv.vf_vlan.tag &&
6351                     dev_flow->dv.vf_vlan.created)
6352                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6353         }
6354         rte_errno = err; /* Restore rte_errno. */
6355         return -rte_errno;
6356 }
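
/*
 * Caller-side sketch, assuming the generic layer in mlx5_flow.c reaches
 * this function through the .apply driver op registered below (the
 * flow_drv_translate()/flow_drv_apply() dispatcher names are assumptions,
 * not defined in this file). For one rte_flow_create() call the expected
 * order is:
 *
 *	if (flow_drv_translate(dev, dev_flow, attr, items, actions, error))
 *		goto err;
 *	if (flow_drv_apply(dev, flow, error))
 *		goto err;
 *
 * On failure the error path above releases the hash Rx queue and VF VLAN
 * resources of every sub flow while preserving rte_errno.
 */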
6357
6358 /**
6359  * Release the flow matcher.
6360  *
6361  * @param dev
6362  *   Pointer to Ethernet device.
6363  * @param flow
6364  *   Pointer to mlx5_flow.
6365  *
6366  * @return
6367  *   1 while a reference on it exists, 0 when freed.
6368  */
6369 static int
6370 flow_dv_matcher_release(struct rte_eth_dev *dev,
6371                         struct mlx5_flow *flow)
6372 {
6373         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
6374         struct mlx5_priv *priv = dev->data->dev_private;
6375         struct mlx5_ibv_shared *sh = priv->sh;
6376         struct mlx5_flow_tbl_resource *tbl;
6377
6378         assert(matcher->matcher_object);
6379         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
6380                 dev->data->port_id, (void *)matcher,
6381                 rte_atomic32_read(&matcher->refcnt));
6382         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
6383                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
6384                            (matcher->matcher_object));
6385                 LIST_REMOVE(matcher, next);
6386                 if (matcher->egress)
6387                         tbl = &sh->tx_tbl[matcher->group];
6388                 else
6389                         tbl = &sh->rx_tbl[matcher->group];
6390                 flow_dv_tbl_resource_release(tbl);
6391                 rte_free(matcher);
6392                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
6393                         dev->data->port_id, (void *)matcher);
6394                 return 0;
6395         }
6396         return 1;
6397 }
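
/*
 * The release helpers below all share the reference-counting shape used by
 * flow_dv_matcher_release() above; a generic sketch with hypothetical
 * names (destroy_hw_object() stands in for the per-resource glue call):
 *
 *	if (rte_atomic32_dec_and_test(&res->refcnt)) {
 *		destroy_hw_object(res);
 *		LIST_REMOVE(res, next);
 *		rte_free(res);
 *		return 0;	(resource freed)
 *	}
 *	return 1;	(references remain)
 */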
6398
6399 /**
6400  * Release an encap/decap resource.
6401  *
6402  * @param flow
6403  *   Pointer to mlx5_flow.
6404  *
6405  * @return
6406  *   1 while a reference on it exists, 0 when freed.
6407  */
6408 static int
6409 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
6410 {
6411         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
6412                                                 flow->dv.encap_decap;
6413
6414         assert(cache_resource->verbs_action);
6415         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
6416                 (void *)cache_resource,
6417                 rte_atomic32_read(&cache_resource->refcnt));
6418         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6419                 claim_zero(mlx5_glue->destroy_flow_action
6420                                 (cache_resource->verbs_action));
6421                 LIST_REMOVE(cache_resource, next);
6422                 rte_free(cache_resource);
6423                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
6424                         (void *)cache_resource);
6425                 return 0;
6426         }
6427         return 1;
6428 }
6429
6430 /**
6431  * Release a jump-to-table action resource.
6432  *
6433  * @param flow
6434  *   Pointer to mlx5_flow.
6435  *
6436  * @return
6437  *   1 while a reference on it exists, 0 when freed.
6438  */
6439 static int
6440 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
6441 {
6442         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
6443                                                 flow->dv.jump;
6444
6445         assert(cache_resource->action);
6446         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
6447                 (void *)cache_resource,
6448                 rte_atomic32_read(&cache_resource->refcnt));
6449         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6450                 claim_zero(mlx5_glue->destroy_flow_action
6451                                 (cache_resource->action));
6452                 LIST_REMOVE(cache_resource, next);
6453                 flow_dv_tbl_resource_release(cache_resource->tbl);
6454                 rte_free(cache_resource);
6455                 DRV_LOG(DEBUG, "jump table resource %p: removed",
6456                         (void *)cache_resource);
6457                 return 0;
6458         }
6459         return 1;
6460 }
6461
6462 /**
6463  * Release a modify-header resource.
6464  *
6465  * @param flow
6466  *   Pointer to mlx5_flow.
6467  *
6468  * @return
6469  *   1 while a reference on it exists, 0 when freed.
6470  */
6471 static int
6472 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
6473 {
6474         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
6475                                                 flow->dv.modify_hdr;
6476
6477         assert(cache_resource->verbs_action);
6478         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
6479                 (void *)cache_resource,
6480                 rte_atomic32_read(&cache_resource->refcnt));
6481         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6482                 claim_zero(mlx5_glue->destroy_flow_action
6483                                 (cache_resource->verbs_action));
6484                 LIST_REMOVE(cache_resource, next);
6485                 rte_free(cache_resource);
6486                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
6487                         (void *)cache_resource);
6488                 return 0;
6489         }
6490         return 1;
6491 }
6492
6493 /**
6494  * Release a port ID action resource.
6495  *
6496  * @param flow
6497  *   Pointer to mlx5_flow.
6498  *
6499  * @return
6500  *   1 while a reference on it exists, 0 when freed.
6501  */
6502 static int
6503 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
6504 {
6505         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
6506                 flow->dv.port_id_action;
6507
6508         assert(cache_resource->action);
6509         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
6510                 (void *)cache_resource,
6511                 rte_atomic32_read(&cache_resource->refcnt));
6512         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6513                 claim_zero(mlx5_glue->destroy_flow_action
6514                                 (cache_resource->action));
6515                 LIST_REMOVE(cache_resource, next);
6516                 rte_free(cache_resource);
6517                 DRV_LOG(DEBUG, "port id action resource %p: removed",
6518                         (void *)cache_resource);
6519                 return 0;
6520         }
6521         return 1;
6522 }
6523
6524 /**
6525  * Release a push VLAN action resource.
6526  *
6527  * @param flow
6528  *   Pointer to mlx5_flow.
6529  *
6530  * @return
6531  *   1 while a reference on it exists, 0 when freed.
6532  */
6533 static int
6534 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6535 {
6536         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6537                 flow->dv.push_vlan_res;
6538
6539         assert(cache_resource->action);
6540         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6541                 (void *)cache_resource,
6542                 rte_atomic32_read(&cache_resource->refcnt));
6543         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6544                 claim_zero(mlx5_glue->destroy_flow_action
6545                                 (cache_resource->action));
6546                 LIST_REMOVE(cache_resource, next);
6547                 rte_free(cache_resource);
6548                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
6549                         (void *)cache_resource);
6550                 return 0;
6551         }
6552         return 1;
6553 }
6554
6555 /**
6556  * Remove the flow from the NIC but keep it in memory.
6557  *
6558  * @param[in] dev
6559  *   Pointer to Ethernet device.
6560  * @param[in, out] flow
6561  *   Pointer to flow structure.
6562  */
6563 static void
6564 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6565 {
6566         struct mlx5_flow_dv *dv;
6567         struct mlx5_flow *dev_flow;
6568
6569         if (!flow)
6570                 return;
6571         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6572                 dv = &dev_flow->dv;
6573                 if (dv->flow) {
6574                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6575                         dv->flow = NULL;
6576                 }
6577                 if (dv->hrxq) {
6578                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6579                                 mlx5_hrxq_drop_release(dev);
6580                         else
6581                                 mlx5_hrxq_release(dev, dv->hrxq);
6582                         dv->hrxq = NULL;
6583                 }
6584                 if (dev_flow->dv.vf_vlan.tag &&
6585                     dev_flow->dv.vf_vlan.created)
6586                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6587         }
6588 }
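
/*
 * The remove/apply split above allows detaching rules from the NIC across
 * a port restart while keeping them in memory. A lifecycle sketch,
 * assuming mlx5_flow_stop()/mlx5_flow_start() in the generic layer walk
 * the flow list and invoke the .remove/.apply ops:
 *
 *	mlx5_flow_stop(dev, &priv->flows);	(flow_dv_remove() per flow)
 *	reconfigure the port
 *	mlx5_flow_start(dev, &priv->flows);	(flow_dv_apply() per flow)
 */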
6589
6590 /**
6591  * Remove the flow from the NIC and the memory.
6592  *
6593  * @param[in] dev
6594  *   Pointer to the Ethernet device structure.
6595  * @param[in, out] flow
6596  *   Pointer to flow structure.
6597  */
6598 static void
6599 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6600 {
6601         struct mlx5_flow *dev_flow;
6602
6603         if (!flow)
6604                 return;
6605         flow_dv_remove(dev, flow);
6606         if (flow->counter) {
6607                 flow_dv_counter_release(dev, flow->counter);
6608                 flow->counter = NULL;
6609         }
6610         if (flow->tag_resource) {
6611                 flow_dv_tag_release(dev, flow->tag_resource);
6612                 flow->tag_resource = NULL;
6613         }
6614         while (!LIST_EMPTY(&flow->dev_flows)) {
6615                 dev_flow = LIST_FIRST(&flow->dev_flows);
6616                 LIST_REMOVE(dev_flow, next);
6617                 if (dev_flow->dv.matcher)
6618                         flow_dv_matcher_release(dev, dev_flow);
6619                 if (dev_flow->dv.encap_decap)
6620                         flow_dv_encap_decap_resource_release(dev_flow);
6621                 if (dev_flow->dv.modify_hdr)
6622                         flow_dv_modify_hdr_resource_release(dev_flow);
6623                 if (dev_flow->dv.jump)
6624                         flow_dv_jump_tbl_resource_release(dev_flow);
6625                 if (dev_flow->dv.port_id_action)
6626                         flow_dv_port_id_action_resource_release(dev_flow);
6627                 if (dev_flow->dv.push_vlan_res)
6628                         flow_dv_push_vlan_action_resource_release(dev_flow);
6629                 rte_free(dev_flow);
6630         }
6631 }
6632
6633 /**
6634  * Query a DV flow rule for its statistics via DevX.
6635  *
6636  * @param[in] dev
6637  *   Pointer to Ethernet device.
6638  * @param[in] flow
6639  *   Pointer to the flow.
6640  * @param[out] data
6641  *   Data retrieved by the query.
6642  * @param[out] error
6643  *   Perform verbose error reporting if not NULL.
6644  *
6645  * @return
6646  *   0 on success, a negative errno value otherwise and rte_errno is set.
6647  */
6648 static int
6649 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
6650                     void *data, struct rte_flow_error *error)
6651 {
6652         struct mlx5_priv *priv = dev->data->dev_private;
6653         struct rte_flow_query_count *qc = data;
6654
6655         if (!priv->config.devx)
6656                 return rte_flow_error_set(error, ENOTSUP,
6657                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6658                                           NULL,
6659                                           "counters are not supported");
6660         if (flow->counter) {
6661                 uint64_t pkts, bytes;
6662                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
6663                                                &bytes);
6664
6665                 if (err)
6666                         return rte_flow_error_set(error, -err,
6667                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6668                                         NULL, "cannot read counters");
6669                 qc->hits_set = 1;
6670                 qc->bytes_set = 1;
6671                 qc->hits = pkts - flow->counter->hits;
6672                 qc->bytes = bytes - flow->counter->bytes;
6673                 if (qc->reset) {
6674                         flow->counter->hits = pkts;
6675                         flow->counter->bytes = bytes;
6676                 }
6677                 return 0;
6678         }
6679         return rte_flow_error_set(error, EINVAL,
6680                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6681                                   NULL,
6682                                   "counters are not available");
6683 }
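
/*
 * Application-side sketch of reading these counters through the public
 * API (port_id and flow are hypothetical handles; the rule must carry a
 * COUNT action for this to succeed):
 *
 *	struct rte_flow_query_count qc = { .reset = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error err;
 *
 *	if (!rte_flow_query(port_id, flow, &count, &qc, &err))
 *		printf("hits %" PRIu64 ", bytes %" PRIu64 "\n",
 *		       qc.hits, qc.bytes);
 *
 * Because .reset is set, the saved baseline in flow->counter is updated,
 * so the next query reports deltas from this point on.
 */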
6684
6685 /**
6686  * Query a flow.
6687  *
6688  * @see rte_flow_query()
6689  * @see rte_flow_ops
6690  */
6691 static int
6692 flow_dv_query(struct rte_eth_dev *dev,
6693               struct rte_flow *flow,
6694               const struct rte_flow_action *actions,
6695               void *data,
6696               struct rte_flow_error *error)
6697 {
6698         int ret = -EINVAL;
6699
6700         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6701                 switch (actions->type) {
6702                 case RTE_FLOW_ACTION_TYPE_VOID:
6703                         break;
6704                 case RTE_FLOW_ACTION_TYPE_COUNT:
6705                         ret = flow_dv_query_count(dev, flow, data, error);
6706                         break;
6707                 default:
6708                         return rte_flow_error_set(error, ENOTSUP,
6709                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6710                                                   actions,
6711                                                   "action not supported");
6712                 }
6713         }
6714         return ret;
6715 }
6716
6717 /*
6718  * Mutex-protected thunk to flow_dv_translate().
6719  */
6720 static int
6721 flow_d_translate(struct rte_eth_dev *dev,
6722                  struct mlx5_flow *dev_flow,
6723                  const struct rte_flow_attr *attr,
6724                  const struct rte_flow_item items[],
6725                  const struct rte_flow_action actions[],
6726                  struct rte_flow_error *error)
6727 {
6728         int ret;
6729
6730         flow_d_shared_lock(dev);
6731         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
6732         flow_d_shared_unlock(dev);
6733         return ret;
6734 }
6735
6736 /*
6737  * Mutex-protected thunk to flow_dv_apply().
6738  */
6739 static int
6740 flow_d_apply(struct rte_eth_dev *dev,
6741              struct rte_flow *flow,
6742              struct rte_flow_error *error)
6743 {
6744         int ret;
6745
6746         flow_d_shared_lock(dev);
6747         ret = flow_dv_apply(dev, flow, error);
6748         flow_d_shared_unlock(dev);
6749         return ret;
6750 }
6751
6752 /*
6753  * Mutex-protected thunk to flow_dv_remove().
6754  */
6755 static void
6756 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6757 {
6758         flow_d_shared_lock(dev);
6759         flow_dv_remove(dev, flow);
6760         flow_d_shared_unlock(dev);
6761 }
6762
6763 /*
6764  * Mutex-protected thunk to flow_dv_destroy().
6765  */
6766 static void
6767 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6768 {
6769         flow_d_shared_lock(dev);
6770         flow_dv_destroy(dev, flow);
6771         flow_d_shared_unlock(dev);
6772 }
6773
6774 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
6775         .validate = flow_dv_validate,
6776         .prepare = flow_dv_prepare,
6777         .translate = flow_d_translate,
6778         .apply = flow_d_apply,
6779         .remove = flow_d_remove,
6780         .destroy = flow_d_destroy,
6781         .query = flow_dv_query,
6782 };
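
/*
 * A sketch of how this ops table is consumed, assuming the generic layer
 * in mlx5_flow.c resolves it by driver type (flow_get_drv_ops() and
 * MLX5_FLOW_TYPE_DV reflect that assumption):
 *
 *	const struct mlx5_flow_driver_ops *fops =
 *		flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
 *	ret = fops->apply(dev, flow, &error);
 */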
6783
6784 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */