/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
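
/*
 * The 16-bit VLAN TCI is laid out as PCP(3) | DEI(1) | VID(12). For
 * example, PCP = 5 and VID = 100 give:
 *
 *	tci = (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) |
 *	      (100 & MLX5DV_FLOW_VLAN_VID_MASK);	// = 0xa064
 *
 * The *_BE variants are for use against the big-endian TCI carried in
 * rte_flow VLAN items and actions.
 */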

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			attr->ipv4 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			attr->ipv6 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
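
/*
 * Illustration: for a pattern like eth / ipv4 / udp / end the loop above
 * sets attr->ipv4 = attr->udp = 1 (and attr->valid = 1), letting the
 * modify-header converters below choose between e.g. the modify_udp and
 * modify_tcp field tables without rescanning the items.
 */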

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};
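
/*
 * Each field description table is terminated by a zero-size entry;
 * flow_dv_convert_modify_action() iterates over a table while
 * field->size != 0.
 */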

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

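/**
 * Detect IP-in-IP tunneling from the inner protocol value.
 *
 * If the IPv4 protocol / IPv6 next header is IPPROTO_IPIP (4) or
 * IPPROTO_IPV6 (41), mark the corresponding tunnel layer in item_flags
 * and report the flow as tunneled via *tunnel.
 */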
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
			 struct rte_vlan_hdr *vlan)
{
	uint16_t vlan_tci;

	if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
		vlan_tci =
		    ((const struct rte_flow_action_of_set_vlan_pcp *)
					       action->conf)->vlan_pcp;
		vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
		vlan->vlan_tci |= vlan_tci;
	} else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
		vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
		vlan->vlan_tci |= rte_be_to_cpu_16
		    (((const struct rte_flow_action_of_set_vlan_vid *)
					     action->conf)->vlan_vid);
	}
}
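
/*
 * Example: applying OF_SET_VLAN_PCP with vlan_pcp = 5 to vlan_tci 0x0064
 * (VID 100) clears the three PCP bits and ORs in 5 << 13, yielding
 * 0xa064; the VID bits are left untouched.
 */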

/**
 * Fetch a 1-, 2-, 3- or 4-byte field from the byte array
 * and return it as an unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
	uint32_t ret;

	switch (size) {
	case 1:
		ret = *data;
		break;
	case 2:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		break;
	case 3:
		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
		ret = (ret << 8) | *(data + sizeof(uint16_t));
		break;
	case 4:
		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
		break;
	default:
		assert(false);
		ret = 0;
		break;
	}
	return ret;
}
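
/*
 * Example: for data = {0x12, 0x34, 0x56} and size = 3, the 16-bit
 * big-endian load yields 0x1234, which is shifted left by 8 and ORed
 * with 0x56, returning 0x123456 in host endianness.
 */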

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by the provided field
 * description and the item mask. Data bit offset and width of each action
 * are determined by the provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET and MLX5_MODIFICATION_TYPE_ADD
 *     specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   Size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct field_modify_info *dcopy,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type, struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;

	/*
	 * The item and mask are provided in big-endian format.
	 * The fields should be presented in big-endian format as well.
	 * Mask must always be present, it defines the actual field width.
	 */
	assert(item->mask);
	assert(field->size);
	do {
		unsigned int size_b;
		unsigned int off_b;
		uint32_t mask;
		uint32_t data;

		if (i >= MLX5_MODIFY_NUM)
			return rte_flow_error_set(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
				 "too many items to modify");
		/* Fetch variable byte size mask from the array. */
		mask = flow_dv_fetch_field((const uint8_t *)item->mask +
					   field->offset, field->size);
		if (!mask) {
			++field;
			continue;
		}
		/* Deduce actual data width in bits from mask value. */
		off_b = rte_bsf32(mask);
		size_b = sizeof(uint32_t) * CHAR_BIT -
			 off_b - __builtin_clz(mask);
		assert(size_b);
		size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
		actions[i].action_type = type;
		actions[i].field = field->id;
		actions[i].offset = off_b;
		actions[i].length = size_b;
		/* Convert entire record to expected big-endian format. */
		actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
		if (type == MLX5_MODIFICATION_TYPE_COPY) {
			assert(dcopy);
			actions[i].dst_field = dcopy->id;
			actions[i].dst_offset =
				(int)dcopy->offset < 0 ? off_b : dcopy->offset;
			/* Convert entire record to big-endian format. */
			actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
		} else {
			assert(item->spec);
			data = flow_dv_fetch_field((const uint8_t *)item->spec +
						   field->offset, field->size);
			/* Shift out the trailing masked bits from data. */
			data = (data & mask) >> off_b;
			actions[i].data1 = rte_cpu_to_be_32(data);
		}
		++i;
		++field;
	} while (field->size);
	resource->actions_num = i;
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}
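
/*
 * Example of the width/offset deduction above: a 4-byte field with mask
 * 0x00ffff00 has rte_bsf32(mask) = 8 trailing zero bits and
 * __builtin_clz(mask) = 8 leading zero bits, so off_b = 8 and
 * size_b = 32 - 8 - 8 = 16, i.e. a 16-bit modification starting at bit 8.
 */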

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	/*
	 * The pointer must address the whole commands array; the slot for
	 * this command is selected by indexing with 'i' below.
	 */
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
	actions[i].field = field->id;
	actions[i].length = field->size;
	actions[i].offset = field->offset;
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment operation.
		 * To simulate decrement X from Y using increment operation
		 * we need to add UINT32_MAX X times to Y.
		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
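
/*
 * The multiplication above relies on 32-bit wrap-around: UINT32_MAX is
 * congruent to -1 modulo 2^32, so X * UINT32_MAX is congruent to -X and
 * a single hardware ADD of the truncated value decrements the sequence
 * number by X.
 */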

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment operation.
		 * To simulate decrement X from Y using increment operation
		 * we need to add UINT32_MAX X times to Y.
		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
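
/*
 * reg_to_field[] translates an mlx5 metadata register id (REG_A/REG_B,
 * REG_C_0..REG_C_7) into the modify-header field id consumed by
 * flow_dv_convert_action_set_reg() below.
 */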

static enum mlx5_modification_field reg_to_field[] = {
	[REG_A] = MLX5_MODI_META_DATA_REG_A,
	[REG_B] = MLX5_MODI_META_DATA_REG_B,
	[REG_C_0] = MLX5_MODI_META_REG_C_0,
	[REG_C_1] = MLX5_MODI_META_REG_C_1,
	[REG_C_2] = MLX5_MODI_META_REG_C_2,
	[REG_C_3] = MLX5_MODI_META_REG_C_3,
	[REG_C_4] = MLX5_MODI_META_REG_C_4,
	[REG_C_5] = MLX5_MODI_META_REG_C_5,
	[REG_C_6] = MLX5_MODI_META_REG_C_6,
	[REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct mlx5_rte_flow_action_set_tag *conf = (action->conf);
	struct mlx5_modification_cmd *actions = resource->actions;
	uint32_t i = resource->actions_num;

	if (i >= MLX5_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "too many items to modify");
	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
	actions[i].field = reg_to_field[conf->id];
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->data;
	++i;
	resource->actions_num = i;
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = UINT32_MAX
	};
	int ret;

	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "pattern not supported for ingress");
	return 0;
}

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	struct mlx5_priv *esw_priv;
	struct mlx5_priv *dev_priv;
	int ret;

	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					  mask,
					  "no support for partial mask on"
					  " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 error);
	if (ret)
		return ret;
	if (!spec)
		return 0;
	esw_priv = mlx5_port_to_eswitch_info(spec->id);
	if (!esw_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	dev_priv = mlx5_dev_to_eswitch_info(dev);
	if (!dev_priv)
		return rte_flow_error_set(error, rte_errno,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	if (esw_priv->domain_id != dev_priv->domain_id)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}

/**
 * Validate the pop VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the pop vlan action.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
				 uint64_t action_flags,
				 const struct rte_flow_action *action,
				 uint64_t item_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	(void)action;
	(void)attr;
	if (!priv->sh->pop_vlan_action)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "pop vlan action is not supported");
	/*
	 * Check for inconsistencies:
	 *  fail strip_vlan in a flow that matches packets without VLAN tags,
	 *  i.e. without an explicit match on the (outer) VLAN tag.
	 */
	if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "no support for multiple vlan pop "
					  "actions");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot pop vlan without a "
					  "match on (outer) vlan in the flow");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after pop VLAN action");
	return 0;
}

/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
				  struct rte_vlan_hdr *vlan)
{
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
				MLX5DV_FLOW_VLAN_VID_MASK),
		.inner_type = RTE_BE16(0xffff),
	};

	if (items == NULL)
		return;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
	       items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
		;
	if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		const struct rte_flow_item_vlan *vlan_m = items->mask;
		const struct rte_flow_item_vlan *vlan_v = items->spec;

		if (!vlan_m)
			vlan_m = &nic_mask;
		/* Only full match values are accepted. */
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
		}
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
		}
		if (vlan_m->inner_type == nic_mask.inner_type)
			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
							   vlan_m->inner_type);
	}
}
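
/*
 * Example: a VLAN item with spec .tci = RTE_BE16(0xa064) and a mask
 * covering both the PCP and VID bits makes the helper above fill
 * vlan->vlan_tci with PCP 5 and VID 100; a partial mask (say, VID bits
 * only) leaves the uncovered subfield of vlan_tci unchanged.
 */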

/**
 * Validate the push VLAN action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] action
 *   Pointer to the push VLAN action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
				  uint64_t item_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;

	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "invalid vlan ethertype");
	if (action_flags &
		(MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "no support for multiple VLAN "
					  "actions");
	if (!mlx5_flow_find_action
			(action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
	    !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ACTION, action,
				"push VLAN needs to match on VLAN in order to "
				"get VLAN VID information because there is "
				"no following set VLAN VID action");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after push VLAN");
	(void)attr;
	return 0;
}

/**
 * Validate the set VLAN PCP.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;

	if (conf->vlan_pcp > 7)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "VLAN PCP value is too big");
	if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set VLAN PCP action must follow "
					  "the push VLAN action");
	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "multiple VLAN PCP modifications are "
					  "not supported");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after set VLAN PCP");
	return 0;
}

/**
 * Validate the set VLAN VID.
 *
 * @param[in] item_flags
 *   Holds the items detected in this rule.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
				     uint64_t action_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

	/* Compare in host byte order; a big-endian compare misorders VIDs. */
	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "VLAN VID value is too big");
	/* There is an of_push_vlan action before us. */
	if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
		if (mlx5_flow_find_action(actions + 1,
					  RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, action,
					"multiple VLAN VID modifications are "
					"not supported");
		else
			return 0;
	}

	/*
	 * Action is on an existing VLAN header:
	 *    Need to verify this is a single modify VID action.
	 *    Rule must include a match on outer VLAN.
	 */
	if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "multiple VLAN VID modifications are "
					  "not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "match on VLAN is required in order "
					  "to set VLAN VID");
	if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "wrong action order, port_id should "
					  "be after set VLAN VID");
	return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (!attr->transfer && attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}
1373
1374 /**
1375  * Validate the L2 decap action.
1376  *
1377  * @param[in] action_flags
1378  *   Holds the actions detected until now.
1379  * @param[in] attr
1380  *   Pointer to flow attributes.
1381  * @param[out] error
1382  *   Pointer to error structure.
1383  *
1384  * @return
1385  *   0 on success, a negative errno value otherwise and rte_errno is set.
1386  */
1387 static int
1388 flow_dv_validate_action_l2_decap(uint64_t action_flags,
1389                                  const struct rte_flow_attr *attr,
1390                                  struct rte_flow_error *error)
1391 {
1392         if (action_flags & MLX5_FLOW_ACTION_DROP)
1393                 return rte_flow_error_set(error, EINVAL,
1394                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1395                                           "can't drop and decap in same flow");
1396         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1397                 return rte_flow_error_set(error, EINVAL,
1398                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1399                                           "can only have a single encap or"
1400                                           " decap action in a flow");
1401         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1404                                           "can't have decap action after"
1405                                           " modify action");
1406         if (attr->egress)
1407                 return rte_flow_error_set(error, ENOTSUP,
1408                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1409                                           NULL,
1410                                           "decap action not supported for "
1411                                           "egress");
1412         return 0;
1413 }
1414
1415 /**
1416  * Validate the raw encap action.
1417  *
1418  * @param[in] action_flags
1419  *   Holds the actions detected until now.
1420  * @param[in] action
1421  *   Pointer to the encap action.
1422  * @param[in] attr
1423  *   Pointer to flow attributes.
1424  * @param[out] error
1425  *   Pointer to error structure.
1426  *
1427  * @return
1428  *   0 on success, a negative errno value otherwise and rte_errno is set.
1429  */
1430 static int
1431 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1432                                   const struct rte_flow_action *action,
1433                                   const struct rte_flow_attr *attr,
1434                                   struct rte_flow_error *error)
1435 {
1436         const struct rte_flow_action_raw_encap *raw_encap =
1437                 (const struct rte_flow_action_raw_encap *)action->conf;
1438         if (!(action->conf))
1439                 return rte_flow_error_set(error, EINVAL,
1440                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1441                                           "configuration cannot be null");
1442         if (action_flags & MLX5_FLOW_ACTION_DROP)
1443                 return rte_flow_error_set(error, EINVAL,
1444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1445                                           "can't drop and encap in same flow");
1446         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1447                 return rte_flow_error_set(error, EINVAL,
1448                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1449                                           "can only have a single encap"
1450                                           " action in a flow");
1451         /* Encap without preceding decap is not supported for ingress. */
1452         if (!attr->transfer && attr->ingress &&
1453             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1454                 return rte_flow_error_set(error, ENOTSUP,
1455                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1456                                           NULL,
1457                                           "encap action not supported for "
1458                                           "ingress");
1459         if (!raw_encap->size || !raw_encap->data)
1460                 return rte_flow_error_set(error, EINVAL,
1461                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1462                                           "raw encap data cannot be empty");
1463         return 0;
1464 }
1465
1466 /**
1467  * Validate the raw decap action.
1468  *
1469  * @param[in] action_flags
1470  *   Holds the actions detected until now.
1471  * @param[in] action
1472  *   Pointer to the decap action.
1473  * @param[in] attr
1474  *   Pointer to flow attributes.
1475  * @param[out] error
1476  *   Pointer to error structure.
1477  *
1478  * @return
1479  *   0 on success, a negative errno value otherwise and rte_errno is set.
1480  */
1481 static int
1482 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1483                                   const struct rte_flow_action *action,
1484                                   const struct rte_flow_attr *attr,
1485                                   struct rte_flow_error *error)
1486 {
1487         if (action_flags & MLX5_FLOW_ACTION_DROP)
1488                 return rte_flow_error_set(error, EINVAL,
1489                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1490                                           "can't drop and decap in same flow");
1491         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1492                 return rte_flow_error_set(error, EINVAL,
1493                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1494                                           "can't have encap action before"
1495                                           " decap action");
1496         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1497                 return rte_flow_error_set(error, EINVAL,
1498                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1499                                           "can only have a single decap"
1500                                           " action in a flow");
1501         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1502                 return rte_flow_error_set(error, EINVAL,
1503                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1504                                           "can't have decap action after"
1505                                           " modify action");
1506         /* Decap action is valid on egress only if it is followed by encap. */
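        /*
         * For example, "actions raw_decap / raw_encap / end" is valid on
         * egress, while a plain "actions raw_decap / end" is rejected.
         */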
1507         if (attr->egress) {
1508                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1509                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1510                        action++) {
1511                 }
1512                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1513                         return rte_flow_error_set
1514                                         (error, ENOTSUP,
1515                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1516                                          NULL, "decap action not supported"
1517                                          " for egress");
1518         }
1519         return 0;
1520 }
1521
1522 /**
1523  * Find existing encap/decap resource or create and register a new one.
1524  *
1525  * @param[in, out] dev
1526  *   Pointer to rte_eth_dev structure.
1527  * @param[in, out] resource
1528  *   Pointer to encap/decap resource.
1529  * @param[in, out] dev_flow
1530  *   Pointer to the dev_flow.
1531  * @param[out] error
1532  *   Pointer to error structure.
1533  *
1534  * @return
1535  *   0 on success, a negative errno value otherwise and rte_errno is set.
1536  */
1537 static int
1538 flow_dv_encap_decap_resource_register
1539                         (struct rte_eth_dev *dev,
1540                          struct mlx5_flow_dv_encap_decap_resource *resource,
1541                          struct mlx5_flow *dev_flow,
1542                          struct rte_flow_error *error)
1543 {
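        /*
         * Like the other *_resource_register() helpers below, look the
         * resource up in the per-device cache first and take a reference
         * on a hit; otherwise allocate a new entry, create the
         * corresponding DR action and link the entry into the cache.
         */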
1544         struct mlx5_priv *priv = dev->data->dev_private;
1545         struct mlx5_ibv_shared *sh = priv->sh;
1546         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1547         struct rte_flow *flow = dev_flow->flow;
1548         struct mlx5dv_dr_domain *domain;
1549
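        /* Group 0 maps to the root table, reformat actions on it must be
         * created with the root-level flag (1), other tables use 0. */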
1550         resource->flags = flow->group ? 0 : 1;
1551         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1552                 domain = sh->fdb_domain;
1553         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1554                 domain = sh->rx_domain;
1555         else
1556                 domain = sh->tx_domain;
1557
1558         /* Lookup a matching resource from cache. */
1559         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1560                 if (resource->reformat_type == cache_resource->reformat_type &&
1561                     resource->ft_type == cache_resource->ft_type &&
1562                     resource->flags == cache_resource->flags &&
1563                     resource->size == cache_resource->size &&
1564                     !memcmp((const void *)resource->buf,
1565                             (const void *)cache_resource->buf,
1566                             resource->size)) {
1567                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1568                                 (void *)cache_resource,
1569                                 rte_atomic32_read(&cache_resource->refcnt));
1570                         rte_atomic32_inc(&cache_resource->refcnt);
1571                         dev_flow->dv.encap_decap = cache_resource;
1572                         return 0;
1573                 }
1574         }
1575         /* Register new encap/decap resource. */
1576         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1577         if (!cache_resource)
1578                 return rte_flow_error_set(error, ENOMEM,
1579                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1580                                           "cannot allocate resource memory");
1581         *cache_resource = *resource;
1582         cache_resource->verbs_action =
1583                 mlx5_glue->dv_create_flow_action_packet_reformat
1584                         (sh->ctx, cache_resource->reformat_type,
1585                          cache_resource->ft_type, domain, cache_resource->flags,
1586                          cache_resource->size,
1587                          (cache_resource->size ? cache_resource->buf : NULL));
1588         if (!cache_resource->verbs_action) {
1589                 rte_free(cache_resource);
1590                 return rte_flow_error_set(error, ENOMEM,
1591                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1592                                           NULL, "cannot create action");
1593         }
1594         rte_atomic32_init(&cache_resource->refcnt);
1595         rte_atomic32_inc(&cache_resource->refcnt);
1596         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1597         dev_flow->dv.encap_decap = cache_resource;
1598         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1599                 (void *)cache_resource,
1600                 rte_atomic32_read(&cache_resource->refcnt));
1601         return 0;
1602 }
1603
1604 /**
1605  * Find existing table jump resource or create and register a new one.
1606  *
1607  * @param[in, out] dev
1608  *   Pointer to rte_eth_dev structure.
1609  * @param[in, out] resource
1610  *   Pointer to jump table resource.
1611  * @param[in, out] dev_flow
1612  *   Pointer to the dev_flow.
1613  * @param[out] error
1614  *   Pointer to error structure.
1615  *
1616  * @return
1617  *   0 on success, a negative errno value otherwise and rte_errno is set.
1618  */
1619 static int
1620 flow_dv_jump_tbl_resource_register
1621                         (struct rte_eth_dev *dev,
1622                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1623                          struct mlx5_flow *dev_flow,
1624                          struct rte_flow_error *error)
1625 {
1626         struct mlx5_priv *priv = dev->data->dev_private;
1627         struct mlx5_ibv_shared *sh = priv->sh;
1628         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1629
1630         /* Lookup a matching resource from cache. */
1631         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1632                 if (resource->tbl == cache_resource->tbl) {
1633                         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1634                                 (void *)cache_resource,
1635                                 rte_atomic32_read(&cache_resource->refcnt));
1636                         rte_atomic32_inc(&cache_resource->refcnt);
1637                         dev_flow->dv.jump = cache_resource;
1638                         return 0;
1639                 }
1640         }
1641         /* Register new jump table resource. */
1642         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1643         if (!cache_resource)
1644                 return rte_flow_error_set(error, ENOMEM,
1645                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1646                                           "cannot allocate resource memory");
1647         *cache_resource = *resource;
1648         cache_resource->action =
1649                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1650                 (resource->tbl->obj);
1651         if (!cache_resource->action) {
1652                 rte_free(cache_resource);
1653                 return rte_flow_error_set(error, ENOMEM,
1654                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1655                                           NULL, "cannot create action");
1656         }
1657         rte_atomic32_init(&cache_resource->refcnt);
1658         rte_atomic32_inc(&cache_resource->refcnt);
1659         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1660         dev_flow->dv.jump = cache_resource;
1661         DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1662                 (void *)cache_resource,
1663                 rte_atomic32_read(&cache_resource->refcnt));
1664         return 0;
1665 }
1666
1667 /**
1668  * Find existing port ID action resource or create and register a new one.
1669  *
1670  * @param[in, out] dev
1671  *   Pointer to rte_eth_dev structure.
1672  * @param[in, out] resource
1673  *   Pointer to port ID action resource.
1674  * @param[in, out] dev_flow
1675  *   Pointer to the dev_flow.
1676  * @param[out] error
1677  *   Pointer to error structure.
1678  *
1679  * @return
1680  *   0 on success, a negative errno value otherwise and rte_errno is set.
1681  */
1682 static int
1683 flow_dv_port_id_action_resource_register
1684                         (struct rte_eth_dev *dev,
1685                          struct mlx5_flow_dv_port_id_action_resource *resource,
1686                          struct mlx5_flow *dev_flow,
1687                          struct rte_flow_error *error)
1688 {
1689         struct mlx5_priv *priv = dev->data->dev_private;
1690         struct mlx5_ibv_shared *sh = priv->sh;
1691         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1692
1693         /* Lookup a matching resource from cache. */
1694         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1695                 if (resource->port_id == cache_resource->port_id) {
1696                         DRV_LOG(DEBUG, "port id action resource %p: "
1697                                 "refcnt %d++",
1698                                 (void *)cache_resource,
1699                                 rte_atomic32_read(&cache_resource->refcnt));
1700                         rte_atomic32_inc(&cache_resource->refcnt);
1701                         dev_flow->dv.port_id_action = cache_resource;
1702                         return 0;
1703                 }
1704         }
1705         /* Register new port id action resource. */
1706         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1707         if (!cache_resource)
1708                 return rte_flow_error_set(error, ENOMEM,
1709                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1710                                           "cannot allocate resource memory");
1711         *cache_resource = *resource;
1712         cache_resource->action =
1713                 mlx5_glue->dr_create_flow_action_dest_vport
1714                         (priv->sh->fdb_domain, resource->port_id);
1715         if (!cache_resource->action) {
1716                 rte_free(cache_resource);
1717                 return rte_flow_error_set(error, ENOMEM,
1718                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1719                                           NULL, "cannot create action");
1720         }
1721         rte_atomic32_init(&cache_resource->refcnt);
1722         rte_atomic32_inc(&cache_resource->refcnt);
1723         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1724         dev_flow->dv.port_id_action = cache_resource;
1725         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1726                 (void *)cache_resource,
1727                 rte_atomic32_read(&cache_resource->refcnt));
1728         return 0;
1729 }
1730
1731 /**
1732  * Find existing push vlan resource or create and register a new one.
1733  *
1734  * @param[in, out] dev
1735  *   Pointer to rte_eth_dev structure.
1736  * @param[in, out] resource
1737  *   Pointer to push VLAN action resource.
1738  * @param[in, out] dev_flow
1739  *   Pointer to the dev_flow.
1740  * @param[out] error
1741  *   Pointer to error structure.
1742  *
1743  * @return
1744  *   0 on success, a negative errno value otherwise and rte_errno is set.
1745  */
1746 static int
1747 flow_dv_push_vlan_action_resource_register
1748                        (struct rte_eth_dev *dev,
1749                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1750                         struct mlx5_flow *dev_flow,
1751                         struct rte_flow_error *error)
1752 {
1753         struct mlx5_priv *priv = dev->data->dev_private;
1754         struct mlx5_ibv_shared *sh = priv->sh;
1755         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1756         struct mlx5dv_dr_domain *domain;
1757
1758         /* Lookup a matching resource from cache. */
1759         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1760                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1761                     resource->ft_type == cache_resource->ft_type) {
1762                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
1763                                 "refcnt %d++",
1764                                 (void *)cache_resource,
1765                                 rte_atomic32_read(&cache_resource->refcnt));
1766                         rte_atomic32_inc(&cache_resource->refcnt);
1767                         dev_flow->dv.push_vlan_res = cache_resource;
1768                         return 0;
1769                 }
1770         }
1771         /* Register new push_vlan action resource. */
1772         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1773         if (!cache_resource)
1774                 return rte_flow_error_set(error, ENOMEM,
1775                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1776                                           "cannot allocate resource memory");
1777         *cache_resource = *resource;
1778         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1779                 domain = sh->fdb_domain;
1780         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1781                 domain = sh->rx_domain;
1782         else
1783                 domain = sh->tx_domain;
1784         cache_resource->action =
1785                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1786                                                            resource->vlan_tag);
1787         if (!cache_resource->action) {
1788                 rte_free(cache_resource);
1789                 return rte_flow_error_set(error, ENOMEM,
1790                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1791                                           NULL, "cannot create action");
1792         }
1793         rte_atomic32_init(&cache_resource->refcnt);
1794         rte_atomic32_inc(&cache_resource->refcnt);
1795         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1796         dev_flow->dv.push_vlan_res = cache_resource;
1797         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1798                 (void *)cache_resource,
1799                 rte_atomic32_read(&cache_resource->refcnt));
1800         return 0;
1801 }

1802 /**
1803  * Get the size of a specific rte_flow_item_type.
1804  *
1805  * @param[in] item_type
1806  *   Tested rte_flow_item_type.
1807  *
1808  * @return
1809  *   Size of the item type specific structure, 0 if void or irrelevant.
1810  */
1811 static size_t
1812 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1813 {
1814         size_t retval;
1815
1816         switch (item_type) {
1817         case RTE_FLOW_ITEM_TYPE_ETH:
1818                 retval = sizeof(struct rte_flow_item_eth);
1819                 break;
1820         case RTE_FLOW_ITEM_TYPE_VLAN:
1821                 retval = sizeof(struct rte_flow_item_vlan);
1822                 break;
1823         case RTE_FLOW_ITEM_TYPE_IPV4:
1824                 retval = sizeof(struct rte_flow_item_ipv4);
1825                 break;
1826         case RTE_FLOW_ITEM_TYPE_IPV6:
1827                 retval = sizeof(struct rte_flow_item_ipv6);
1828                 break;
1829         case RTE_FLOW_ITEM_TYPE_UDP:
1830                 retval = sizeof(struct rte_flow_item_udp);
1831                 break;
1832         case RTE_FLOW_ITEM_TYPE_TCP:
1833                 retval = sizeof(struct rte_flow_item_tcp);
1834                 break;
1835         case RTE_FLOW_ITEM_TYPE_VXLAN:
1836                 retval = sizeof(struct rte_flow_item_vxlan);
1837                 break;
1838         case RTE_FLOW_ITEM_TYPE_GRE:
1839                 retval = sizeof(struct rte_flow_item_gre);
1840                 break;
1841         case RTE_FLOW_ITEM_TYPE_NVGRE:
1842                 retval = sizeof(struct rte_flow_item_nvgre);
1843                 break;
1844         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1845                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1846                 break;
1847         case RTE_FLOW_ITEM_TYPE_MPLS:
1848                 retval = sizeof(struct rte_flow_item_mpls);
1849                 break;
1850         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1851         default:
1852                 retval = 0;
1853                 break;
1854         }
1855         return retval;
1856 }
1857
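/* Default values used to complete unset fields in the encap data. */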
1858 #define MLX5_ENCAP_IPV4_VERSION         0x40
1859 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1860 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1861 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1862 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1863 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1864 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1865
1866 /**
1867  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
1868  *
1869  * @param[in] items
1870  *   Pointer to rte_flow_item objects list.
1871  * @param[out] buf
1872  *   Pointer to the output buffer.
1873  * @param[out] size
1874  *   Pointer to the output buffer size.
1875  * @param[out] error
1876  *   Pointer to the error structure.
1877  *
1878  * @return
1879  *   0 on success, a negative errno value otherwise and rte_errno is set.
1880  */
1881 static int
1882 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1883                            size_t *size, struct rte_flow_error *error)
1884 {
1885         struct rte_ether_hdr *eth = NULL;
1886         struct rte_vlan_hdr *vlan = NULL;
1887         struct rte_ipv4_hdr *ipv4 = NULL;
1888         struct rte_ipv6_hdr *ipv6 = NULL;
1889         struct rte_udp_hdr *udp = NULL;
1890         struct rte_vxlan_hdr *vxlan = NULL;
1891         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1892         struct rte_gre_hdr *gre = NULL;
1893         size_t len;
1894         size_t temp_size = 0;
1895
1896         if (!items)
1897                 return rte_flow_error_set(error, EINVAL,
1898                                           RTE_FLOW_ERROR_TYPE_ACTION,
1899                                           NULL, "invalid empty data");
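        /*
         * Copy each item spec into the buffer back to back, filling in
         * mandatory fields (EtherType, IP version/TTL, next protocol,
         * UDP destination port, tunnel flags) left unset by the spec.
         */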
1900         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1901                 len = flow_dv_get_item_len(items->type);
1902                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1903                         return rte_flow_error_set(error, EINVAL,
1904                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1905                                                   (void *)items->type,
1906                                                   "items total size is too big"
1907                                                   " for encap action");
1908                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1909                 switch (items->type) {
1910                 case RTE_FLOW_ITEM_TYPE_ETH:
1911                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1912                         break;
1913                 case RTE_FLOW_ITEM_TYPE_VLAN:
1914                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1915                         if (!eth)
1916                                 return rte_flow_error_set(error, EINVAL,
1917                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1918                                                 (void *)items->type,
1919                                                 "eth header not found");
1920                         if (!eth->ether_type)
1921                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1922                         break;
1923                 case RTE_FLOW_ITEM_TYPE_IPV4:
1924                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1925                         if (!vlan && !eth)
1926                                 return rte_flow_error_set(error, EINVAL,
1927                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1928                                                 (void *)items->type,
1929                                                 "neither eth nor vlan"
1930                                                 " header found");
1931                         if (vlan && !vlan->eth_proto)
1932                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1933                         else if (eth && !eth->ether_type)
1934                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1935                         if (!ipv4->version_ihl)
1936                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1937                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1938                         if (!ipv4->time_to_live)
1939                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1940                         break;
1941                 case RTE_FLOW_ITEM_TYPE_IPV6:
1942                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1943                         if (!vlan && !eth)
1944                                 return rte_flow_error_set(error, EINVAL,
1945                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1946                                                 (void *)items->type,
1947                                                 "neither eth nor vlan"
1948                                                 " header found");
1949                         if (vlan && !vlan->eth_proto)
1950                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1951                         else if (eth && !eth->ether_type)
1952                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1953                         if (!ipv6->vtc_flow)
1954                                 ipv6->vtc_flow =
1955                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1956                         if (!ipv6->hop_limits)
1957                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1958                         break;
1959                 case RTE_FLOW_ITEM_TYPE_UDP:
1960                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1961                         if (!ipv4 && !ipv6)
1962                                 return rte_flow_error_set(error, EINVAL,
1963                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1964                                                 (void *)items->type,
1965                                                 "ip header not found");
1966                         if (ipv4 && !ipv4->next_proto_id)
1967                                 ipv4->next_proto_id = IPPROTO_UDP;
1968                         else if (ipv6 && !ipv6->proto)
1969                                 ipv6->proto = IPPROTO_UDP;
1970                         break;
1971                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1972                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1973                         if (!udp)
1974                                 return rte_flow_error_set(error, EINVAL,
1975                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1976                                                 (void *)items->type,
1977                                                 "udp header not found");
1978                         if (!udp->dst_port)
1979                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1980                         if (!vxlan->vx_flags)
1981                                 vxlan->vx_flags =
1982                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1983                         break;
1984                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1985                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1986                         if (!udp)
1987                                 return rte_flow_error_set(error, EINVAL,
1988                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1989                                                 (void *)items->type,
1990                                                 "udp header not found");
1991                         if (!vxlan_gpe->proto)
1992                                 return rte_flow_error_set(error, EINVAL,
1993                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1994                                                 (void *)items->type,
1995                                                 "next protocol not found");
1996                         if (!udp->dst_port)
1997                                 udp->dst_port =
1998                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1999                         if (!vxlan_gpe->vx_flags)
2000                                 vxlan_gpe->vx_flags =
2001                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
2002                         break;
2003                 case RTE_FLOW_ITEM_TYPE_GRE:
2004                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2005                         gre = (struct rte_gre_hdr *)&buf[temp_size];
2006                         if (!gre->proto)
2007                                 return rte_flow_error_set(error, EINVAL,
2008                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2009                                                 (void *)items->type,
2010                                                 "next protocol not found");
2011                         if (!ipv4 && !ipv6)
2012                                 return rte_flow_error_set(error, EINVAL,
2013                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2014                                                 (void *)items->type,
2015                                                 "ip header not found");
2016                         if (ipv4 && !ipv4->next_proto_id)
2017                                 ipv4->next_proto_id = IPPROTO_GRE;
2018                         else if (ipv6 && !ipv6->proto)
2019                                 ipv6->proto = IPPROTO_GRE;
2020                         break;
2021                 case RTE_FLOW_ITEM_TYPE_VOID:
2022                         break;
2023                 default:
2024                         return rte_flow_error_set(error, EINVAL,
2025                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2026                                                   (void *)items->type,
2027                                                   "unsupported item type");
2028                         break;
2029                 }
2030                 temp_size += len;
2031         }
2032         *size = temp_size;
2033         return 0;
2034 }
2035
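/**
 * Zero the UDP checksum of the IPv6/UDP headers in raw encap data.
 *
 * A pre-computed outer UDP checksum would no longer match once the
 * payload is encapsulated, so it is cleared for IPv6/UDP headers;
 * IPv4 header checksums are calculated by HW and left as-is.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting at the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */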
2036 static int
2037 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2038 {
2039         struct rte_ether_hdr *eth = NULL;
2040         struct rte_vlan_hdr *vlan = NULL;
2041         struct rte_ipv6_hdr *ipv6 = NULL;
2042         struct rte_udp_hdr *udp = NULL;
2043         char *next_hdr;
2044         uint16_t proto;
2045
2046         eth = (struct rte_ether_hdr *)data;
2047         next_hdr = (char *)(eth + 1);
2048         proto = RTE_BE16(eth->ether_type);
2049
2050         /* VLAN skipping */
2051         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2052                 vlan = (struct rte_vlan_hdr *)next_hdr;
2053                 proto = RTE_BE16(vlan->eth_proto);
2054                 next_hdr += sizeof(struct rte_vlan_hdr);
2055         }
2056
2057         /* HW calculates the IPv4 checksum, no need to proceed. */
2058         if (proto == RTE_ETHER_TYPE_IPV4)
2059                 return 0;
2060
2061         /* Non IPv4/IPv6 header is not supported. */
2062         if (proto != RTE_ETHER_TYPE_IPV6) {
2063                 return rte_flow_error_set(error, ENOTSUP,
2064                                           RTE_FLOW_ERROR_TYPE_ACTION,
2065                                           NULL, "Cannot offload non IPv4/IPv6");
2066         }
2067
2068         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2069
2070         /* Ignore non-UDP. */
2071         if (ipv6->proto != IPPROTO_UDP)
2072                 return 0;
2073
2074         udp = (struct rte_udp_hdr *)(ipv6 + 1);
2075         udp->dgram_cksum = 0;
2076
2077         return 0;
2078 }
2079
2080 /**
2081  * Convert L2 encap action to DV specification.
2082  *
2083  * @param[in] dev
2084  *   Pointer to rte_eth_dev structure.
2085  * @param[in] action
2086  *   Pointer to action structure.
2087  * @param[in, out] dev_flow
2088  *   Pointer to the mlx5_flow.
2089  * @param[in] transfer
2090  *   Mark if the flow is E-Switch flow.
2091  * @param[out] error
2092  *   Pointer to the error structure.
2093  *
2094  * @return
2095  *   0 on success, a negative errno value otherwise and rte_errno is set.
2096  */
2097 static int
2098 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2099                                const struct rte_flow_action *action,
2100                                struct mlx5_flow *dev_flow,
2101                                uint8_t transfer,
2102                                struct rte_flow_error *error)
2103 {
2104         const struct rte_flow_item *encap_data;
2105         const struct rte_flow_action_raw_encap *raw_encap_data;
2106         struct mlx5_flow_dv_encap_decap_resource res = {
2107                 .reformat_type =
2108                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2109                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2110                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2111         };
2112
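        /*
         * A raw encap configuration already provides the full header
         * buffer, while VXLAN/NVGRE encap configurations provide an item
         * list which has to be converted to a raw buffer first.
         */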
2113         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2114                 raw_encap_data =
2115                         (const struct rte_flow_action_raw_encap *)action->conf;
2116                 res.size = raw_encap_data->size;
2117                 memcpy(res.buf, raw_encap_data->data, res.size);
2118                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2119                         return -rte_errno;
2120         } else {
2121                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2122                         encap_data =
2123                                 ((const struct rte_flow_action_vxlan_encap *)
2124                                                 action->conf)->definition;
2125                 else
2126                         encap_data =
2127                                 ((const struct rte_flow_action_nvgre_encap *)
2128                                                 action->conf)->definition;
2129                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2130                                                &res.size, error))
2131                         return -rte_errno;
2132         }
2133         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2134                 return rte_flow_error_set(error, EINVAL,
2135                                           RTE_FLOW_ERROR_TYPE_ACTION,
2136                                           NULL, "can't create L2 encap action");
2137         return 0;
2138 }
2139
2140 /**
2141  * Convert L2 decap action to DV specification.
2142  *
2143  * @param[in] dev
2144  *   Pointer to rte_eth_dev structure.
2145  * @param[in, out] dev_flow
2146  *   Pointer to the mlx5_flow.
2147  * @param[in] transfer
2148  *   Mark if the flow is E-Switch flow.
2149  * @param[out] error
2150  *   Pointer to the error structure.
2151  *
2152  * @return
2153  *   0 on success, a negative errno value otherwise and rte_errno is set.
2154  */
2155 static int
2156 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2157                                struct mlx5_flow *dev_flow,
2158                                uint8_t transfer,
2159                                struct rte_flow_error *error)
2160 {
2161         struct mlx5_flow_dv_encap_decap_resource res = {
2162                 .size = 0,
2163                 .reformat_type =
2164                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2165                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2166                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2167         };
2168
2169         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2170                 return rte_flow_error_set(error, EINVAL,
2171                                           RTE_FLOW_ERROR_TYPE_ACTION,
2172                                           NULL, "can't create L2 decap action");
2173         return 0;
2174 }
2175
2176 /**
2177  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2178  *
2179  * @param[in] dev
2180  *   Pointer to rte_eth_dev structure.
2181  * @param[in] action
2182  *   Pointer to action structure.
2183  * @param[in, out] dev_flow
2184  *   Pointer to the mlx5_flow.
2185  * @param[in] attr
2186  *   Pointer to the flow attributes.
2187  * @param[out] error
2188  *   Pointer to the error structure.
2189  *
2190  * @return
2191  *   0 on success, a negative errno value otherwise and rte_errno is set.
2192  */
2193 static int
2194 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2195                                 const struct rte_flow_action *action,
2196                                 struct mlx5_flow *dev_flow,
2197                                 const struct rte_flow_attr *attr,
2198                                 struct rte_flow_error *error)
2199 {
2200         const struct rte_flow_action_raw_encap *encap_data;
2201         struct mlx5_flow_dv_encap_decap_resource res;
2202
2203         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2204         res.size = encap_data->size;
2205         memcpy(res.buf, encap_data->data, res.size);
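        /* Egress reformats L2 into an L3 tunnel (encap), otherwise the
         * L3 tunnel header is restored back to L2 (decap). */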
2206         res.reformat_type = attr->egress ?
2207                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2208                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2209         if (attr->transfer)
2210                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2211         else
2212                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2213                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2214         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2215                 return rte_flow_error_set(error, EINVAL,
2216                                           RTE_FLOW_ERROR_TYPE_ACTION,
2217                                           NULL, "can't create encap action");
2218         return 0;
2219 }
2220
2221 /**
2222  * Create action push VLAN.
2223  *
2224  * @param[in] dev
2225  *   Pointer to rte_eth_dev structure.
2226  * @param[in] attr
2227  *   Pointer to the flow attributes.
2228  * @param[in] vlan
2229  *   Pointer to the VLAN header to push onto the Ethernet frame.
2230  * @param[in, out] dev_flow
2231  *   Pointer to the mlx5_flow.
2232  * @param[out] error
2233  *   Pointer to the error structure.
2234  *
2235  * @return
2236  *   0 on success, a negative errno value otherwise and rte_errno is set.
2237  */
2238 static int
2239 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2240                                 const struct rte_flow_attr *attr,
2241                                 const struct rte_vlan_hdr *vlan,
2242                                 struct mlx5_flow *dev_flow,
2243                                 struct rte_flow_error *error)
2244 {
2245         struct mlx5_flow_dv_push_vlan_action_resource res;
2246
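        /* The 32-bit tag holds the Ethernet protocol in the upper half
         * and the TCI in the lower half, in network byte order. */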
2247         res.vlan_tag =
2248                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2249                                  vlan->vlan_tci);
2250         if (attr->transfer)
2251                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2252         else
2253                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2254                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2255         return flow_dv_push_vlan_action_resource_register
2256                                             (dev, &res, dev_flow, error);
2257 }
2258
2259 /**
2260  * Validate the modify-header actions.
2261  *
2262  * @param[in] action_flags
2263  *   Holds the actions detected until now.
2264  * @param[in] action
2265  *   Pointer to the modify action.
2266  * @param[out] error
2267  *   Pointer to error structure.
2268  *
2269  * @return
2270  *   0 on success, a negative errno value otherwise and rte_errno is set.
2271  */
2272 static int
2273 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2274                                    const struct rte_flow_action *action,
2275                                    struct rte_flow_error *error)
2276 {
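        /* DEC_TTL carries no configuration, all other modify-header
         * actions must provide one. */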
2277         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2278                 return rte_flow_error_set(error, EINVAL,
2279                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2280                                           NULL, "action configuration not set");
2281         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2282                 return rte_flow_error_set(error, EINVAL,
2283                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2284                                           "can't have encap action before"
2285                                           " modify action");
2286         return 0;
2287 }
2288
2289 /**
2290  * Validate the modify-header MAC address actions.
2291  *
2292  * @param[in] action_flags
2293  *   Holds the actions detected until now.
2294  * @param[in] action
2295  *   Pointer to the modify action.
2296  * @param[in] item_flags
2297  *   Holds the items detected.
2298  * @param[out] error
2299  *   Pointer to error structure.
2300  *
2301  * @return
2302  *   0 on success, a negative errno value otherwise and rte_errno is set.
2303  */
2304 static int
2305 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2306                                    const struct rte_flow_action *action,
2307                                    const uint64_t item_flags,
2308                                    struct rte_flow_error *error)
2309 {
2310         int ret = 0;
2311
2312         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2313         if (!ret) {
2314                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2315                         return rte_flow_error_set(error, EINVAL,
2316                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2317                                                   NULL,
2318                                                   "no L2 item in pattern");
2319         }
2320         return ret;
2321 }
2322
2323 /**
2324  * Validate the modify-header IPv4 address actions.
2325  *
2326  * @param[in] action_flags
2327  *   Holds the actions detected until now.
2328  * @param[in] action
2329  *   Pointer to the modify action.
2330  * @param[in] item_flags
2331  *   Holds the items detected.
2332  * @param[out] error
2333  *   Pointer to error structure.
2334  *
2335  * @return
2336  *   0 on success, a negative errno value otherwise and rte_errno is set.
2337  */
2338 static int
2339 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2340                                     const struct rte_flow_action *action,
2341                                     const uint64_t item_flags,
2342                                     struct rte_flow_error *error)
2343 {
2344         int ret = 0;
2345
2346         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2347         if (!ret) {
2348                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2349                         return rte_flow_error_set(error, EINVAL,
2350                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2351                                                   NULL,
2352                                                   "no IPv4 item in pattern");
2353         }
2354         return ret;
2355 }
2356
2357 /**
2358  * Validate the modify-header IPv6 address actions.
2359  *
2360  * @param[in] action_flags
2361  *   Holds the actions detected until now.
2362  * @param[in] action
2363  *   Pointer to the modify action.
2364  * @param[in] item_flags
2365  *   Holds the items detected.
2366  * @param[out] error
2367  *   Pointer to error structure.
2368  *
2369  * @return
2370  *   0 on success, a negative errno value otherwise and rte_errno is set.
2371  */
2372 static int
2373 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2374                                     const struct rte_flow_action *action,
2375                                     const uint64_t item_flags,
2376                                     struct rte_flow_error *error)
2377 {
2378         int ret = 0;
2379
2380         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2381         if (!ret) {
2382                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2383                         return rte_flow_error_set(error, EINVAL,
2384                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2385                                                   NULL,
2386                                                   "no IPv6 item in pattern");
2387         }
2388         return ret;
2389 }
2390
2391 /**
2392  * Validate the modify-header TP actions.
2393  *
2394  * @param[in] action_flags
2395  *   Holds the actions detected until now.
2396  * @param[in] action
2397  *   Pointer to the modify action.
2398  * @param[in] item_flags
2399  *   Holds the items detected.
2400  * @param[out] error
2401  *   Pointer to error structure.
2402  *
2403  * @return
2404  *   0 on success, a negative errno value otherwise and rte_errno is set.
2405  */
2406 static int
2407 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2408                                   const struct rte_flow_action *action,
2409                                   const uint64_t item_flags,
2410                                   struct rte_flow_error *error)
2411 {
2412         int ret = 0;
2413
2414         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2415         if (!ret) {
2416                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2417                         return rte_flow_error_set(error, EINVAL,
2418                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2419                                                   NULL, "no transport layer "
2420                                                   "in pattern");
2421         }
2422         return ret;
2423 }
2424
2425 /**
2426  * Validate the modify-header actions of increment/decrement
2427  * TCP Sequence-number.
2428  *
2429  * @param[in] action_flags
2430  *   Holds the actions detected until now.
2431  * @param[in] action
2432  *   Pointer to the modify action.
2433  * @param[in] item_flags
2434  *   Holds the items detected.
2435  * @param[out] error
2436  *   Pointer to error structure.
2437  *
2438  * @return
2439  *   0 on success, a negative errno value otherwise and rte_errno is set.
2440  */
2441 static int
2442 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2443                                        const struct rte_flow_action *action,
2444                                        const uint64_t item_flags,
2445                                        struct rte_flow_error *error)
2446 {
2447         int ret = 0;
2448
2449         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2450         if (!ret) {
2451                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2452                         return rte_flow_error_set(error, EINVAL,
2453                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2454                                                   NULL, "no TCP item in"
2455                                                   " pattern");
2456                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2457                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2458                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2459                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2460                         return rte_flow_error_set(error, EINVAL,
2461                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2462                                                   NULL,
2463                                                   "cannot decrease and increase"
2464                                                   " TCP sequence number"
2465                                                   " at the same time");
2466         }
2467         return ret;
2468 }
2469
2470 /**
2471  * Validate the modify-header actions of increment/decrement
2472  * TCP Acknowledgment number.
2473  *
2474  * @param[in] action_flags
2475  *   Holds the actions detected until now.
2476  * @param[in] action
2477  *   Pointer to the modify action.
2478  * @param[in] item_flags
2479  *   Holds the items detected.
2480  * @param[out] error
2481  *   Pointer to error structure.
2482  *
2483  * @return
2484  *   0 on success, a negative errno value otherwise and rte_errno is set.
2485  */
2486 static int
2487 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2488                                        const struct rte_flow_action *action,
2489                                        const uint64_t item_flags,
2490                                        struct rte_flow_error *error)
2491 {
2492         int ret = 0;
2493
2494         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2495         if (!ret) {
2496                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2497                         return rte_flow_error_set(error, EINVAL,
2498                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2499                                                   NULL, "no TCP item in"
2500                                                   " pattern");
2501                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2502                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2503                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2504                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2505                         return rte_flow_error_set(error, EINVAL,
2506                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2507                                                   NULL,
2508                                                   "cannot decrease and increase"
2509                                                   " TCP acknowledgment number"
2510                                                   " at the same time");
2511         }
2512         return ret;
2513 }
2514
2515 /**
2516  * Validate the modify-header TTL actions.
2517  *
2518  * @param[in] action_flags
2519  *   Holds the actions detected until now.
2520  * @param[in] action
2521  *   Pointer to the modify action.
2522  * @param[in] item_flags
2523  *   Holds the items detected.
2524  * @param[out] error
2525  *   Pointer to error structure.
2526  *
2527  * @return
2528  *   0 on success, a negative errno value otherwise and rte_errno is set.
2529  */
2530 static int
2531 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2532                                    const struct rte_flow_action *action,
2533                                    const uint64_t item_flags,
2534                                    struct rte_flow_error *error)
2535 {
2536         int ret = 0;
2537
2538         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2539         if (!ret) {
2540                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2541                         return rte_flow_error_set(error, EINVAL,
2542                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2543                                                   NULL,
2544                                                   "no IP protocol in pattern");
2545         }
2546         return ret;
2547 }
2548
2549 /**
2550  * Validate jump action.
2551  *
2552  * @param[in] action
2553  *   Pointer to the jump action.
2554  * @param[in] action_flags
2555  *   Holds the actions detected until now.
2556  * @param[in] attributes
2557  *   Pointer to flow attributes
2558  * @param[in] external
2559  *   Action belongs to a flow rule created by a request external to the PMD.
2560  * @param[out] error
2561  *   Pointer to error structure.
2562  *
2563  * @return
2564  *   0 on success, a negative errno value otherwise and rte_errno is set.
2565  */
2566 static int
2567 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2568                              uint64_t action_flags,
2569                              const struct rte_flow_attr *attributes,
2570                              bool external, struct rte_flow_error *error)
2571 {
2572         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2573                                                     MLX5_MAX_TABLES;
2574         uint32_t target_group, table;
2575         int ret = 0;
2576
2577         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2578                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2579                 return rte_flow_error_set(error, EINVAL,
2580                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2581                                           "can't have 2 fate actions in"
2582                                           " the same flow");
2583         if (!action->conf)
2584                 return rte_flow_error_set(error, EINVAL,
2585                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2586                                           NULL, "action configuration not set");
2587         target_group =
2588                 ((const struct rte_flow_action_jump *)action->conf)->group;
2589         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2590                                        &table, error);
2591         if (ret)
2592                 return ret;
2593         if (table >= max_group)
2594                 return rte_flow_error_set(error, EINVAL,
2595                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2596                                           "target group index out of range");
2597         if (attributes->group >= target_group)
2598                 return rte_flow_error_set(error, EINVAL,
2599                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2600                                           "target group must be higher than"
2601                                           " the current flow group");
2602         return 0;
2603 }
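
/*
 * Illustration only (not part of the driver): a minimal jump action
 * configuration that satisfies the checks above - the rule sits in
 * group 0 and jumps to the strictly higher group 1.
 *
 *   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */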
2604
2605 /**
2606  * Validate the port_id action.
2607  *
2608  * @param[in] dev
2609  *   Pointer to rte_eth_dev structure.
2610  * @param[in] action_flags
2611  *   Bit-fields that hold the actions detected until now.
2612  * @param[in] action
2613  *   Port_id RTE action structure.
2614  * @param[in] attr
2615  *   Attributes of flow that includes this action.
2616  * @param[out] error
2617  *   Pointer to error structure.
2618  *
2619  * @return
2620  *   0 on success, a negative errno value otherwise and rte_errno is set.
2621  */
2622 static int
2623 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2624                                 uint64_t action_flags,
2625                                 const struct rte_flow_action *action,
2626                                 const struct rte_flow_attr *attr,
2627                                 struct rte_flow_error *error)
2628 {
2629         const struct rte_flow_action_port_id *port_id;
2630         struct mlx5_priv *act_priv;
2631         struct mlx5_priv *dev_priv;
2632         uint16_t port;
2633
2634         if (!attr->transfer)
2635                 return rte_flow_error_set(error, ENOTSUP,
2636                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2637                                           NULL,
2638                                           "port id action is valid in transfer"
2639                                           " mode only");
2640         if (!action || !action->conf)
2641                 return rte_flow_error_set(error, ENOTSUP,
2642                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2643                                           NULL,
2644                                           "port id action parameters must be"
2645                                           " specified");
2646         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2647                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2648                 return rte_flow_error_set(error, EINVAL,
2649                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2650                                           "can have only one fate action in"
2651                                           " a flow");
2652         dev_priv = mlx5_dev_to_eswitch_info(dev);
2653         if (!dev_priv)
2654                 return rte_flow_error_set(error, rte_errno,
2655                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2656                                           NULL,
2657                                           "failed to obtain E-Switch info");
2658         port_id = action->conf;
2659         port = port_id->original ? dev->data->port_id : port_id->id;
2660         act_priv = mlx5_port_to_eswitch_info(port);
2661         if (!act_priv)
2662                 return rte_flow_error_set
2663                                 (error, rte_errno,
2664                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2665                                  "failed to obtain E-Switch port id for port");
2666         if (act_priv->domain_id != dev_priv->domain_id)
2667                 return rte_flow_error_set
2668                                 (error, EINVAL,
2669                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2670                                  "port does not belong to"
2671                                  " E-Switch being configured");
2672         return 0;
2673 }
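
/*
 * Illustration only: a transfer rule forwarding to E-Switch port 1,
 * matching the constraints above (transfer attribute set, a single
 * fate action, both ports on the same E-Switch domain). With
 * .original set, the issuing device's own port would be used instead
 * of .id.
 *
 *   struct rte_flow_attr attr = { .transfer = 1, .ingress = 1 };
 *   struct rte_flow_action_port_id pid = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */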
2674
2675 /**
2676  * Find existing modify-header resource or create and register a new one.
2677  *
2678  * @param[in, out] dev
2679  *   Pointer to rte_eth_dev structure.
2680  * @param[in, out] resource
2681  *   Pointer to modify-header resource.
2682  * @param[in, out] dev_flow
2683  *   Pointer to the dev_flow.
2684  * @param[out] error
2685  *   Pointer to error structure.
2686  *
2687  * @return
2688  *   0 on success, a negative errno value otherwise and rte_errno is set.
2689  */
2690 static int
2691 flow_dv_modify_hdr_resource_register
2692                         (struct rte_eth_dev *dev,
2693                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2694                          struct mlx5_flow *dev_flow,
2695                          struct rte_flow_error *error)
2696 {
2697         struct mlx5_priv *priv = dev->data->dev_private;
2698         struct mlx5_ibv_shared *sh = priv->sh;
2699         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2700         struct mlx5dv_dr_domain *ns;
2701
2702         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2703                 ns = sh->fdb_domain;
2704         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2705                 ns = sh->tx_domain;
2706         else
2707                 ns = sh->rx_domain;
2708         resource->flags =
2709                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2710         /* Lookup a matching resource from cache. */
2711         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2712                 if (resource->ft_type == cache_resource->ft_type &&
2713                     resource->actions_num == cache_resource->actions_num &&
2714                     resource->flags == cache_resource->flags &&
2715                     !memcmp((const void *)resource->actions,
2716                             (const void *)cache_resource->actions,
2717                             (resource->actions_num *
2718                                             sizeof(resource->actions[0])))) {
2719                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2720                                 (void *)cache_resource,
2721                                 rte_atomic32_read(&cache_resource->refcnt));
2722                         rte_atomic32_inc(&cache_resource->refcnt);
2723                         dev_flow->dv.modify_hdr = cache_resource;
2724                         return 0;
2725                 }
2726         }
2727         /* Register new modify-header resource. */
2728         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2729         if (!cache_resource)
2730                 return rte_flow_error_set(error, ENOMEM,
2731                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2732                                           "cannot allocate resource memory");
2733         *cache_resource = *resource;
2734         cache_resource->verbs_action =
2735                 mlx5_glue->dv_create_flow_action_modify_header
2736                                         (sh->ctx, cache_resource->ft_type,
2737                                          ns, cache_resource->flags,
2738                                          cache_resource->actions_num *
2739                                          sizeof(cache_resource->actions[0]),
2740                                          (uint64_t *)cache_resource->actions);
2741         if (!cache_resource->verbs_action) {
2742                 rte_free(cache_resource);
2743                 return rte_flow_error_set(error, ENOMEM,
2744                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2745                                           NULL, "cannot create action");
2746         }
2747         rte_atomic32_init(&cache_resource->refcnt);
2748         rte_atomic32_inc(&cache_resource->refcnt);
2749         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2750         dev_flow->dv.modify_hdr = cache_resource;
2751         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2752                 (void *)cache_resource,
2753                 rte_atomic32_read(&cache_resource->refcnt));
2754         return 0;
2755 }
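
/*
 * Note: the cache lookup above keys on (ft_type, actions_num, flags)
 * plus a byte-wise comparison of the packed modification actions, so
 * two flows requesting an identical header rewrite share a single
 * verbs action object and only its reference count is bumped.
 */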
2756
2757 #define MLX5_CNT_CONTAINER_RESIZE 64
2758
2759 /**
2760  * Get or create a flow counter.
2761  *
2762  * @param[in] dev
2763  *   Pointer to the Ethernet device structure.
2764  * @param[in] shared
2765  *   Indicate if this counter is shared with other flows.
2766  * @param[in] id
2767  *   Counter identifier.
2768  *
2769  * @return
2770  *   Pointer to the flow counter on success, NULL otherwise and rte_errno is set.
2771  */
2772 static struct mlx5_flow_counter *
2773 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2774                                uint32_t id)
2775 {
2776         struct mlx5_priv *priv = dev->data->dev_private;
2777         struct mlx5_flow_counter *cnt = NULL;
2778         struct mlx5_devx_obj *dcs = NULL;
2779
2780         if (!priv->config.devx) {
2781                 rte_errno = ENOTSUP;
2782                 return NULL;
2783         }
2784         if (shared) {
2785                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2786                         if (cnt->shared && cnt->id == id) {
2787                                 cnt->ref_cnt++;
2788                                 return cnt;
2789                         }
2790                 }
2791         }
2792         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2793         if (!dcs)
2794                 return NULL;
2795         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2796         if (!cnt) {
2797                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2798                 rte_errno = ENOMEM;
2799                 return NULL;
2800         }
2801         struct mlx5_flow_counter tmpl = {
2802                 .shared = shared,
2803                 .ref_cnt = 1,
2804                 .id = id,
2805                 .dcs = dcs,
2806         };
2807         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2808         if (!tmpl.action) {
2809                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2810                 rte_errno = errno;
2811                 rte_free(cnt);
2812                 return NULL;
2813         }
2814         *cnt = tmpl;
2815         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2816         return cnt;
2817 }
2818
2819 /**
2820  * Release a flow counter.
2821  *
2822  * @param[in] dev
2823  *   Pointer to the Ethernet device structure.
2824  * @param[in] counter
2825  *   Pointer to the counter handler.
2826  */
2827 static void
2828 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2829                                  struct mlx5_flow_counter *counter)
2830 {
2831         struct mlx5_priv *priv = dev->data->dev_private;
2832
2833         if (!counter)
2834                 return;
2835         if (--counter->ref_cnt == 0) {
2836                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2837                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2838                 rte_free(counter);
2839         }
2840 }
2841
2842 /**
2843  * Query a devx flow counter.
2844  *
2845  * @param[in] dev
2846  *   Pointer to the Ethernet device structure.
2847  * @param[in] cnt
2848  *   Pointer to the flow counter.
2849  * @param[out] pkts
2850  *   The statistics value of packets.
2851  * @param[out] bytes
2852  *   The statistics value of bytes.
2853  *
2854  * @return
2855  *   0 on success, otherwise a negative errno value and rte_errno is set.
2856  */
2857 static inline int
2858 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2859                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2860                      uint64_t *bytes)
2861 {
2862         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2863                                                 0, NULL, NULL, 0);
2864 }
2865
2866 /**
2867  * Get a pool by a counter.
2868  *
2869  * @param[in] cnt
2870  *   Pointer to the counter.
2871  *
2872  * @return
2873  *   The counter pool.
2874  */
2875 static struct mlx5_flow_counter_pool *
2876 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2877 {
2878         if (!cnt->batch) {
2879                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2880                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2881         }
2882         return cnt->pool;
2883 }
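
/*
 * Layout note (see flow_dv_pool_create()): a pool is one allocation -
 * the mlx5_flow_counter_pool header immediately followed by its
 * counters_raw[] array. For non-batch counters the index inside the
 * pool equals dcs->id % MLX5_COUNTERS_PER_POOL, so stepping the
 * counter pointer back by that index reaches counters_raw[0], and one
 * more pool-header-sized step back reaches the pool itself.
 */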
2884
2885 /**
2886  * Get a pool by devx counter ID.
2887  *
2888  * @param[in] cont
2889  *   Pointer to the counter container.
2890  * @param[in] id
2891  *   The counter devx ID.
2892  *
2893  * @return
2894  *   The counter pool pointer if it exists, NULL otherwise.
2895  */
2896 static struct mlx5_flow_counter_pool *
2897 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2898 {
2899         struct mlx5_flow_counter_pool *pool;
2900
2901         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2902                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2903                                 MLX5_COUNTERS_PER_POOL;
2904
2905                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2906                         return pool;
2907         }
2908         return NULL;
2909 }
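
/*
 * Worked example: if MLX5_COUNTERS_PER_POOL is 512 and a pool's
 * min_dcs->id is 1536, then base = (1536 / 512) * 512 = 1536 and the
 * pool covers devx counter IDs 1536..2047.
 */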
2910
2911 /**
2912  * Allocate memory for the counter values, wrapped by all the needed
2913  * management structures.
2914  *
2915  * @param[in] dev
2916  *   Pointer to the Ethernet device structure.
2917  * @param[in] raws_n
2918  *   The number of raw memory areas, each one holding MLX5_COUNTERS_PER_POOL counters.
2919  *
2920  * @return
2921  *   The new memory management pointer on success, otherwise NULL and rte_errno
2922  *   is set.
2923  */
2924 static struct mlx5_counter_stats_mem_mng *
2925 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2926 {
2927         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2928                                         (dev->data->dev_private))->sh;
2929         struct mlx5_devx_mkey_attr mkey_attr;
2930         struct mlx5_counter_stats_mem_mng *mem_mng;
2931         volatile struct flow_counter_stats *raw_data;
2932         int size = (sizeof(struct flow_counter_stats) *
2933                         MLX5_COUNTERS_PER_POOL +
2934                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2935                         sizeof(struct mlx5_counter_stats_mem_mng);
2936         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2937         int i;
2938
2939         if (!mem) {
2940                 rte_errno = ENOMEM;
2941                 return NULL;
2942         }
2943         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2944         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2945         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2946                                                  IBV_ACCESS_LOCAL_WRITE);
2947         if (!mem_mng->umem) {
2948                 rte_errno = errno;
2949                 rte_free(mem);
2950                 return NULL;
2951         }
2952         mkey_attr.addr = (uintptr_t)mem;
2953         mkey_attr.size = size;
2954         mkey_attr.umem_id = mem_mng->umem->umem_id;
2955         mkey_attr.pd = sh->pdn;
2956         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2957         if (!mem_mng->dm) {
2958                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2959                 rte_errno = errno;
2960                 rte_free(mem);
2961                 return NULL;
2962         }
2963         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2964         raw_data = (volatile struct flow_counter_stats *)mem;
2965         for (i = 0; i < raws_n; ++i) {
2966                 mem_mng->raws[i].mem_mng = mem_mng;
2967                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2968         }
2969         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2970         return mem_mng;
2971 }
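
/*
 * Resulting layout of the single page-aligned allocation above:
 *
 *   +--------------------------------------------------+ <- mem (raw_data)
 *   | raws_n * MLX5_COUNTERS_PER_POOL stat entries     |
 *   +--------------------------------------------------+ <- mem_mng->raws
 *   | raws_n * struct mlx5_counter_stats_raw           |
 *   +--------------------------------------------------+ <- mem_mng
 *   | struct mlx5_counter_stats_mem_mng                |
 *   +--------------------------------------------------+
 *
 * Only the raw statistics area (the first 'size' bytes, covered by
 * the umem/mkey registration) is exposed to the device for writing.
 */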
2972
2973 /**
2974  * Resize a counter container.
2975  *
2976  * @param[in] dev
2977  *   Pointer to the Ethernet device structure.
2978  * @param[in] batch
2979  *   Whether the pool is for counters that were allocated by the batch command.
2980  *
2981  * @return
2982  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2983  */
2984 static struct mlx5_pools_container *
2985 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2986 {
2987         struct mlx5_priv *priv = dev->data->dev_private;
2988         struct mlx5_pools_container *cont =
2989                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2990         struct mlx5_pools_container *new_cont =
2991                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2992         struct mlx5_counter_stats_mem_mng *mem_mng;
2993         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2994         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2995         int i;
2996
2997         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2998                 /* The last resize hasn't been detected by the host thread yet. */
2999                 rte_errno = EAGAIN;
3000                 return NULL;
3001         }
3002         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
3003         if (!new_cont->pools) {
3004                 rte_errno = ENOMEM;
3005                 return NULL;
3006         }
3007         if (cont->n)
3008                 memcpy(new_cont->pools, cont->pools, cont->n *
3009                        sizeof(struct mlx5_flow_counter_pool *));
3010         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
3011                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
3012         if (!mem_mng) {
3013                 rte_free(new_cont->pools);
3014                 return NULL;
3015         }
3016         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
3017                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
3018                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
3019                                  i, next);
3020         new_cont->n = resize;
3021         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
3022         TAILQ_INIT(&new_cont->pool_list);
3023         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
3024         new_cont->init_mem_mng = mem_mng;
3025         rte_cio_wmb();
3026         /* Flip the master container. */
3027         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
3028         return new_cont;
3029 }
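
/*
 * Note: containers exist in pairs per batch type, with
 * priv->sh->cmng.mhi[batch] selecting the master one. The resize
 * prepares the currently unused container and then flips the index,
 * so the asynchronous query thread keeps a consistent view while the
 * new container is populated - a lock-free double-buffer scheme. The
 * EAGAIN path above rejects a second resize before the host thread
 * has picked up the previous flip.
 */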
3030
3031 /**
3032  * Query a devx flow counter.
3033  *
3034  * @param[in] dev
3035  *   Pointer to the Ethernet device structure.
3036  * @param[in] cnt
3037  *   Pointer to the flow counter.
3038  * @param[out] pkts
3039  *   The statistics value of packets.
3040  * @param[out] bytes
3041  *   The statistics value of bytes.
3042  *
3043  * @return
3044  *   0 on success, otherwise a negative errno value and rte_errno is set.
3045  */
3046 static inline int
3047 _flow_dv_query_count(struct rte_eth_dev *dev,
3048                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
3049                      uint64_t *bytes)
3050 {
3051         struct mlx5_priv *priv = dev->data->dev_private;
3052         struct mlx5_flow_counter_pool *pool =
3053                         flow_dv_counter_pool_get(cnt);
3054         int offset = cnt - &pool->counters_raw[0];
3055
3056         if (priv->counter_fallback)
3057                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
3058
3059         rte_spinlock_lock(&pool->sl);
3060         /*
3061          * The single counter allocation may produce an ID smaller than the
3062          * ones currently allocated, in parallel with the host thread reading.
3063          * In this case the new counter values must be reported as 0.
3064          */
3065         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
3066                 *pkts = 0;
3067                 *bytes = 0;
3068         } else {
3069                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
3070                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
3071         }
3072         rte_spinlock_unlock(&pool->sl);
3073         return 0;
3074 }
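
/*
 * Note: pool->raw is presumably the snapshot most recently completed
 * by the asynchronous host-thread query (see mlx5_set_query_alarm()),
 * so the read above is a plain memory read; pool->sl only serializes
 * it against snapshot replacement.
 */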
3075
3076 /**
3077  * Create and initialize a new counter pool.
3078  *
3079  * @param[in] dev
3080  *   Pointer to the Ethernet device structure.
3081  * @param[out] dcs
3082  *   The devX counter handle.
3083  * @param[in] batch
3084  *   Whether the pool is for counters that were allocated by the batch command.
3085  *
3086  * @return
3087  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
3088  */
3089 static struct mlx5_flow_counter_pool *
3090 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
3091                     uint32_t batch)
3092 {
3093         struct mlx5_priv *priv = dev->data->dev_private;
3094         struct mlx5_flow_counter_pool *pool;
3095         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3096                                                                0);
3097         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
3098         uint32_t size;
3099
3100         if (cont->n == n_valid) {
3101                 cont = flow_dv_container_resize(dev, batch);
3102                 if (!cont)
3103                         return NULL;
3104         }
3105         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
3106                         sizeof(struct mlx5_flow_counter);
3107         pool = rte_calloc(__func__, 1, size, 0);
3108         if (!pool) {
3109                 rte_errno = ENOMEM;
3110                 return NULL;
3111         }
3112         pool->min_dcs = dcs;
3113         pool->raw = cont->init_mem_mng->raws + n_valid %
3114                                                      MLX5_CNT_CONTAINER_RESIZE;
3115         pool->raw_hw = NULL;
3116         rte_spinlock_init(&pool->sl);
3117         /*
3118          * The newly allocated counters in this pool have generation 0; setting
3119          * the pool generation to 2 makes all of them valid for allocation.
3120          */
3121         rte_atomic64_set(&pool->query_gen, 0x2);
3122         TAILQ_INIT(&pool->counters);
3123         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3124         cont->pools[n_valid] = pool;
3125         /* Pool initialization must complete before host thread access. */
3126         rte_cio_wmb();
3127         rte_atomic16_add(&cont->n_valid, 1);
3128         return pool;
3129 }
3130
3131 /**
3132  * Prepare a new counter and/or a new counter pool.
3133  *
3134  * @param[in] dev
3135  *   Pointer to the Ethernet device structure.
3136  * @param[out] cnt_free
3137  *   Where to put the pointer of a new counter.
3138  * @param[in] batch
3139  *   Whether the pool is for counters that were allocated by the batch command.
3140  *
3141  * @return
3142  *   The free counter pool pointer and @p cnt_free is set on success,
3143  *   NULL otherwise and rte_errno is set.
3144  */
3145 static struct mlx5_flow_counter_pool *
3146 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
3147                              struct mlx5_flow_counter **cnt_free,
3148                              uint32_t batch)
3149 {
3150         struct mlx5_priv *priv = dev->data->dev_private;
3151         struct mlx5_flow_counter_pool *pool;
3152         struct mlx5_devx_obj *dcs = NULL;
3153         struct mlx5_flow_counter *cnt;
3154         uint32_t i;
3155
3156         if (!batch) {
3157                 /* bulk_bitmap must be 0 for single counter allocation. */
3158                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3159                 if (!dcs)
3160                         return NULL;
3161                 pool = flow_dv_find_pool_by_id
3162                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
3163                 if (!pool) {
3164                         pool = flow_dv_pool_create(dev, dcs, batch);
3165                         if (!pool) {
3166                                 mlx5_devx_cmd_destroy(dcs);
3167                                 return NULL;
3168                         }
3169                 } else if (dcs->id < pool->min_dcs->id) {
3170                         rte_atomic64_set(&pool->a64_dcs,
3171                                          (int64_t)(uintptr_t)dcs);
3172                 }
3173                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
3174                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3175                 cnt->dcs = dcs;
3176                 *cnt_free = cnt;
3177                 return pool;
3178         }
3179         /* bulk_bitmap is in units of 128 counters. */
3180         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
3181                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
3182         if (!dcs) {
3183                 rte_errno = ENODATA;
3184                 return NULL;
3185         }
3186         pool = flow_dv_pool_create(dev, dcs, batch);
3187         if (!pool) {
3188                 mlx5_devx_cmd_destroy(dcs);
3189                 return NULL;
3190         }
3191         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3192                 cnt = &pool->counters_raw[i];
3193                 cnt->pool = pool;
3194                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3195         }
3196         *cnt_free = &pool->counters_raw[0];
3197         return pool;
3198 }
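
/*
 * Note on the batch path: the 0x4 bulk value above is expressed in
 * units of 128 counters, i.e. 4 * 128 = 512 counters per devx object,
 * which is expected to match MLX5_COUNTERS_PER_POOL so that one bulk
 * allocation fills exactly one pool.
 */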
3199
3200 /**
3201  * Search for an existing shared counter.
3202  *
3203  * @param[in] cont
3204  *   Pointer to the relevant counter pool container.
3205  * @param[in] id
3206  *   The shared counter ID to search.
3207  *
3208  * @return
3209  *   NULL if it does not exist, otherwise a pointer to the shared counter.
3210  */
3211 static struct mlx5_flow_counter *
3212 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3213                               uint32_t id)
3214 {
3215         struct mlx5_flow_counter *cnt;
3216         struct mlx5_flow_counter_pool *pool;
3217         int i;
3218
3219         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3220                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3221                         cnt = &pool->counters_raw[i];
3222                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3223                                 return cnt;
3224                 }
3225         }
3226         return NULL;
3227 }
3228
3229 /**
3230  * Allocate a flow counter.
3231  *
3232  * @param[in] dev
3233  *   Pointer to the Ethernet device structure.
3234  * @param[in] shared
3235  *   Indicate if this counter is shared with other flows.
3236  * @param[in] id
3237  *   Counter identifier.
3238  * @param[in] group
3239  *   Counter flow group.
3240  *
3241  * @return
3242  *   Pointer to the flow counter on success, NULL otherwise and rte_errno is set.
3243  */
3244 static struct mlx5_flow_counter *
3245 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3246                       uint16_t group)
3247 {
3248         struct mlx5_priv *priv = dev->data->dev_private;
3249         struct mlx5_flow_counter_pool *pool = NULL;
3250         struct mlx5_flow_counter *cnt_free = NULL;
3251         /*
3252          * Currently group 0 flow counter cannot be assigned to a flow if it is
3253          * not the first one in the batch counter allocation, so it is better
3254          * to allocate counters one by one for these flows in a separate
3255          * container.
3256          * A counter can be shared between different groups, so shared
3257          * counters must be taken from the single-counter container.
3258          */
3259         uint32_t batch = (group && !shared) ? 1 : 0;
3260         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3261                                                                0);
3262
3263         if (priv->counter_fallback)
3264                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3265         if (!priv->config.devx) {
3266                 rte_errno = ENOTSUP;
3267                 return NULL;
3268         }
3269         if (shared) {
3270                 cnt_free = flow_dv_counter_shared_search(cont, id);
3271                 if (cnt_free) {
3272                         if (cnt_free->ref_cnt + 1 == 0) {
3273                                 rte_errno = E2BIG;
3274                                 return NULL;
3275                         }
3276                         cnt_free->ref_cnt++;
3277                         return cnt_free;
3278                 }
3279         }
3280         /* Pools which have free counters are at the start of the list. */
3281         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3282                 /*
3283                  * The reset values of a freed counter must be updated between
3284                  * its release and its next allocation, so at least one query
3285                  * must be done in that window. Ensure it by saving the query
3286                  * generation at release time.
3287                  * The free list is sorted according to the generation, so if
3288                  * the first counter is not updated yet, neither are all the
3289                  * others.
3290                  */
3291                 cnt_free = TAILQ_FIRST(&pool->counters);
3292                 if (cnt_free && cnt_free->query_gen + 1 <
3293                     rte_atomic64_read(&pool->query_gen))
3294                         break;
3295                 cnt_free = NULL;
3296         }
3297         if (!cnt_free) {
3298                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3299                 if (!pool)
3300                         return NULL;
3301         }
3302         cnt_free->batch = batch;
3303         /* Create a DV counter action only on first-time usage. */
3304         if (!cnt_free->action) {
3305                 uint16_t offset;
3306                 struct mlx5_devx_obj *dcs;
3307
3308                 if (batch) {
3309                         offset = cnt_free - &pool->counters_raw[0];
3310                         dcs = pool->min_dcs;
3311                 } else {
3312                         offset = 0;
3313                         dcs = cnt_free->dcs;
3314                 }
3315                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3316                                         (dcs->obj, offset);
3317                 if (!cnt_free->action) {
3318                         rte_errno = errno;
3319                         return NULL;
3320                 }
3321         }
3322         /* Update the counter reset values. */
3323         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3324                                  &cnt_free->bytes))
3325                 return NULL;
3326         cnt_free->shared = shared;
3327         cnt_free->ref_cnt = 1;
3328         cnt_free->id = id;
3329         if (!priv->sh->cmng.query_thread_on)
3330                 /* Start the asynchronous batch query by the host thread. */
3331                 mlx5_set_query_alarm(priv->sh);
3332         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3333         if (TAILQ_EMPTY(&pool->counters)) {
3334                 /* Move the pool to the end of the container pool list. */
3335                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3336                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3337         }
3338         return cnt_free;
3339 }
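
/*
 * Illustration only: a shared counter reaching this allocator through
 * the COUNT action of a flow rule.
 *
 *   struct rte_flow_action_count count = { .shared = 1, .id = 42 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * Two rules using the same (.shared, .id) pair end up sharing one
 * mlx5_flow_counter with ref_cnt incremented, per the search above.
 */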
3340
3341 /**
3342  * Release a flow counter.
3343  *
3344  * @param[in] dev
3345  *   Pointer to the Ethernet device structure.
3346  * @param[in] counter
3347  *   Pointer to the counter handler.
3348  */
3349 static void
3350 flow_dv_counter_release(struct rte_eth_dev *dev,
3351                         struct mlx5_flow_counter *counter)
3352 {
3353         struct mlx5_priv *priv = dev->data->dev_private;
3354
3355         if (!counter)
3356                 return;
3357         if (priv->counter_fallback) {
3358                 flow_dv_counter_release_fallback(dev, counter);
3359                 return;
3360         }
3361         if (--counter->ref_cnt == 0) {
3362                 struct mlx5_flow_counter_pool *pool =
3363                                 flow_dv_counter_pool_get(counter);
3364
3365                 /* Put the counter at the end - the last updated one. */
3366                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3367                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3368         }
3369 }
3370
3371 /**
3372  * Verify the @p attributes will be correctly understood by the NIC and store
3373  * them in the @p flow if everything is correct.
3374  *
3375  * @param[in] dev
3376  *   Pointer to dev struct.
3377  * @param[in] attributes
3378  *   Pointer to flow attributes
3379  * @param[in] external
3380  *   This flow rule is created by a request external to the PMD.
3381  * @param[out] error
3382  *   Pointer to error structure.
3383  *
3384  * @return
3385  *   0 on success, a negative errno value otherwise and rte_errno is set.
3386  */
3387 static int
3388 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3389                             const struct rte_flow_attr *attributes,
3390                             bool external __rte_unused,
3391                             struct rte_flow_error *error)
3392 {
3393         struct mlx5_priv *priv = dev->data->dev_private;
3394         uint32_t priority_max = priv->config.flow_prio - 1;
3395
3396 #ifndef HAVE_MLX5DV_DR
3397         if (attributes->group)
3398                 return rte_flow_error_set(error, ENOTSUP,
3399                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3400                                           NULL,
3401                                           "groups are not supported");
3402 #else
3403         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3404                                                     MLX5_MAX_TABLES;
3405         uint32_t table;
3406         int ret;
3407
3408         ret = mlx5_flow_group_to_table(attributes, external,
3409                                        attributes->group,
3410                                        &table, error);
3411         if (ret)
3412                 return ret;
3413         if (table >= max_group)
3414                 return rte_flow_error_set(error, EINVAL,
3415                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3416                                           "group index out of range");
3417 #endif
3418         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3419             attributes->priority >= priority_max)
3420                 return rte_flow_error_set(error, ENOTSUP,
3421                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3422                                           NULL,
3423                                           "priority out of range");
3424         if (attributes->transfer) {
3425                 if (!priv->config.dv_esw_en)
3426                         return rte_flow_error_set
3427                                 (error, ENOTSUP,
3428                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3429                                  "E-Switch dr is not supported");
3430                 if (!(priv->representor || priv->master))
3431                         return rte_flow_error_set
3432                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3433                                  NULL, "E-Switch configuration can only be"
3434                                  " done by a master or a representor device");
3435                 if (attributes->egress)
3436                         return rte_flow_error_set
3437                                 (error, ENOTSUP,
3438                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3439                                  "egress is not supported");
3440         }
3441         if (!(attributes->egress ^ attributes->ingress))
3442                 return rte_flow_error_set(error, ENOTSUP,
3443                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3444                                           "must specify exactly one of "
3445                                           "ingress or egress");
3446         return 0;
3447 }
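
/*
 * Illustration only: attribute sets accepted by the checks above must
 * name exactly one direction, e.g.
 *
 *   struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *
 * while { .ingress = 1, .egress = 1 } or an all-zero attribute fails
 * the egress ^ ingress check, and { .transfer = 1, .egress = 1 } is
 * rejected on E-Switch rules.
 */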
3448
3449 /**
3450  * Internal validation function. For validating both actions and items.
3451  *
3452  * @param[in] dev
3453  *   Pointer to the rte_eth_dev structure.
3454  * @param[in] attr
3455  *   Pointer to the flow attributes.
3456  * @param[in] items
3457  *   Pointer to the list of items.
3458  * @param[in] actions
3459  *   Pointer to the list of actions.
3460  * @param[in] external
3461  *   This flow rule is created by a request external to the PMD.
3462  * @param[out] error
3463  *   Pointer to the error structure.
3464  *
3465  * @return
3466  *   0 on success, a negative errno value otherwise and rte_errno is set.
3467  */
3468 static int
3469 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3470                  const struct rte_flow_item items[],
3471                  const struct rte_flow_action actions[],
3472                  bool external, struct rte_flow_error *error)
3473 {
3474         int ret;
3475         uint64_t action_flags = 0;
3476         uint64_t item_flags = 0;
3477         uint64_t last_item = 0;
3478         uint8_t next_protocol = 0xff;
3479         uint16_t ether_type = 0;
3480         int actions_n = 0;
3481         const struct rte_flow_item *gre_item = NULL;
3482         struct rte_flow_item_tcp nic_tcp_mask = {
3483                 .hdr = {
3484                         .tcp_flags = 0xFF,
3485                         .src_port = RTE_BE16(UINT16_MAX),
3486                         .dst_port = RTE_BE16(UINT16_MAX),
3487                 }
3488         };
3489
3490         if (items == NULL)
3491                 return -1;
3492         ret = flow_dv_validate_attributes(dev, attr, external, error);
3493         if (ret < 0)
3494                 return ret;
3495         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3496                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3497                 int type = items->type;
3498
3499                 switch (type) {
3500                 case RTE_FLOW_ITEM_TYPE_VOID:
3501                         break;
3502                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3503                         ret = flow_dv_validate_item_port_id
3504                                         (dev, items, attr, item_flags, error);
3505                         if (ret < 0)
3506                                 return ret;
3507                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3508                         break;
3509                 case RTE_FLOW_ITEM_TYPE_ETH:
3510                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3511                                                           error);
3512                         if (ret < 0)
3513                                 return ret;
3514                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3515                                              MLX5_FLOW_LAYER_OUTER_L2;
3516                         if (items->mask != NULL && items->spec != NULL) {
3517                                 ether_type =
3518                                         ((const struct rte_flow_item_eth *)
3519                                          items->spec)->type;
3520                                 ether_type &=
3521                                         ((const struct rte_flow_item_eth *)
3522                                          items->mask)->type;
3523                                 ether_type = rte_be_to_cpu_16(ether_type);
3524                         } else {
3525                                 ether_type = 0;
3526                         }
3527                         break;
3528                 case RTE_FLOW_ITEM_TYPE_VLAN:
3529                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3530                                                            dev, error);
3531                         if (ret < 0)
3532                                 return ret;
3533                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3534                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3535                         if (items->mask != NULL && items->spec != NULL) {
3536                                 ether_type =
3537                                         ((const struct rte_flow_item_vlan *)
3538                                          items->spec)->inner_type;
3539                                 ether_type &=
3540                                         ((const struct rte_flow_item_vlan *)
3541                                          items->mask)->inner_type;
3542                                 ether_type = rte_be_to_cpu_16(ether_type);
3543                         } else {
3544                                 ether_type = 0;
3545                         }
3546                         break;
3547                 case RTE_FLOW_ITEM_TYPE_IPV4:
3548                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3549                                                   &item_flags, &tunnel);
3550                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3551                                                            last_item,
3552                                                            ether_type, NULL,
3553                                                            error);
3554                         if (ret < 0)
3555                                 return ret;
3556                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3557                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3558                         if (items->mask != NULL &&
3559                             ((const struct rte_flow_item_ipv4 *)
3560                              items->mask)->hdr.next_proto_id) {
3561                                 next_protocol =
3562                                         ((const struct rte_flow_item_ipv4 *)
3563                                          (items->spec))->hdr.next_proto_id;
3564                                 next_protocol &=
3565                                         ((const struct rte_flow_item_ipv4 *)
3566                                          (items->mask))->hdr.next_proto_id;
3567                         } else {
3568                                 /* Reset for inner layer. */
3569                                 next_protocol = 0xff;
3570                         }
3571                         break;
3572                 case RTE_FLOW_ITEM_TYPE_IPV6:
3573                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3574                                                   &item_flags, &tunnel);
3575                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3576                                                            last_item,
3577                                                            ether_type, NULL,
3578                                                            error);
3579                         if (ret < 0)
3580                                 return ret;
3581                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3582                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3583                         if (items->mask != NULL &&
3584                             ((const struct rte_flow_item_ipv6 *)
3585                              items->mask)->hdr.proto) {
3586                                 next_protocol =
3587                                         ((const struct rte_flow_item_ipv6 *)
3588                                          items->spec)->hdr.proto;
3589                                 next_protocol &=
3590                                         ((const struct rte_flow_item_ipv6 *)
3591                                          items->mask)->hdr.proto;
3592                         } else {
3593                                 /* Reset for inner layer. */
3594                                 next_protocol = 0xff;
3595                         }
3596                         break;
3597                 case RTE_FLOW_ITEM_TYPE_TCP:
3598                         ret = mlx5_flow_validate_item_tcp
3599                                                 (items, item_flags,
3600                                                  next_protocol,
3601                                                  &nic_tcp_mask,
3602                                                  error);
3603                         if (ret < 0)
3604                                 return ret;
3605                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3606                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3607                         break;
3608                 case RTE_FLOW_ITEM_TYPE_UDP:
3609                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3610                                                           next_protocol,
3611                                                           error);
3612                         if (ret < 0)
3613                                 return ret;
3614                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3615                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3616                         break;
3617                 case RTE_FLOW_ITEM_TYPE_GRE:
3618                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3619                                                           next_protocol, error);
3620                         if (ret < 0)
3621                                 return ret;
3622                         gre_item = items;
3623                         last_item = MLX5_FLOW_LAYER_GRE;
3624                         break;
3625                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3626                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3627                                                             next_protocol,
3628                                                             error);
3629                         if (ret < 0)
3630                                 return ret;
3631                         last_item = MLX5_FLOW_LAYER_NVGRE;
3632                         break;
3633                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3634                         ret = mlx5_flow_validate_item_gre_key
3635                                 (items, item_flags, gre_item, error);
3636                         if (ret < 0)
3637                                 return ret;
3638                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3639                         break;
3640                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3641                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3642                                                             error);
3643                         if (ret < 0)
3644                                 return ret;
3645                         last_item = MLX5_FLOW_LAYER_VXLAN;
3646                         break;
3647                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3648                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3649                                                                 item_flags, dev,
3650                                                                 error);
3651                         if (ret < 0)
3652                                 return ret;
3653                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3654                         break;
3655                 case RTE_FLOW_ITEM_TYPE_GENEVE:
3656                         ret = mlx5_flow_validate_item_geneve(items,
3657                                                              item_flags, dev,
3658                                                              error);
3659                         if (ret < 0)
3660                                 return ret;
3661                         last_item = MLX5_FLOW_LAYER_GENEVE;
3662                         break;
3663                 case RTE_FLOW_ITEM_TYPE_MPLS:
3664                         ret = mlx5_flow_validate_item_mpls(dev, items,
3665                                                            item_flags,
3666                                                            last_item, error);
3667                         if (ret < 0)
3668                                 return ret;
3669                         last_item = MLX5_FLOW_LAYER_MPLS;
3670                         break;
3671                 case RTE_FLOW_ITEM_TYPE_META:
3672                         ret = flow_dv_validate_item_meta(dev, items, attr,
3673                                                          error);
3674                         if (ret < 0)
3675                                 return ret;
3676                         last_item = MLX5_FLOW_ITEM_METADATA;
3677                         break;
3678                 case RTE_FLOW_ITEM_TYPE_ICMP:
3679                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3680                                                            next_protocol,
3681                                                            error);
3682                         if (ret < 0)
3683                                 return ret;
3684                         last_item = MLX5_FLOW_LAYER_ICMP;
3685                         break;
3686                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3687                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3688                                                             next_protocol,
3689                                                             error);
3690                         if (ret < 0)
3691                                 return ret;
3692                         last_item = MLX5_FLOW_LAYER_ICMP6;
3693                         break;
3694                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3695                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3696                         break;
3697                 default:
3698                         return rte_flow_error_set(error, ENOTSUP,
3699                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3700                                                   NULL, "item not supported");
3701                 }
3702                 item_flags |= last_item;
3703         }
3704         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3705                 int type = actions->type;
3706                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3707                         return rte_flow_error_set(error, ENOTSUP,
3708                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3709                                                   actions, "too many actions");
3710                 switch (type) {
3711                 case RTE_FLOW_ACTION_TYPE_VOID:
3712                         break;
3713                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3714                         ret = flow_dv_validate_action_port_id(dev,
3715                                                               action_flags,
3716                                                               actions,
3717                                                               attr,
3718                                                               error);
3719                         if (ret)
3720                                 return ret;
3721                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3722                         ++actions_n;
3723                         break;
3724                 case RTE_FLOW_ACTION_TYPE_FLAG:
3725                         ret = mlx5_flow_validate_action_flag(action_flags,
3726                                                              attr, error);
3727                         if (ret < 0)
3728                                 return ret;
3729                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3730                         ++actions_n;
3731                         break;
3732                 case RTE_FLOW_ACTION_TYPE_MARK:
3733                         ret = mlx5_flow_validate_action_mark(actions,
3734                                                              action_flags,
3735                                                              attr, error);
3736                         if (ret < 0)
3737                                 return ret;
3738                         action_flags |= MLX5_FLOW_ACTION_MARK;
3739                         ++actions_n;
3740                         break;
3741                 case RTE_FLOW_ACTION_TYPE_DROP:
3742                         ret = mlx5_flow_validate_action_drop(action_flags,
3743                                                              attr, error);
3744                         if (ret < 0)
3745                                 return ret;
3746                         action_flags |= MLX5_FLOW_ACTION_DROP;
3747                         ++actions_n;
3748                         break;
3749                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3750                         ret = mlx5_flow_validate_action_queue(actions,
3751                                                               action_flags, dev,
3752                                                               attr, error);
3753                         if (ret < 0)
3754                                 return ret;
3755                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3756                         ++actions_n;
3757                         break;
3758                 case RTE_FLOW_ACTION_TYPE_RSS:
3759                         ret = mlx5_flow_validate_action_rss(actions,
3760                                                             action_flags, dev,
3761                                                             attr, item_flags,
3762                                                             error);
3763                         if (ret < 0)
3764                                 return ret;
3765                         action_flags |= MLX5_FLOW_ACTION_RSS;
3766                         ++actions_n;
3767                         break;
3768                 case RTE_FLOW_ACTION_TYPE_COUNT:
3769                         ret = flow_dv_validate_action_count(dev, error);
3770                         if (ret < 0)
3771                                 return ret;
3772                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3773                         ++actions_n;
3774                         break;
3775                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3776                         ret = flow_dv_validate_action_pop_vlan(dev,
3777                                                         action_flags,
3778                                                         actions, item_flags,
3779                                                         attr, error);
3780                         if (ret < 0)
3781                                 return ret;
3782                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3783                         ++actions_n;
3784                         break;
3785                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3786                         ret = flow_dv_validate_action_push_vlan(action_flags,
3787                                                                 item_flags,
3788                                                                 actions, attr,
3789                                                                 error);
3790                         if (ret < 0)
3791                                 return ret;
3792                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3793                         ++actions_n;
3794                         break;
3795                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3796                         ret = flow_dv_validate_action_set_vlan_pcp
3797                                                 (action_flags, actions, error);
3798                         if (ret < 0)
3799                                 return ret;
3800                         /* PCP is counted together with the push_vlan action. */
3801                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
3802                         break;
3803                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3804                         ret = flow_dv_validate_action_set_vlan_vid
3805                                                 (item_flags, action_flags,
3806                                                  actions, error);
3807                         if (ret < 0)
3808                                 return ret;
3809                         /* VID is counted together with the push_vlan action. */
3810                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
3811                         break;
3812                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3813                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3814                         ret = flow_dv_validate_action_l2_encap(action_flags,
3815                                                                actions, attr,
3816                                                                error);
3817                         if (ret < 0)
3818                                 return ret;
3819                         action_flags |= actions->type ==
3820                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3821                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3822                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3823                         ++actions_n;
3824                         break;
3825                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3826                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3827                         ret = flow_dv_validate_action_l2_decap(action_flags,
3828                                                                attr, error);
3829                         if (ret < 0)
3830                                 return ret;
3831                         action_flags |= actions->type ==
3832                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3833                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3834                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3835                         ++actions_n;
3836                         break;
3837                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3838                         ret = flow_dv_validate_action_raw_encap(action_flags,
3839                                                                 actions, attr,
3840                                                                 error);
3841                         if (ret < 0)
3842                                 return ret;
3843                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3844                         ++actions_n;
3845                         break;
3846                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3847                         ret = flow_dv_validate_action_raw_decap(action_flags,
3848                                                                 actions, attr,
3849                                                                 error);
3850                         if (ret < 0)
3851                                 return ret;
3852                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3853                         ++actions_n;
3854                         break;
3855                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3856                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3857                         ret = flow_dv_validate_action_modify_mac(action_flags,
3858                                                                  actions,
3859                                                                  item_flags,
3860                                                                  error);
3861                         if (ret < 0)
3862                                 return ret;
3863                         /* Count all modify-header actions as one action. */
3864                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3865                                 ++actions_n;
3866                         action_flags |= actions->type ==
3867                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3868                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3869                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3870                         break;
3872                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3873                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3874                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3875                                                                   actions,
3876                                                                   item_flags,
3877                                                                   error);
3878                         if (ret < 0)
3879                                 return ret;
3880                         /* Count all modify-header actions as one action. */
3881                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3882                                 ++actions_n;
3883                         action_flags |= actions->type ==
3884                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3885                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3886                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3887                         break;
3888                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3889                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3890                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3891                                                                   actions,
3892                                                                   item_flags,
3893                                                                   error);
3894                         if (ret < 0)
3895                                 return ret;
3896                         /* Count all modify-header actions as one action. */
3897                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3898                                 ++actions_n;
3899                         action_flags |= actions->type ==
3900                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3901                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3902                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3903                         break;
3904                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3905                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3906                         ret = flow_dv_validate_action_modify_tp(action_flags,
3907                                                                 actions,
3908                                                                 item_flags,
3909                                                                 error);
3910                         if (ret < 0)
3911                                 return ret;
3912                         /* Count all modify-header actions as one action. */
3913                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3914                                 ++actions_n;
3915                         action_flags |= actions->type ==
3916                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3917                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3918                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3919                         break;
3920                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3921                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3922                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3923                                                                  actions,
3924                                                                  item_flags,
3925                                                                  error);
3926                         if (ret < 0)
3927                                 return ret;
3928                         /* Count all modify-header actions as one action. */
3929                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3930                                 ++actions_n;
3931                         action_flags |= actions->type ==
3932                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3933                                                 MLX5_FLOW_ACTION_SET_TTL :
3934                                                 MLX5_FLOW_ACTION_DEC_TTL;
3935                         break;
3936                 case RTE_FLOW_ACTION_TYPE_JUMP:
3937                         ret = flow_dv_validate_action_jump(actions,
3938                                                            action_flags,
3939                                                            attr, external,
3940                                                            error);
3941                         if (ret)
3942                                 return ret;
3943                         ++actions_n;
3944                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3945                         break;
3946                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3947                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3948                         ret = flow_dv_validate_action_modify_tcp_seq
3949                                                                 (action_flags,
3950                                                                  actions,
3951                                                                  item_flags,
3952                                                                  error);
3953                         if (ret < 0)
3954                                 return ret;
3955                         /* Count all modify-header actions as one action. */
3956                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3957                                 ++actions_n;
3958                         action_flags |= actions->type ==
3959                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3960                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3961                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3962                         break;
3963                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3964                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3965                         ret = flow_dv_validate_action_modify_tcp_ack
3966                                                                 (action_flags,
3967                                                                  actions,
3968                                                                  item_flags,
3969                                                                  error);
3970                         if (ret < 0)
3971                                 return ret;
3972                         /* Count all modify-header actions as one action. */
3973                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3974                                 ++actions_n;
3975                         action_flags |= actions->type ==
3976                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3977                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3978                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3979                         break;
3980                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
3981                         break;
3982                 default:
3983                         return rte_flow_error_set(error, ENOTSUP,
3984                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3985                                                   actions,
3986                                                   "action not supported");
3987                 }
3988         }
3989         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3990             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
3991                 return rte_flow_error_set(error, ENOTSUP,
3992                                           RTE_FLOW_ERROR_TYPE_ACTION,
3993                                           actions,
3994                                           "can't have VLAN actions on"
3995                                           " tunneled traffic");
3996         /* E-Switch has a few restrictions on using items and actions. */
3997         if (attr->transfer) {
3998                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3999                         return rte_flow_error_set(error, ENOTSUP,
4000                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4001                                                   NULL,
4002                                                   "unsupported action FLAG");
4003                 if (action_flags & MLX5_FLOW_ACTION_MARK)
4004                         return rte_flow_error_set(error, ENOTSUP,
4005                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4006                                                   NULL,
4007                                                   "unsupported action MARK");
4008                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
4009                         return rte_flow_error_set(error, ENOTSUP,
4010                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4011                                                   NULL,
4012                                                   "unsupported action QUEUE");
4013                 if (action_flags & MLX5_FLOW_ACTION_RSS)
4014                         return rte_flow_error_set(error, ENOTSUP,
4015                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4016                                                   NULL,
4017                                                   "unsupported action RSS");
4018                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4019                         return rte_flow_error_set(error, EINVAL,
4020                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4021                                                   actions,
4022                                                   "no fate action is found");
4023         } else {
4024                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
4025                         return rte_flow_error_set(error, EINVAL,
4026                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4027                                                   actions,
4028                                                   "no fate action is found");
4029         }
4030         return 0;
4031 }
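/*
 * Illustrative sketch (not part of the driver): a minimal action list
 * accepted by the validation above for an ingress NIC rule, assuming
 * the device supports flow counters. The queue index is a made-up
 * example value.
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * COUNT and QUEUE each increment actions_n, and QUEUE supplies the
 * fate action required by the ingress check at the end of the loop.
 */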
4032
4033 /**
4034  * Internal preparation function. Allocates the DV flow size;
4035  * this size is constant.
4036  *
4037  * @param[in] attr
4038  *   Pointer to the flow attributes.
4039  * @param[in] items
4040  *   Pointer to the list of items.
4041  * @param[in] actions
4042  *   Pointer to the list of actions.
4043  * @param[out] error
4044  *   Pointer to the error structure.
4045  *
4046  * @return
4047  *   Pointer to mlx5_flow object on success,
4048  *   otherwise NULL and rte_errno is set.
4049  */
4050 static struct mlx5_flow *
4051 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
4052                 const struct rte_flow_item items[] __rte_unused,
4053                 const struct rte_flow_action actions[] __rte_unused,
4054                 struct rte_flow_error *error)
4055 {
4056         uint32_t size = sizeof(struct mlx5_flow);
4057         struct mlx5_flow *flow;
4058
4059         flow = rte_calloc(__func__, 1, size, 0);
4060         if (!flow) {
4061                 rte_flow_error_set(error, ENOMEM,
4062                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4063                                    "not enough memory to create flow");
4064                 return NULL;
4065         }
4066         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
4067         return flow;
4068 }
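/*
 * Caller-side sketch (illustrative only, not the exact engine code):
 * the generic flow layer invokes this prepare step before translation,
 * roughly as
 *
 *	struct mlx5_flow *dev_flow;
 *
 *	dev_flow = flow_dv_prepare(attr, items, actions, error);
 *	if (!dev_flow)
 *		return -rte_errno;
 *
 * rte_errno is valid here because rte_flow_error_set() fills it on the
 * allocation failure path above.
 */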
4069
4070 #ifndef NDEBUG
4071 /**
4072  * Sanity check for match mask and value. Similar to check_valid_spec() in
4073  * the kernel driver. If an unmasked bit is set in the value, return failure.
4074  *
4075  * @param match_mask
4076  *   Pointer to the match mask buffer.
4077  * @param match_value
4078  *   Pointer to the match value buffer.
4079  *
4080  * @return
4081  *   0 if valid, -EINVAL otherwise.
4082  */
4083 static int
4084 flow_dv_check_valid_spec(void *match_mask, void *match_value)
4085 {
4086         uint8_t *m = match_mask;
4087         uint8_t *v = match_value;
4088         unsigned int i;
4089
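        /*
         * Worked example (informational): a value byte 0x1f against a
         * mask byte 0x0f fails below, since bit 4 is set in the value
         * but not covered by the mask.
         */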
4090         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
4091                 if (v[i] & ~m[i]) {
4092                         DRV_LOG(ERR,
4093                                 "match_value differs from match_criteria"
4094                                 " %p[%u] != %p[%u]",
4095                                 match_value, i, match_mask, i);
4096                         return -EINVAL;
4097                 }
4098         }
4099         return 0;
4100 }
4101 #endif
4102
4103 /**
4104  * Add Ethernet item to matcher and to the value.
4105  *
4106  * @param[in, out] matcher
4107  *   Flow matcher.
4108  * @param[in, out] key
4109  *   Flow matcher value.
4110  * @param[in] item
4111  *   Flow pattern to translate.
4112  * @param[in] inner
4113  *   Item is inner pattern.
4114  */
4115 static void
4116 flow_dv_translate_item_eth(void *matcher, void *key,
4117                            const struct rte_flow_item *item, int inner)
4118 {
4119         const struct rte_flow_item_eth *eth_m = item->mask;
4120         const struct rte_flow_item_eth *eth_v = item->spec;
4121         const struct rte_flow_item_eth nic_mask = {
4122                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4123                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4124                 .type = RTE_BE16(0xffff),
4125         };
4126         void *headers_m;
4127         void *headers_v;
4128         char *l24_v;
4129         unsigned int i;
4130
4131         if (!eth_v)
4132                 return;
4133         if (!eth_m)
4134                 eth_m = &nic_mask;
4135         if (inner) {
4136                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4137                                          inner_headers);
4138                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4139         } else {
4140                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4141                                          outer_headers);
4142                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4143         }
4144         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
4145                &eth_m->dst, sizeof(eth_m->dst));
4146         /* The value must be in the range of the mask. */
4147         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
4148         for (i = 0; i < sizeof(eth_m->dst); ++i)
4149                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
4150         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
4151                &eth_m->src, sizeof(eth_m->src));
4152         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
4153         /* The value must be in the range of the mask. */
4154         for (i = 0; i < sizeof(eth_m->src); ++i)
4155                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
4156         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4157                  rte_be_to_cpu_16(eth_m->type));
4158         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
4159         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
4160 }
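/*
 * Note on the masking convention above (informational): the matcher
 * receives the mask and the key receives spec & mask, e.g. dst
 * 00:11:22:33:44:55 with mask ff:ff:ff:00:00:00 programs the key with
 * 00:11:22:00:00:00, so unmasked bits never reach the hardware. The
 * same convention holds for the translate helpers below.
 */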
4161
4162 /**
4163  * Add VLAN item to matcher and to the value.
4164  *
4165  * @param[in, out] dev_flow
4166  *   Flow descriptor.
4167  * @param[in, out] matcher
4168  *   Flow matcher.
4169  * @param[in, out] key
4170  *   Flow matcher value.
4171  * @param[in] item
4172  *   Flow pattern to translate.
4173  * @param[in] inner
4174  *   Item is inner pattern.
4175  */
4176 static void
4177 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
4178                             void *matcher, void *key,
4179                             const struct rte_flow_item *item,
4180                             int inner)
4181 {
4182         const struct rte_flow_item_vlan *vlan_m = item->mask;
4183         const struct rte_flow_item_vlan *vlan_v = item->spec;
4184         void *headers_m;
4185         void *headers_v;
4186         uint16_t tci_m;
4187         uint16_t tci_v;
4188
4189         if (!vlan_v)
4190                 return;
4191         if (!vlan_m)
4192                 vlan_m = &rte_flow_item_vlan_mask;
4193         if (inner) {
4194                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4195                                          inner_headers);
4196                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4197         } else {
4198                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4199                                          outer_headers);
4200                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4201                 /*
4202                  * This is a workaround; VLAN masks are not supported
4203                  * here and have been validated beforehand.
4204                  */
4205                 dev_flow->dv.vf_vlan.tag =
4206                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
4207         }
4208         tci_m = rte_be_to_cpu_16(vlan_m->tci);
4209         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
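        /*
         * TCI layout recap (informational): PCP in bits 15:13, CFI/DEI
         * in bit 12, VID in bits 11:0. E.g. a masked TCI of 0x3001
         * yields prio 1, cfi 1 and vid 1 below; MLX5_SET() truncates
         * each shifted value to its field width.
         */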
4210         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
4211         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
4212         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
4213         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
4214         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
4215         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
4216         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
4217         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
4218         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4219                  rte_be_to_cpu_16(vlan_m->inner_type));
4220         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
4221                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
4222 }
4223
4224 /**
4225  * Add IPV4 item to matcher and to the value.
4226  *
4227  * @param[in, out] matcher
4228  *   Flow matcher.
4229  * @param[in, out] key
4230  *   Flow matcher value.
4231  * @param[in] item
4232  *   Flow pattern to translate.
4233  * @param[in] inner
4234  *   Item is inner pattern.
4235  * @param[in] group
4236  *   The group to insert the rule.
4237  */
4238 static void
4239 flow_dv_translate_item_ipv4(void *matcher, void *key,
4240                             const struct rte_flow_item *item,
4241                             int inner, uint32_t group)
4242 {
4243         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4244         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4245         const struct rte_flow_item_ipv4 nic_mask = {
4246                 .hdr = {
4247                         .src_addr = RTE_BE32(0xffffffff),
4248                         .dst_addr = RTE_BE32(0xffffffff),
4249                         .type_of_service = 0xff,
4250                         .next_proto_id = 0xff,
4251                 },
4252         };
4253         void *headers_m;
4254         void *headers_v;
4255         char *l24_m;
4256         char *l24_v;
4257         uint8_t tos;
4258
4259         if (inner) {
4260                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4261                                          inner_headers);
4262                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4263         } else {
4264                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4265                                          outer_headers);
4266                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4267         }
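        /*
         * Informational: the root table (group 0) is programmed with
         * the full ip_version nibble mask (0xf), while non-root tables
         * use the partial 0x4 mask; the root table path reportedly does
         * not accept a partially masked ip_version field.
         */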
4268         if (group == 0)
4269                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4270         else
4271                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4272         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4273         if (!ipv4_v)
4274                 return;
4275         if (!ipv4_m)
4276                 ipv4_m = &nic_mask;
4277         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4278                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4279         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4280                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4281         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4282         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4283         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4284                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4285         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4286                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4287         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4288         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4289         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4290         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4291                  ipv4_m->hdr.type_of_service);
4292         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4293         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4294                  ipv4_m->hdr.type_of_service >> 2);
4295         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4296         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4297                  ipv4_m->hdr.next_proto_id);
4298         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4299                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4300 }
4301
4302 /**
4303  * Add IPV6 item to matcher and to the value.
4304  *
4305  * @param[in, out] matcher
4306  *   Flow matcher.
4307  * @param[in, out] key
4308  *   Flow matcher value.
4309  * @param[in] item
4310  *   Flow pattern to translate.
4311  * @param[in] inner
4312  *   Item is inner pattern.
4313  * @param[in] group
4314  *   The group to insert the rule.
4315  */
4316 static void
4317 flow_dv_translate_item_ipv6(void *matcher, void *key,
4318                             const struct rte_flow_item *item,
4319                             int inner, uint32_t group)
4320 {
4321         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4322         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4323         const struct rte_flow_item_ipv6 nic_mask = {
4324                 .hdr = {
4325                         .src_addr =
4326                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4327                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4328                         .dst_addr =
4329                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4330                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4331                         .vtc_flow = RTE_BE32(0xffffffff),
4332                         .proto = 0xff,
4333                         .hop_limits = 0xff,
4334                 },
4335         };
4336         void *headers_m;
4337         void *headers_v;
4338         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4339         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4340         char *l24_m;
4341         char *l24_v;
4342         uint32_t vtc_m;
4343         uint32_t vtc_v;
4344         int i;
4345         int size;
4346
4347         if (inner) {
4348                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4349                                          inner_headers);
4350                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4351         } else {
4352                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4353                                          outer_headers);
4354                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4355         }
4356         if (group == 0)
4357                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4358         else
4359                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4360         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4361         if (!ipv6_v)
4362                 return;
4363         if (!ipv6_m)
4364                 ipv6_m = &nic_mask;
4365         size = sizeof(ipv6_m->hdr.dst_addr);
4366         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4367                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4368         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4369                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4370         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4371         for (i = 0; i < size; ++i)
4372                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4373         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4374                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4375         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4376                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4377         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4378         for (i = 0; i < size; ++i)
4379                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
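        /*
         * vtc_flow layout recap (informational): bits 31:28 version,
         * 27:20 traffic class, 19:0 flow label. Hence the shifts below:
         * vtc >> 20 exposes ECN (the low two TC bits), vtc >> 22
         * exposes DSCP, and MLX5_SET() truncates vtc to the 20-bit
         * flow label field.
         */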
4380         /* Traffic class. */
4381         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4382         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
4383         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4384         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4385         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4386         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4387         /* Flow label. */
4388         if (inner) {
4389                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4390                          vtc_m);
4391                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4392                          vtc_v);
4393         } else {
4394                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4395                          vtc_m);
4396                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4397                          vtc_v);
4398         }
4399         /* Protocol. */
4400         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4401                  ipv6_m->hdr.proto);
4402         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4403                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4404 }
4405
4406 /**
4407  * Add TCP item to matcher and to the value.
4408  *
4409  * @param[in, out] matcher
4410  *   Flow matcher.
4411  * @param[in, out] key
4412  *   Flow matcher value.
4413  * @param[in] item
4414  *   Flow pattern to translate.
4415  * @param[in] inner
4416  *   Item is inner pattern.
4417  */
4418 static void
4419 flow_dv_translate_item_tcp(void *matcher, void *key,
4420                            const struct rte_flow_item *item,
4421                            int inner)
4422 {
4423         const struct rte_flow_item_tcp *tcp_m = item->mask;
4424         const struct rte_flow_item_tcp *tcp_v = item->spec;
4425         void *headers_m;
4426         void *headers_v;
4427
4428         if (inner) {
4429                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4430                                          inner_headers);
4431                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4432         } else {
4433                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4434                                          outer_headers);
4435                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4436         }
4437         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4438         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4439         if (!tcp_v)
4440                 return;
4441         if (!tcp_m)
4442                 tcp_m = &rte_flow_item_tcp_mask;
4443         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4444                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4445         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4446                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4447         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4448                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4449         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4450                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4451         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4452                  tcp_m->hdr.tcp_flags);
4453         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4454                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4455 }
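/*
 * Example (sketch): to match SYN-only segments a pattern would carry
 * hdr.tcp_flags spec 0x02 with mask 0xff; the helper above then
 * programs tcp_flags mask 0xff and value 0x02.
 */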
4456
4457 /**
4458  * Add UDP item to matcher and to the value.
4459  *
4460  * @param[in, out] matcher
4461  *   Flow matcher.
4462  * @param[in, out] key
4463  *   Flow matcher value.
4464  * @param[in] item
4465  *   Flow pattern to translate.
4466  * @param[in] inner
4467  *   Item is inner pattern.
4468  */
4469 static void
4470 flow_dv_translate_item_udp(void *matcher, void *key,
4471                            const struct rte_flow_item *item,
4472                            int inner)
4473 {
4474         const struct rte_flow_item_udp *udp_m = item->mask;
4475         const struct rte_flow_item_udp *udp_v = item->spec;
4476         void *headers_m;
4477         void *headers_v;
4478
4479         if (inner) {
4480                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4481                                          inner_headers);
4482                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4483         } else {
4484                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4485                                          outer_headers);
4486                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4487         }
4488         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4489         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4490         if (!udp_v)
4491                 return;
4492         if (!udp_m)
4493                 udp_m = &rte_flow_item_udp_mask;
4494         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4495                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4496         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4497                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4498         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4499                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4500         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4501                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4502 }
4503
4504 /**
4505  * Add GRE optional Key item to matcher and to the value.
4506  *
4507  * @param[in, out] matcher
4508  *   Flow matcher.
4509  * @param[in, out] key
4510  *   Flow matcher value.
4511  * @param[in] item
4512  *   Flow pattern to translate.
4515  */
4516 static void
4517 flow_dv_translate_item_gre_key(void *matcher, void *key,
4518                                    const struct rte_flow_item *item)
4519                                const struct rte_flow_item *item)
4520         const rte_be32_t *key_m = item->mask;
4521         const rte_be32_t *key_v = item->spec;
4522         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4523         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4524         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4525
4526         if (!key_v)
4527                 return;
4528         if (!key_m)
4529                 key_m = &gre_key_default_mask;
4530         /* GRE K bit must be on and should already be validated */
4531         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4532         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
4533         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4534                  rte_be_to_cpu_32(*key_m) >> 8);
4535         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4536                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4537         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4538                  rte_be_to_cpu_32(*key_m) & 0xFF);
4539         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4540                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4541 }
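/*
 * Worked example (informational): a 32-bit GRE key 0x11223344 is split
 * by the helper above into gre_key_h 0x112233 (upper 24 bits) and
 * gre_key_l 0x44 (low 8 bits).
 */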
4542
4543 /**
4544  * Add GRE item to matcher and to the value.
4545  *
4546  * @param[in, out] matcher
4547  *   Flow matcher.
4548  * @param[in, out] key
4549  *   Flow matcher value.
4550  * @param[in] item
4551  *   Flow pattern to translate.
4552  * @param[in] inner
4553  *   Item is inner pattern.
4554  */
4555 static void
4556 flow_dv_translate_item_gre(void *matcher, void *key,
4557                            const struct rte_flow_item *item,
4558                            int inner)
4559 {
4560         const struct rte_flow_item_gre *gre_m = item->mask;
4561         const struct rte_flow_item_gre *gre_v = item->spec;
4562         void *headers_m;
4563         void *headers_v;
4564         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4565         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4566         struct {
4567                 union {
4568                         __extension__
4569                         struct {
4570                                 uint16_t version:3;
4571                                 uint16_t rsvd0:9;
4572                                 uint16_t s_present:1;
4573                                 uint16_t k_present:1;
4574                                 uint16_t rsvd_bit1:1;
4575                                 uint16_t c_present:1;
4576                         };
4577                         uint16_t value;
4578                 };
4579         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4580
4581         if (inner) {
4582                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4583                                          inner_headers);
4584                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4585         } else {
4586                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4587                                          outer_headers);
4588                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4589         }
4590         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4591         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4592         if (!gre_v)
4593                 return;
4594         if (!gre_m)
4595                 gre_m = &rte_flow_item_gre_mask;
4596         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4597                  rte_be_to_cpu_16(gre_m->protocol));
4598         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4599                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4600         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4601         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4602         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4603                  gre_crks_rsvd0_ver_m.c_present);
4604         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4605                  gre_crks_rsvd0_ver_v.c_present &
4606                  gre_crks_rsvd0_ver_m.c_present);
4607         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4608                  gre_crks_rsvd0_ver_m.k_present);
4609         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4610                  gre_crks_rsvd0_ver_v.k_present &
4611                  gre_crks_rsvd0_ver_m.k_present);
4612         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4613                  gre_crks_rsvd0_ver_m.s_present);
4614         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4615                  gre_crks_rsvd0_ver_v.s_present &
4616                  gre_crks_rsvd0_ver_m.s_present);
4617 }
4618
4619 /**
4620  * Add NVGRE item to matcher and to the value.
4621  *
4622  * @param[in, out] matcher
4623  *   Flow matcher.
4624  * @param[in, out] key
4625  *   Flow matcher value.
4626  * @param[in] item
4627  *   Flow pattern to translate.
4628  * @param[in] inner
4629  *   Item is inner pattern.
4630  */
4631 static void
4632 flow_dv_translate_item_nvgre(void *matcher, void *key,
4633                              const struct rte_flow_item *item,
4634                              int inner)
4635 {
4636         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4637         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4638         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4639         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4640         const char *tni_flow_id_m;
4641         const char *tni_flow_id_v;
4642         char *gre_key_m;
4643         char *gre_key_v;
4644         int size;
4645         int i;
4646
4647         /* For NVGRE, GRE header fields must be set with defined values. */
4648         const struct rte_flow_item_gre gre_spec = {
4649                 .c_rsvd0_ver = RTE_BE16(0x2000),
4650                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4651         };
4652         const struct rte_flow_item_gre gre_mask = {
4653                 .c_rsvd0_ver = RTE_BE16(0xB000),
4654                 .protocol = RTE_BE16(UINT16_MAX),
4655         };
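        /*
         * Informational: spec 0x2000 sets only the K (key present) bit,
         * and mask 0xB000 covers the C, K and S bits, so NVGRE matches
         * as GRE carrying a key with no checksum or sequence number.
         */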
4656         const struct rte_flow_item gre_item = {
4657                 .spec = &gre_spec,
4658                 .mask = &gre_mask,
4659                 .last = NULL,
4660         };
4661         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4662         if (!nvgre_v)
4663                 return;
4664         if (!nvgre_m)
4665                 nvgre_m = &rte_flow_item_nvgre_mask;
4665         tni_flow_id_m = (const char *)nvgre_m->tni;
4665         tni_flow_id_v = (const char *)nvgre_v->tni;
4666         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4667         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4668         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4669         memcpy(gre_key_m, tni_flow_id_m, size);
4670         for (i = 0; i < size; ++i)
4671                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4672 }
4673
4674 /**
4675  * Add VXLAN item to matcher and to the value.
4676  *
4677  * @param[in, out] matcher
4678  *   Flow matcher.
4679  * @param[in, out] key
4680  *   Flow matcher value.
4681  * @param[in] item
4682  *   Flow pattern to translate.
4683  * @param[in] inner
4684  *   Item is inner pattern.
4685  */
4686 static void
4687 flow_dv_translate_item_vxlan(void *matcher, void *key,
4688                              const struct rte_flow_item *item,
4689                              int inner)
4690 {
4691         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4692         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4693         void *headers_m;
4694         void *headers_v;
4695         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4696         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4697         char *vni_m;
4698         char *vni_v;
4699         uint16_t dport;
4700         int size;
4701         int i;
4702
4703         if (inner) {
4704                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4705                                          inner_headers);
4706                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4707         } else {
4708                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4709                                          outer_headers);
4710                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4711         }
4712         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4713                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
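        /*
         * Informational: the well-known UDP destination port is only
         * forced when the preceding UDP item left udp_dport unmatched,
         * so an explicit non-default port in the pattern is preserved.
         */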
4714         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4715                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4716                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4717         }
4718         if (!vxlan_v)
4719                 return;
4720         if (!vxlan_m)
4721                 vxlan_m = &rte_flow_item_vxlan_mask;
4722         size = sizeof(vxlan_m->vni);
4723         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4724         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4725         memcpy(vni_m, vxlan_m->vni, size);
4726         for (i = 0; i < size; ++i)
4727                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4728 }
4729
4730 /**
4731  * Add Geneve item to matcher and to the value.
4732  *
4733  * @param[in, out] matcher
4734  *   Flow matcher.
4735  * @param[in, out] key
4736  *   Flow matcher value.
4737  * @param[in] item
4738  *   Flow pattern to translate.
4739  * @param[in] inner
4740  *   Item is inner pattern.
4741  */
4743 static void
4744 flow_dv_translate_item_geneve(void *matcher, void *key,
4745                               const struct rte_flow_item *item, int inner)
4746 {
4747         const struct rte_flow_item_geneve *geneve_m = item->mask;
4748         const struct rte_flow_item_geneve *geneve_v = item->spec;
4749         void *headers_m;
4750         void *headers_v;
4751         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4752         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4753         uint16_t dport;
4754         uint16_t gbhdr_m;
4755         uint16_t gbhdr_v;
4756         char *vni_m;
4757         char *vni_v;
4758         size_t size, i;
4759
4760         if (inner) {
4761                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4762                                          inner_headers);
4763                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4764         } else {
4765                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4766                                          outer_headers);
4767                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4768         }
4769         dport = MLX5_UDP_PORT_GENEVE;
4770         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4771                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4772                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4773         }
4774         if (!geneve_v)
4775                 return;
4776         if (!geneve_m)
4777                 geneve_m = &rte_flow_item_geneve_mask;
4778         size = sizeof(geneve_m->vni);
4779         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
4780         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
4781         memcpy(vni_m, geneve_m->vni, size);
4782         for (i = 0; i < size; ++i)
4783                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
4784         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
4785                  rte_be_to_cpu_16(geneve_m->protocol));
4786         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
4787                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
4788         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
4789         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
4790         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
4791                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4792         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
4793                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4794         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
4795                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4796         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
4797                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
4798                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4799 }
4800
4801 /**
4802  * Add MPLS item to matcher and to the value.
4803  *
4804  * @param[in, out] matcher
4805  *   Flow matcher.
4806  * @param[in, out] key
4807  *   Flow matcher value.
4808  * @param[in] item
4809  *   Flow pattern to translate.
4810  * @param[in] prev_layer
4811  *   The protocol layer indicated in previous item.
4812  * @param[in] inner
4813  *   Item is inner pattern.
4814  */
4815 static void
4816 flow_dv_translate_item_mpls(void *matcher, void *key,
4817                             const struct rte_flow_item *item,
4818                             uint64_t prev_layer,
4819                             int inner)
4820 {
4821         const uint32_t *in_mpls_m = item->mask;
4822         const uint32_t *in_mpls_v = item->spec;
4823         uint32_t *out_mpls_m = NULL;
4824         uint32_t *out_mpls_v = NULL;
4825         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4826         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4827         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4828                                      misc_parameters_2);
4829         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4830         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4831         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4832
4833         switch (prev_layer) {
4834         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4835                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4836                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4837                          MLX5_UDP_PORT_MPLS);
4838                 break;
4839         case MLX5_FLOW_LAYER_GRE:
4840                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4841                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4842                          RTE_ETHER_TYPE_MPLS);
4843                 break;
4844         default:
4845                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4846                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4847                          IPPROTO_MPLS);
4848                 break;
4849         }
4850         if (!in_mpls_v)
4851                 return;
4852         if (!in_mpls_m)
4853                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4854         switch (prev_layer) {
4855         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4856                 out_mpls_m =
4857                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4858                                                  outer_first_mpls_over_udp);
4859                 out_mpls_v =
4860                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4861                                                  outer_first_mpls_over_udp);
4862                 break;
4863         case MLX5_FLOW_LAYER_GRE:
4864                 out_mpls_m =
4865                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4866                                                  outer_first_mpls_over_gre);
4867                 out_mpls_v =
4868                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4869                                                  outer_first_mpls_over_gre);
4870                 break;
4871         default:
4872                 /* Inner MPLS not over GRE is not supported. */
4873                 if (!inner) {
4874                         out_mpls_m =
4875                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4876                                                          misc2_m,
4877                                                          outer_first_mpls);
4878                         out_mpls_v =
4879                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4880                                                          misc2_v,
4881                                                          outer_first_mpls);
4882                 }
4883                 break;
4884         }
4885         if (out_mpls_m && out_mpls_v) {
4886                 *out_mpls_m = *in_mpls_m;
4887                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4888         }
4889 }
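/*
 * Recap (informational): MPLS is located by its encapsulation in the
 * helper above -- UDP dport 6635 (MLX5_UDP_PORT_MPLS), GRE protocol
 * 0x8847 (RTE_ETHER_TYPE_MPLS), or IP protocol 137 (IPPROTO_MPLS)
 * otherwise.
 */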
4890
4891 /**
4892  * Add META item to matcher and to the value.
4893  *
4894  * @param[in, out] matcher
4895  *   Flow matcher.
4896  * @param[in, out] key
4897  *   Flow matcher value.
4898  * @param[in] item
4899  *   Flow pattern to translate.
4902  */
4903 static void
4904 flow_dv_translate_item_meta(void *matcher, void *key,
4905                             const struct rte_flow_item *item)
4906 {
4907         const struct rte_flow_item_meta *meta_m;
4908         const struct rte_flow_item_meta *meta_v;
4909         void *misc2_m =
4910                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4911         void *misc2_v =
4912                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4913
4914         meta_m = (const void *)item->mask;
4915         if (!meta_m)
4916                 meta_m = &rte_flow_item_meta_mask;
4917         meta_v = (const void *)item->spec;
4918         if (meta_v) {
4919                 MLX5_SET(fte_match_set_misc2, misc2_m,
4920                          metadata_reg_a, meta_m->data);
4921                 MLX5_SET(fte_match_set_misc2, misc2_v,
4922                          metadata_reg_a, meta_v->data & meta_m->data);
4923         }
4924 }
4925
4926 /**
4927  * Add vport metadata Reg C0 item to matcher and to the value.
4928  *
4929  * @param[in, out] matcher
4930  *   Flow matcher.
4931  * @param[in, out] key
4932  *   Flow matcher value.
4933  * @param[in] value
4934  *   Metadata register C0 value to match.
4935  * @param[in] mask
4936  *   Metadata register C0 mask to match.
4935  */
4936 static void
4937 flow_dv_translate_item_meta_vport(void *matcher, void *key,
4938                                   uint32_t value, uint32_t mask)
4939 {
4940         void *misc2_m =
4941                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4942         void *misc2_v =
4943                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4944
4945         MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
4946         MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
4947 }
4948
4949 /**
4950  * Add tag item to matcher
4951  *
4952  * @param[in, out] matcher
4953  *   Flow matcher.
4954  * @param[in, out] key
4955  *   Flow matcher value.
4956  * @param[in] item
4957  *   Flow pattern to translate.
4958  */
4959 static void
4960 flow_dv_translate_item_tag(void *matcher, void *key,
4961                            const struct rte_flow_item *item)
4962 {
4963         void *misc2_m =
4964                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4965         void *misc2_v =
4966                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4967         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
4968         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
4969         enum modify_reg reg = tag_v->id;
4970         rte_be32_t value = tag_v->data;
4971         rte_be32_t mask = tag_m->data;
4972
4973         switch (reg) {
4974         case REG_A:
4975                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4976                                 rte_be_to_cpu_32(mask));
4977                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4978                                 rte_be_to_cpu_32(value));
4979                 break;
4980         case REG_B:
4981                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b,
4982                                  rte_be_to_cpu_32(mask));
4983                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b,
4984                                 rte_be_to_cpu_32(value));
4985                 break;
4986         case REG_C_0:
4987                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0,
4988                                  rte_be_to_cpu_32(mask));
4989                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0,
4990                                 rte_be_to_cpu_32(value));
4991                 break;
4992         case REG_C_1:
4993                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1,
4994                                  rte_be_to_cpu_32(mask));
4995                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1,
4996                                 rte_be_to_cpu_32(value));
4997                 break;
4998         case REG_C_2:
4999                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2,
5000                                  rte_be_to_cpu_32(mask));
5001                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2,
5002                                 rte_be_to_cpu_32(value));
5003                 break;
5004         case REG_C_3:
5005                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3,
5006                                  rte_be_to_cpu_32(mask));
5007                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3,
5008                                 rte_be_to_cpu_32(value));
5009                 break;
5010         case REG_C_4:
5011                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4,
5012                                  rte_be_to_cpu_32(mask));
5013                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4,
5014                                 rte_be_to_cpu_32(value));
5015                 break;
5016         case REG_C_5:
5017                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5,
5018                                  rte_be_to_cpu_32(mask));
5019                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5,
5020                                 rte_be_to_cpu_32(value));
5021                 break;
5022         case REG_C_6:
5023                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6,
5024                                  rte_be_to_cpu_32(mask));
5025                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6,
5026                                 rte_be_to_cpu_32(value));
5027                 break;
5028         case REG_C_7:
5029                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7,
5030                                  rte_be_to_cpu_32(mask));
5031                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7,
5032                                 rte_be_to_cpu_32(value));
5033                 break;
5034         }
5035 }
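
/*
 * Usage sketch (illustrative, driver-internal): a tag item selecting
 * REG_C_0 is translated above into a match on metadata_reg_c_0. The
 * field layout is inferred from the accessors above; the data value is
 * hypothetical.
 *
 *	struct mlx5_rte_flow_item_tag tag_spec = {
 *		.id = REG_C_0,
 *		.data = RTE_BE32(0x1),
 *	};
 *	struct mlx5_rte_flow_item_tag tag_mask = {
 *		.id = REG_C_0,
 *		.data = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item tag_item = {
 *		.type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
 *		.spec = &tag_spec,
 *		.mask = &tag_mask,
 *	};
 */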
5036
5037 /**
5038  * Add source vport match to the specified matcher.
5039  *
5040  * @param[in, out] matcher
5041  *   Flow matcher.
5042  * @param[in, out] key
5043  *   Flow matcher value.
5044  * @param[in] port
5045  *   Source vport value to match.
5046  * @param[in] mask
5047  *   Mask to apply to the vport value.
5048  */
5049 static void
5050 flow_dv_translate_item_source_vport(void *matcher, void *key,
5051                                     int16_t port, uint16_t mask)
5052 {
5053         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5054         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5055
5056         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
5057         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
5058 }
5059
5060 /**
5061  * Translate port-id item to eswitch match on port-id.
5062  *
5063  * @param[in] dev
5064  *   The device to configure through.
5065  * @param[in, out] matcher
5066  *   Flow matcher.
5067  * @param[in, out] key
5068  *   Flow matcher value.
5069  * @param[in] item
5070  *   Flow pattern to translate.
5071  *
5072  * @return
5073  *   0 on success, a negative errno value otherwise.
5074  */
5075 static int
5076 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
5077                                void *key, const struct rte_flow_item *item)
5078 {
5079         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
5080         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
5081         struct mlx5_priv *priv;
5082         uint16_t mask, id;
5083
5084         mask = pid_m ? pid_m->id : 0xffff;
5085         id = pid_v ? pid_v->id : dev->data->port_id;
5086         priv = mlx5_port_to_eswitch_info(id);
5087         if (!priv)
5088                 return -rte_errno;
5089         /* Translate to vport field or to metadata, depending on mode. */
5090         if (priv->vport_meta_mask)
5091                 flow_dv_translate_item_meta_vport(matcher, key,
5092                                                   priv->vport_meta_tag,
5093                                                   priv->vport_meta_mask);
5094         else
5095                 flow_dv_translate_item_source_vport(matcher, key,
5096                                                     priv->vport_id, mask);
5097         return 0;
5098 }
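
/*
 * Usage sketch (illustrative, not part of the driver): a transfer rule
 * matching traffic entering the E-Switch from DPDK port 0; the port
 * number is hypothetical.
 *
 *	struct rte_flow_item_port_id pid_spec = { .id = 0 };
 *	struct rte_flow_item pid_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
 *		.spec = &pid_spec,
 *		.mask = &rte_flow_item_port_id_mask,
 *	};
 */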
5099
5100 /**
5101  * Add ICMP6 item to matcher and to the value.
5102  *
5103  * @param[in, out] matcher
5104  *   Flow matcher.
5105  * @param[in, out] key
5106  *   Flow matcher value.
5107  * @param[in] item
5108  *   Flow pattern to translate.
5109  * @param[in] inner
5110  *   Item is inner pattern.
5111  */
5112 static void
5113 flow_dv_translate_item_icmp6(void *matcher, void *key,
5114                               const struct rte_flow_item *item,
5115                               int inner)
5116 {
5117         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
5118         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
5119         void *headers_m;
5120         void *headers_v;
5121         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5122                                      misc_parameters_3);
5123         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5124         if (inner) {
5125                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5126                                          inner_headers);
5127                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5128         } else {
5129                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5130                                          outer_headers);
5131                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5132         }
5133         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5134         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
5135         if (!icmp6_v)
5136                 return;
5137         if (!icmp6_m)
5138                 icmp6_m = &rte_flow_item_icmp6_mask;
5139         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
5140         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
5141                  icmp6_v->type & icmp6_m->type);
5142         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
5143         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
5144                  icmp6_v->code & icmp6_m->code);
5145 }
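
/*
 * Usage sketch (illustrative, not part of the driver): matching ICMPv6
 * neighbour solicitation packets (type 135, code 0), which the code
 * above translates into icmpv6_type/icmpv6_code matches.
 *
 *	struct rte_flow_item_icmp6 icmp6_spec = { .type = 135, .code = 0 };
 *	struct rte_flow_item icmp6_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ICMP6,
 *		.spec = &icmp6_spec,
 *		.mask = &rte_flow_item_icmp6_mask,
 *	};
 */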
5146
5147 /**
5148  * Add ICMP item to matcher and to the value.
5149  *
5150  * @param[in, out] matcher
5151  *   Flow matcher.
5152  * @param[in, out] key
5153  *   Flow matcher value.
5154  * @param[in] item
5155  *   Flow pattern to translate.
5156  * @param[in] inner
5157  *   Item is inner pattern.
5158  */
5159 static void
5160 flow_dv_translate_item_icmp(void *matcher, void *key,
5161                             const struct rte_flow_item *item,
5162                             int inner)
5163 {
5164         const struct rte_flow_item_icmp *icmp_m = item->mask;
5165         const struct rte_flow_item_icmp *icmp_v = item->spec;
5166         void *headers_m;
5167         void *headers_v;
5168         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5169                                      misc_parameters_3);
5170         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5171         if (inner) {
5172                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5173                                          inner_headers);
5174                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5175         } else {
5176                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5177                                          outer_headers);
5178                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5179         }
5180         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5181         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
5182         if (!icmp_v)
5183                 return;
5184         if (!icmp_m)
5185                 icmp_m = &rte_flow_item_icmp_mask;
5186         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
5187                  icmp_m->hdr.icmp_type);
5188         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
5189                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
5190         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
5191                  icmp_m->hdr.icmp_code);
5192         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
5193                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
5194 }
5195
5196 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
5197
5198 #define HEADER_IS_ZERO(match_criteria, headers)                              \
5199         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
5200                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
5201
5202 /**
5203  * Calculate flow matcher enable bitmap.
5204  *
5205  * @param match_criteria
5206  *   Pointer to flow matcher criteria.
5207  *
5208  * @return
5209  *   Bitmap of enabled fields.
5210  */
5211 static uint8_t
5212 flow_dv_matcher_enable(uint32_t *match_criteria)
5213 {
5214         uint8_t match_criteria_enable;
5215
5216         match_criteria_enable =
5217                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
5218                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
5219         match_criteria_enable |=
5220                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
5221                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
5222         match_criteria_enable |=
5223                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
5224                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
5225         match_criteria_enable |=
5226                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
5227                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
5228         match_criteria_enable |=
5229                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
5230                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
5231         return match_criteria_enable;
5232 }
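
/*
 * Example (illustrative): a matcher whose mask touches only outer_headers
 * and misc_parameters_2 yields
 *
 *	match_criteria_enable =
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *		(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
 */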
5233
5235 /**
5236  * Get a flow table.
5237  *
5238  * @param[in, out] dev
5239  *   Pointer to rte_eth_dev structure.
5240  * @param[in] table_id
5241  *   Table id to use.
5242  * @param[in] egress
5243  *   Direction of the table.
5244  * @param[in] transfer
5245  *   E-Switch or NIC flow.
5246  * @param[out] error
5247  *   Pointer to error structure.
5248  *
5249  * @return
5250  *   Returns table resource based on the index, NULL in case of failure.
5251  */
5252 static struct mlx5_flow_tbl_resource *
5253 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
5254                          uint32_t table_id, uint8_t egress,
5255                          uint8_t transfer,
5256                          struct rte_flow_error *error)
5257 {
5258         struct mlx5_priv *priv = dev->data->dev_private;
5259         struct mlx5_ibv_shared *sh = priv->sh;
5260         struct mlx5_flow_tbl_resource *tbl;
5261
5262 #ifdef HAVE_MLX5DV_DR
5263         if (transfer) {
5264                 tbl = &sh->fdb_tbl[table_id];
5265                 if (!tbl->obj)
5266                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5267                                 (sh->fdb_domain, table_id);
5268         } else if (egress) {
5269                 tbl = &sh->tx_tbl[table_id];
5270                 if (!tbl->obj)
5271                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5272                                 (sh->tx_domain, table_id);
5273         } else {
5274                 tbl = &sh->rx_tbl[table_id];
5275                 if (!tbl->obj)
5276                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5277                                 (sh->rx_domain, table_id);
5278         }
5279         if (!tbl->obj) {
5280                 rte_flow_error_set(error, ENOMEM,
5281                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5282                                    NULL, "cannot create table");
5283                 return NULL;
5284         }
5285         rte_atomic32_inc(&tbl->refcnt);
5286         return tbl;
5287 #else
5288         (void)error;
5289         (void)tbl;
5290         if (transfer)
5291                 return &sh->fdb_tbl[table_id];
5292         else if (egress)
5293                 return &sh->tx_tbl[table_id];
5294         else
5295                 return &sh->rx_tbl[table_id];
5296 #endif
5297 }
5298
5299 /**
5300  * Release a flow table.
5301  *
5302  * @param[in] tbl
5303  *   Table resource to be released.
5304  *
5305  * @return
5306  *   Returns 0 if the table was released, 1 otherwise.
5307  */
5308 static int
5309 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
5310 {
5311         if (!tbl)
5312                 return 0;
5313         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
5314                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
5315                 tbl->obj = NULL;
5316                 return 0;
5317         }
5318         return 1;
5319 }
5320
5321 /**
5322  * Register the flow matcher.
5323  *
5324  * @param[in, out] dev
5325  *   Pointer to rte_eth_dev structure.
5326  * @param[in, out] matcher
5327  *   Pointer to flow matcher.
5328  * @param[in, out] dev_flow
5329  *   Pointer to the dev_flow.
5330  * @param[out] error
5331  *   Pointer to error structure.
5332  *
5333  * @return
5334  *   0 on success, otherwise -errno and errno is set.
5335  */
5336 static int
5337 flow_dv_matcher_register(struct rte_eth_dev *dev,
5338                          struct mlx5_flow_dv_matcher *matcher,
5339                          struct mlx5_flow *dev_flow,
5340                          struct rte_flow_error *error)
5341 {
5342         struct mlx5_priv *priv = dev->data->dev_private;
5343         struct mlx5_ibv_shared *sh = priv->sh;
5344         struct mlx5_flow_dv_matcher *cache_matcher;
5345         struct mlx5dv_flow_matcher_attr dv_attr = {
5346                 .type = IBV_FLOW_ATTR_NORMAL,
5347                 .match_mask = (void *)&matcher->mask,
5348         };
5349         struct mlx5_flow_tbl_resource *tbl = NULL;
5350
5351         /* Lookup from cache. */
5352         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
5353                 if (matcher->crc == cache_matcher->crc &&
5354                     matcher->priority == cache_matcher->priority &&
5355                     matcher->egress == cache_matcher->egress &&
5356                     matcher->group == cache_matcher->group &&
5357                     matcher->transfer == cache_matcher->transfer &&
5358                     !memcmp((const void *)matcher->mask.buf,
5359                             (const void *)cache_matcher->mask.buf,
5360                             cache_matcher->mask.size)) {
5361                         DRV_LOG(DEBUG,
5362                                 "priority %hu use %s matcher %p: refcnt %d++",
5363                                 cache_matcher->priority,
5364                                 cache_matcher->egress ? "tx" : "rx",
5365                                 (void *)cache_matcher,
5366                                 rte_atomic32_read(&cache_matcher->refcnt));
5367                         rte_atomic32_inc(&cache_matcher->refcnt);
5368                         dev_flow->dv.matcher = cache_matcher;
5369                         return 0;
5370                 }
5371         }
5372         /* Register new matcher. */
5373         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
5374         if (!cache_matcher)
5375                 return rte_flow_error_set(error, ENOMEM,
5376                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5377                                           "cannot allocate matcher memory");
5378         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
5379                                        matcher->egress, matcher->transfer,
5380                                        error);
5381         if (!tbl) {
5382                 rte_free(cache_matcher);
5383                 return rte_flow_error_set(error, ENOMEM,
5384                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5385                                           NULL, "cannot create table");
5386         }
5387         *cache_matcher = *matcher;
5388         dv_attr.match_criteria_enable =
5389                 flow_dv_matcher_enable(cache_matcher->mask.buf);
5390         dv_attr.priority = matcher->priority;
5391         if (matcher->egress)
5392                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
5393         cache_matcher->matcher_object =
5394                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
5395         if (!cache_matcher->matcher_object) {
5396                 rte_free(cache_matcher);
5397 #ifdef HAVE_MLX5DV_DR
5398                 flow_dv_tbl_resource_release(tbl);
5399 #endif
5400                 return rte_flow_error_set(error, ENOMEM,
5401                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5402                                           NULL, "cannot create matcher");
5403         }
5404         rte_atomic32_inc(&cache_matcher->refcnt);
5405         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
5406         dev_flow->dv.matcher = cache_matcher;
5407         DRV_LOG(DEBUG, "priority %hu new %s matcher %p: refcnt %d",
5408                 cache_matcher->priority,
5409                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
5410                 rte_atomic32_read(&cache_matcher->refcnt));
5411         rte_atomic32_inc(&tbl->refcnt);
5412         return 0;
5413 }
5414
5415 /**
5416  * Find existing tag resource or create and register a new one.
5417  *
5418  * @param[in, out] dev
5419  *   Pointer to rte_eth_dev structure.
5420  * @param[in, out] resource
5421  *   Pointer to tag resource.
5422  * @param[in, out] dev_flow
5423  *   Pointer to the dev_flow.
5424  * @param[out] error
5425  *   Pointer to error structure.
5426  *
5427  * @return
5428  *   0 on success, otherwise -errno and errno is set.
5429  */
5430 static int
5431 flow_dv_tag_resource_register
5432                         (struct rte_eth_dev *dev,
5433                          struct mlx5_flow_dv_tag_resource *resource,
5434                          struct mlx5_flow *dev_flow,
5435                          struct rte_flow_error *error)
5436 {
5437         struct mlx5_priv *priv = dev->data->dev_private;
5438         struct mlx5_ibv_shared *sh = priv->sh;
5439         struct mlx5_flow_dv_tag_resource *cache_resource;
5440
5441         /* Lookup a matching resource from cache. */
5442         LIST_FOREACH(cache_resource, &sh->tags, next) {
5443                 if (resource->tag == cache_resource->tag) {
5444                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5445                                 (void *)cache_resource,
5446                                 rte_atomic32_read(&cache_resource->refcnt));
5447                         rte_atomic32_inc(&cache_resource->refcnt);
5448                         dev_flow->flow->tag_resource = cache_resource;
5449                         return 0;
5450                 }
5451         }
5452         /* Register new resource. */
5453         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5454         if (!cache_resource)
5455                 return rte_flow_error_set(error, ENOMEM,
5456                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5457                                           "cannot allocate resource memory");
5458         *cache_resource = *resource;
5459         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5460                 (resource->tag);
5461         if (!cache_resource->action) {
5462                 rte_free(cache_resource);
5463                 return rte_flow_error_set(error, ENOMEM,
5464                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5465                                           NULL, "cannot create action");
5466         }
5467         rte_atomic32_init(&cache_resource->refcnt);
5468         rte_atomic32_inc(&cache_resource->refcnt);
5469         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5470         dev_flow->flow->tag_resource = cache_resource;
5471         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5472                 (void *)cache_resource,
5473                 rte_atomic32_read(&cache_resource->refcnt));
5474         return 0;
5475 }
5476
5477 /**
5478  * Release the tag.
5479  *
5480  * @param dev
5481  *   Pointer to Ethernet device.
5482  * @param tag
5483  *   Pointer to the tag resource.
5484  *
5485  * @return
5486  *   1 while a reference on it exists, 0 when freed.
5487  */
5488 static int
5489 flow_dv_tag_release(struct rte_eth_dev *dev,
5490                     struct mlx5_flow_dv_tag_resource *tag)
5491 {
5492         assert(tag);
5493         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5494                 dev->data->port_id, (void *)tag,
5495                 rte_atomic32_read(&tag->refcnt));
5496         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5497                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5498                 LIST_REMOVE(tag, next);
5499                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5500                         dev->data->port_id, (void *)tag);
5501                 rte_free(tag);
5502                 return 0;
5503         }
5504         return 1;
5505 }
5506
5507 /**
5508  * Translate port ID action to vport.
5509  *
5510  * @param[in] dev
5511  *   Pointer to rte_eth_dev structure.
5512  * @param[in] action
5513  *   Pointer to the port ID action.
5514  * @param[out] dst_port_id
5515  *   The target port ID.
5516  * @param[out] error
5517  *   Pointer to the error structure.
5518  *
5519  * @return
5520  *   0 on success, a negative errno value otherwise and rte_errno is set.
5521  */
5522 static int
5523 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5524                                  const struct rte_flow_action *action,
5525                                  uint32_t *dst_port_id,
5526                                  struct rte_flow_error *error)
5527 {
5528         uint32_t port;
5529         struct mlx5_priv *priv;
5530         const struct rte_flow_action_port_id *conf =
5531                         (const struct rte_flow_action_port_id *)action->conf;
5532
5533         port = conf->original ? dev->data->port_id : conf->id;
5534         priv = mlx5_port_to_eswitch_info(port);
5535         if (!priv)
5536                 return rte_flow_error_set(error, -rte_errno,
5537                                           RTE_FLOW_ERROR_TYPE_ACTION,
5538                                           NULL,
5539                                           "No eswitch info was found for port");
5540         if (priv->vport_meta_mask)
5541                 *dst_port_id = priv->vport_meta_tag;
5542         else
5543                 *dst_port_id = priv->vport_id;
5544         return 0;
5545 }
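
/*
 * Usage sketch (illustrative, not part of the driver): redirecting
 * matched traffic to another port on the same E-Switch; the port number
 * is hypothetical.
 *
 *	struct rte_flow_action_port_id pid_conf = { .id = 1 };
 *	struct rte_flow_action pid_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &pid_conf,
 *	};
 */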
5546
5547 /**
5548  * Add Tx queue item to matcher and to the value.
5549  *
5550  * @param[in] dev
5551  *   Pointer to the dev struct.
5552  * @param[in, out] matcher
5553  *   Flow matcher.
5554  * @param[in, out] key
5555  *   Flow matcher value.
5556  * @param[in] item
5557  *   Flow pattern to translate.
5558  */
5561 static void
5562 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
5563                                 void *matcher, void *key,
5564                                 const struct rte_flow_item *item)
5565 {
5566         const struct mlx5_rte_flow_item_tx_queue *queue_m;
5567         const struct mlx5_rte_flow_item_tx_queue *queue_v;
5568         void *misc_m =
5569                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5570         void *misc_v =
5571                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5572         struct mlx5_txq_ctrl *txq;
5573         uint32_t queue;
5574
5576         queue_m = (const void *)item->mask;
5577         if (!queue_m)
5578                 return;
5579         queue_v = (const void *)item->spec;
5580         if (!queue_v)
5581                 return;
5582         txq = mlx5_txq_get(dev, queue_v->queue);
5583         if (!txq)
5584                 return;
5585         queue = txq->obj->sq->id;
5586         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
5587         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
5588                  queue & queue_m->queue);
5589         mlx5_txq_release(dev, queue_v->queue);
5590 }
5591
5592 /**
5593  * Fill the flow with DV spec.
5594  *
5595  * @param[in] dev
5596  *   Pointer to rte_eth_dev structure.
5597  * @param[in, out] dev_flow
5598  *   Pointer to the sub flow.
5599  * @param[in] attr
5600  *   Pointer to the flow attributes.
5601  * @param[in] items
5602  *   Pointer to the list of items.
5603  * @param[in] actions
5604  *   Pointer to the list of actions.
5605  * @param[out] error
5606  *   Pointer to the error structure.
5607  *
5608  * @return
5609  *   0 on success, a negative errno value otherwise and rte_errno is set.
5610  */
5611 static int
5612 flow_dv_translate(struct rte_eth_dev *dev,
5613                   struct mlx5_flow *dev_flow,
5614                   const struct rte_flow_attr *attr,
5615                   const struct rte_flow_item items[],
5616                   const struct rte_flow_action actions[],
5617                   struct rte_flow_error *error)
5618 {
5619         struct mlx5_priv *priv = dev->data->dev_private;
5620         struct rte_flow *flow = dev_flow->flow;
5621         uint64_t item_flags = 0;
5622         uint64_t last_item = 0;
5623         uint64_t action_flags = 0;
5624         uint64_t priority = attr->priority;
5625         struct mlx5_flow_dv_matcher matcher = {
5626                 .mask = {
5627                         .size = sizeof(matcher.mask.buf),
5628                 },
5629         };
5630         int actions_n = 0;
5631         bool actions_end = false;
5632         struct mlx5_flow_dv_modify_hdr_resource res = {
5633                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5634                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5635         };
5636         union flow_dv_attr flow_attr = { .attr = 0 };
5637         struct mlx5_flow_dv_tag_resource tag_resource;
5638         uint32_t modify_action_position = UINT32_MAX;
5639         void *match_mask = matcher.mask.buf;
5640         void *match_value = dev_flow->dv.value.buf;
5641         uint8_t next_protocol = 0xff;
5642         struct rte_vlan_hdr vlan = { 0 };
5643         uint32_t table;
5644         int ret = 0;
5645
5646         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5647                                        &table, error);
5648         if (ret)
5649                 return ret;
5650         flow->group = table;
5651         if (attr->transfer)
5652                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5653         if (priority == MLX5_FLOW_PRIO_RSVD)
5654                 priority = priv->config.flow_prio - 1;
5655         for (; !actions_end; actions++) {
5656                 const struct rte_flow_action_queue *queue;
5657                 const struct rte_flow_action_rss *rss;
5658                 const struct rte_flow_action *action = actions;
5659                 const struct rte_flow_action_count *count = action->conf;
5660                 const uint8_t *rss_key;
5661                 const struct rte_flow_action_jump *jump_data;
5662                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5663                 struct mlx5_flow_tbl_resource *tbl;
5664                 uint32_t port_id = 0;
5665                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5666                 int action_type = actions->type;
5667                 const struct rte_flow_action *found_action = NULL;
5668
5669                 switch (action_type) {
5670                 case RTE_FLOW_ACTION_TYPE_VOID:
5671                         break;
5672                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5673                         if (flow_dv_translate_action_port_id(dev, action,
5674                                                              &port_id, error))
5675                                 return -rte_errno;
5676                         port_id_resource.port_id = port_id;
5677                         if (flow_dv_port_id_action_resource_register
5678                             (dev, &port_id_resource, dev_flow, error))
5679                                 return -rte_errno;
5680                         dev_flow->dv.actions[actions_n++] =
5681                                 dev_flow->dv.port_id_action->action;
5682                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5683                         break;
5684                 case RTE_FLOW_ACTION_TYPE_FLAG:
5685                         tag_resource.tag =
5686                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5687                         if (!flow->tag_resource)
5688                                 if (flow_dv_tag_resource_register
5689                                     (dev, &tag_resource, dev_flow, error))
5690                                         return -rte_errno;
5691                         dev_flow->dv.actions[actions_n++] =
5692                                 flow->tag_resource->action;
5693                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5694                         break;
5695                 case RTE_FLOW_ACTION_TYPE_MARK:
5696                         tag_resource.tag = mlx5_flow_mark_set
5697                               (((const struct rte_flow_action_mark *)
5698                                (actions->conf))->id);
5699                         if (!flow->tag_resource)
5700                                 if (flow_dv_tag_resource_register
5701                                     (dev, &tag_resource, dev_flow, error))
5702                                         return -rte_errno;
5703                         dev_flow->dv.actions[actions_n++] =
5704                                 flow->tag_resource->action;
5705                         action_flags |= MLX5_FLOW_ACTION_MARK;
5706                         break;
5707                 case RTE_FLOW_ACTION_TYPE_DROP:
5708                         action_flags |= MLX5_FLOW_ACTION_DROP;
5709                         break;
5710                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5711                         queue = actions->conf;
5712                         flow->rss.queue_num = 1;
5713                         (*flow->queue)[0] = queue->index;
5714                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5715                         break;
5716                 case RTE_FLOW_ACTION_TYPE_RSS:
5717                         rss = actions->conf;
5718                         if (flow->queue)
5719                                 memcpy((*flow->queue), rss->queue,
5720                                        rss->queue_num * sizeof(uint16_t));
5721                         flow->rss.queue_num = rss->queue_num;
5722                         /* NULL RSS key indicates default RSS key. */
5723                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5724                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5725                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5726                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5727                         flow->rss.level = rss->level;
5728                         action_flags |= MLX5_FLOW_ACTION_RSS;
5729                         break;
5730                 case RTE_FLOW_ACTION_TYPE_COUNT:
5731                         if (!priv->config.devx) {
5732                                 rte_errno = ENOTSUP;
5733                                 goto cnt_err;
5734                         }
5735                         flow->counter = flow_dv_counter_alloc(dev,
5736                                                               count->shared,
5737                                                               count->id,
5738                                                               flow->group);
5739                         if (flow->counter == NULL)
5740                                 goto cnt_err;
5741                         dev_flow->dv.actions[actions_n++] =
5742                                 flow->counter->action;
5743                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5744                         break;
5745 cnt_err:
5746                         if (rte_errno == ENOTSUP)
5747                                 return rte_flow_error_set
5748                                               (error, ENOTSUP,
5749                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5750                                                NULL,
5751                                                "count action not supported");
5752                         else
5753                                 return rte_flow_error_set
5754                                                 (error, rte_errno,
5755                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5756                                                  action,
5757                                                  "cannot create counter"
5758                                                   " object.");
5759                         break;
5760                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5761                         dev_flow->dv.actions[actions_n++] =
5762                                                 priv->sh->pop_vlan_action;
5763                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5764                         break;
5765                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5766                         flow_dev_get_vlan_info_from_items(items, &vlan);
5767                         vlan.eth_proto = rte_be_to_cpu_16
5768                              ((((const struct rte_flow_action_of_push_vlan *)
5769                                                    actions->conf)->ethertype));
5770                         found_action = mlx5_flow_find_action
5771                                         (actions + 1,
5772                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
5773                         if (found_action)
5774                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5775                         found_action = mlx5_flow_find_action
5776                                         (actions + 1,
5777                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
5778                         if (found_action)
5779                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5780                         if (flow_dv_create_action_push_vlan
5781                                             (dev, attr, &vlan, dev_flow, error))
5782                                 return -rte_errno;
5783                         dev_flow->dv.actions[actions_n++] =
5784                                            dev_flow->dv.push_vlan_res->action;
5785                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5786                         break;
5787                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5788                         /* Handled by the OF_PUSH_VLAN action above. */
5789                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
5790                         break;
5791                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5792                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5793                                 break;
5794                         flow_dev_get_vlan_info_from_items(items, &vlan);
5795                         mlx5_update_vlan_vid_pcp(actions, &vlan);
5796                         /* If no VLAN push, this is a modify header action. */
5797                         if (flow_dv_convert_action_modify_vlan_vid
5798                                                         (&res, actions, error))
5799                                 return -rte_errno;
5800                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5801                         break;
5802                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5803                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5804                         if (flow_dv_create_action_l2_encap(dev, actions,
5805                                                            dev_flow,
5806                                                            attr->transfer,
5807                                                            error))
5808                                 return -rte_errno;
5809                         dev_flow->dv.actions[actions_n++] =
5810                                 dev_flow->dv.encap_decap->verbs_action;
5811                         action_flags |= actions->type ==
5812                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5813                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5814                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5815                         break;
5816                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5817                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5818                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5819                                                            attr->transfer,
5820                                                            error))
5821                                 return -rte_errno;
5822                         dev_flow->dv.actions[actions_n++] =
5823                                 dev_flow->dv.encap_decap->verbs_action;
5824                         action_flags |= actions->type ==
5825                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5826                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5827                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5830                         /* Handle encap with preceding decap. */
5831                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5832                                 if (flow_dv_create_action_raw_encap
5833                                         (dev, actions, dev_flow, attr, error))
5834                                         return -rte_errno;
5835                                 dev_flow->dv.actions[actions_n++] =
5836                                         dev_flow->dv.encap_decap->verbs_action;
5837                         } else {
5838                                 /* Handle encap without preceding decap. */
5839                                 if (flow_dv_create_action_l2_encap
5840                                     (dev, actions, dev_flow, attr->transfer,
5841                                      error))
5842                                         return -rte_errno;
5843                                 dev_flow->dv.actions[actions_n++] =
5844                                         dev_flow->dv.encap_decap->verbs_action;
5845                         }
5846                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5847                         break;
5848                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5849                         /* Check if this decap is followed by encap. */
5850                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5851                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5852                                action++) {
5853                         }
5854                         /* Handle decap only if it isn't followed by encap. */
5855                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5856                                 if (flow_dv_create_action_l2_decap
5857                                     (dev, dev_flow, attr->transfer, error))
5858                                         return -rte_errno;
5859                                 dev_flow->dv.actions[actions_n++] =
5860                                         dev_flow->dv.encap_decap->verbs_action;
5861                         }
5862                         /* If decap is followed by encap, handle it at encap. */
5863                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5864                         break;
5865                 case RTE_FLOW_ACTION_TYPE_JUMP:
5866                         jump_data = action->conf;
5867                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5868                                                        jump_data->group, &table,
5869                                                        error);
5870                         if (ret)
5871                                 return ret;
5872                         tbl = flow_dv_tbl_resource_get(dev, table,
5873                                                        attr->egress,
5874                                                        attr->transfer, error);
5875                         if (!tbl)
5876                                 return rte_flow_error_set
5877                                                 (error, errno,
5878                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5879                                                  NULL,
5880                                                  "cannot create jump action.");
5881                         jump_tbl_resource.tbl = tbl;
5882                         if (flow_dv_jump_tbl_resource_register
5883                             (dev, &jump_tbl_resource, dev_flow, error)) {
5884                                 flow_dv_tbl_resource_release(tbl);
5885                                 return rte_flow_error_set
5886                                                 (error, errno,
5887                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5888                                                  NULL,
5889                                                  "cannot create jump action.");
5890                         }
5891                         dev_flow->dv.actions[actions_n++] =
5892                                 dev_flow->dv.jump->action;
5893                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5894                         break;
5895                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5896                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5897                         if (flow_dv_convert_action_modify_mac(&res, actions,
5898                                                               error))
5899                                 return -rte_errno;
5900                         action_flags |= actions->type ==
5901                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5902                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5903                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5904                         break;
5905                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5906                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5907                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5908                                                                error))
5909                                 return -rte_errno;
5910                         action_flags |= actions->type ==
5911                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5912                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5913                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5914                         break;
5915                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5916                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5917                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5918                                                                error))
5919                                 return -rte_errno;
5920                         action_flags |= actions->type ==
5921                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5922                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5923                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5924                         break;
5925                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5926                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5927                         if (flow_dv_convert_action_modify_tp(&res, actions,
5928                                                              items, &flow_attr,
5929                                                              error))
5930                                 return -rte_errno;
5931                         action_flags |= actions->type ==
5932                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5933                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5934                                         MLX5_FLOW_ACTION_SET_TP_DST;
5935                         break;
5936                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5937                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5938                                                                   &flow_attr,
5939                                                                   error))
5940                                 return -rte_errno;
5941                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5942                         break;
5943                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5944                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5945                                                              items, &flow_attr,
5946                                                              error))
5947                                 return -rte_errno;
5948                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5949                         break;
5950                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5951                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5952                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5953                                                                   error))
5954                                 return -rte_errno;
5955                         action_flags |= actions->type ==
5956                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5957                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
5958                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5959                         break;
5961                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5962                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5963                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
5964                                                                   error))
5965                                 return -rte_errno;
5966                         action_flags |= actions->type ==
5967                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5968                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
5969                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
5970                         break;
5971                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5972                         if (flow_dv_convert_action_set_reg(&res, actions,
5973                                                            error))
5974                                 return -rte_errno;
5975                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5976                         break;
5977                 case RTE_FLOW_ACTION_TYPE_END:
5978                         actions_end = true;
5979                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
5980                                 /* Create modify header action if needed. */
5981                                 if (flow_dv_modify_hdr_resource_register
5982                                                                 (dev, &res,
5983                                                                  dev_flow,
5984                                                                  error))
5985                                         return -rte_errno;
5986                                 dev_flow->dv.actions[modify_action_position] =
5987                                         dev_flow->dv.modify_hdr->verbs_action;
5988                         }
5989                         break;
5990                 default:
5991                         break;
5992                 }
5993                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
5994                     modify_action_position == UINT32_MAX)
5995                         modify_action_position = actions_n++;
5996         }
5997         dev_flow->dv.actions_n = actions_n;
5998         dev_flow->actions = action_flags;
5999         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6000                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6001                 int item_type = items->type;
6002
6003                 switch (item_type) {
6004                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6005                         flow_dv_translate_item_port_id(dev, match_mask,
6006                                                        match_value, items);
6007                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6008                         break;
6009                 case RTE_FLOW_ITEM_TYPE_ETH:
6010                         flow_dv_translate_item_eth(match_mask, match_value,
6011                                                    items, tunnel);
6012                         matcher.priority = MLX5_PRIORITY_MAP_L2;
6013                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6014                                              MLX5_FLOW_LAYER_OUTER_L2;
6015                         break;
6016                 case RTE_FLOW_ITEM_TYPE_VLAN:
6017                         flow_dv_translate_item_vlan(dev_flow,
6018                                                     match_mask, match_value,
6019                                                     items, tunnel);
6020                         matcher.priority = MLX5_PRIORITY_MAP_L2;
6021                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
6022                                               MLX5_FLOW_LAYER_INNER_VLAN) :
6023                                              (MLX5_FLOW_LAYER_OUTER_L2 |
6024                                               MLX5_FLOW_LAYER_OUTER_VLAN);
6025                         break;
6026                 case RTE_FLOW_ITEM_TYPE_IPV4:
6027                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6028                                                   &item_flags, &tunnel);
6029                         flow_dv_translate_item_ipv4(match_mask, match_value,
6030                                                     items, tunnel, flow->group);
6031                         matcher.priority = MLX5_PRIORITY_MAP_L3;
6032                         dev_flow->dv.hash_fields |=
6033                                 mlx5_flow_hashfields_adjust
6034                                         (dev_flow, tunnel,
6035                                          MLX5_IPV4_LAYER_TYPES,
6036                                          MLX5_IPV4_IBV_RX_HASH);
6037                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6038                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6039                         if (items->mask != NULL &&
6040                             ((const struct rte_flow_item_ipv4 *)
6041                              items->mask)->hdr.next_proto_id) {
6042                                 next_protocol =
6043                                         ((const struct rte_flow_item_ipv4 *)
6044                                          (items->spec))->hdr.next_proto_id;
6045                                 next_protocol &=
6046                                         ((const struct rte_flow_item_ipv4 *)
6047                                          (items->mask))->hdr.next_proto_id;
6048                         } else {
6049                                 /* Reset for inner layer. */
6050                                 next_protocol = 0xff;
6051                         }
6052                         break;
6053                 case RTE_FLOW_ITEM_TYPE_IPV6:
6054                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6055                                                   &item_flags, &tunnel);
6056                         flow_dv_translate_item_ipv6(match_mask, match_value,
6057                                                     items, tunnel, flow->group);
6058                         matcher.priority = MLX5_PRIORITY_MAP_L3;
6059                         dev_flow->dv.hash_fields |=
6060                                 mlx5_flow_hashfields_adjust
6061                                         (dev_flow, tunnel,
6062                                          MLX5_IPV6_LAYER_TYPES,
6063                                          MLX5_IPV6_IBV_RX_HASH);
6064                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6065                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6066                         if (items->mask != NULL &&
6067                             ((const struct rte_flow_item_ipv6 *)
6068                              items->mask)->hdr.proto) {
6069                                 next_protocol =
6070                                         ((const struct rte_flow_item_ipv6 *)
6071                                          items->spec)->hdr.proto;
6072                                 next_protocol &=
6073                                         ((const struct rte_flow_item_ipv6 *)
6074                                          items->mask)->hdr.proto;
6075                         } else {
6076                                 /* Reset for inner layer. */
6077                                 next_protocol = 0xff;
6078                         }
6079                         break;
6080                 case RTE_FLOW_ITEM_TYPE_TCP:
6081                         flow_dv_translate_item_tcp(match_mask, match_value,
6082                                                    items, tunnel);
6083                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6084                         dev_flow->dv.hash_fields |=
6085                                 mlx5_flow_hashfields_adjust
6086                                         (dev_flow, tunnel, ETH_RSS_TCP,
6087                                          IBV_RX_HASH_SRC_PORT_TCP |
6088                                          IBV_RX_HASH_DST_PORT_TCP);
6089                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6090                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6091                         break;
6092                 case RTE_FLOW_ITEM_TYPE_UDP:
6093                         flow_dv_translate_item_udp(match_mask, match_value,
6094                                                    items, tunnel);
6095                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6096                         dev_flow->dv.hash_fields |=
6097                                 mlx5_flow_hashfields_adjust
6098                                         (dev_flow, tunnel, ETH_RSS_UDP,
6099                                          IBV_RX_HASH_SRC_PORT_UDP |
6100                                          IBV_RX_HASH_DST_PORT_UDP);
6101                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6102                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6103                         break;
6104                 case RTE_FLOW_ITEM_TYPE_GRE:
6105                         flow_dv_translate_item_gre(match_mask, match_value,
6106                                                    items, tunnel);
6107                         last_item = MLX5_FLOW_LAYER_GRE;
6108                         break;
6109                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6110                         flow_dv_translate_item_gre_key(match_mask,
6111                                                        match_value, items);
6112                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6113                         break;
6114                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6115                         flow_dv_translate_item_nvgre(match_mask, match_value,
6116                                                      items, tunnel);
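                        /* NVGRE is a GRE variant; track it as the GRE layer. */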
6117                         last_item = MLX5_FLOW_LAYER_GRE;
6118                         break;
6119                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6120                         flow_dv_translate_item_vxlan(match_mask, match_value,
6121                                                      items, tunnel);
6122                         last_item = MLX5_FLOW_LAYER_VXLAN;
6123                         break;
6124                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
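                        /* The VXLAN translator is reused; matching is on the VNI. */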
6125                         flow_dv_translate_item_vxlan(match_mask, match_value,
6126                                                      items, tunnel);
6127                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6128                         break;
6129                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6130                         flow_dv_translate_item_geneve(match_mask, match_value,
6131                                                       items, tunnel);
6132                         last_item = MLX5_FLOW_LAYER_GENEVE;
6133                         break;
6134                 case RTE_FLOW_ITEM_TYPE_MPLS:
6135                         flow_dv_translate_item_mpls(match_mask, match_value,
6136                                                     items, last_item, tunnel);
6137                         last_item = MLX5_FLOW_LAYER_MPLS;
6138                         break;
6139                 case RTE_FLOW_ITEM_TYPE_META:
6140                         flow_dv_translate_item_meta(match_mask, match_value,
6141                                                     items);
6142                         last_item = MLX5_FLOW_ITEM_METADATA;
6143                         break;
6144                 case RTE_FLOW_ITEM_TYPE_ICMP:
6145                         flow_dv_translate_item_icmp(match_mask, match_value,
6146                                                     items, tunnel);
6147                         last_item = MLX5_FLOW_LAYER_ICMP;
6148                         break;
6149                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6150                         flow_dv_translate_item_icmp6(match_mask, match_value,
6151                                                       items, tunnel);
6152                         last_item = MLX5_FLOW_LAYER_ICMP6;
6153                         break;
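                /*
                 * The two item types below are PMD-internal, not part of
                 * the public rte_flow API; they are generated by the
                 * driver itself, e.g. when a flow is split internally.
                 */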
6154                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6155                         flow_dv_translate_item_tag(match_mask, match_value,
6156                                                    items);
6157                         last_item = MLX5_FLOW_ITEM_TAG;
6158                         break;
6159                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6160                         flow_dv_translate_item_tx_queue(dev, match_mask,
6161                                                         match_value,
6162                                                         items);
6163                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
6164                         break;
6165                 default:
6166                         break;
6167                 }
6168                 item_flags |= last_item;
6169         }
6170         /*
6171          * For ingress traffic, when E-Switch mode is enabled, there
6172          * are two cases where the source port must be set manually:
6173          * the NIC steering rule, and the E-Switch rule where no
6174          * port_id item was found. In both cases the source port is
6175          * set according to the port currently in use.
6176          */
6177         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
6178             (priv->representor || priv->master)) {
6179                 if (flow_dv_translate_item_port_id(dev, match_mask,
6180                                                    match_value, NULL))
6181                         return -rte_errno;
6182         }
6183         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
6184                                          dev_flow->dv.value.buf));
6185         dev_flow->layers = item_flags;
6186         /* Register matcher. */
6187         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
6188                                     matcher.mask.size);
6189         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
6190                                                      matcher.priority);
6191         matcher.egress = attr->egress;
6192         matcher.group = flow->group;
6193         matcher.transfer = attr->transfer;
6194         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
6195                 return -rte_errno;
6196         return 0;
6197 }
6198
6199 /**
6200  * Apply the flow to the NIC.
6201  *
6202  * @param[in] dev
6203  *   Pointer to the Ethernet device structure.
6204  * @param[in, out] flow
6205  *   Pointer to flow structure.
6206  * @param[out] error
6207  *   Pointer to error structure.
6208  *
6209  * @return
6210  *   0 on success, a negative errno value otherwise and rte_errno is set.
6211  */
6212 static int
6213 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
6214               struct rte_flow_error *error)
6215 {
6216         struct mlx5_flow_dv *dv;
6217         struct mlx5_flow *dev_flow;
6218         struct mlx5_priv *priv = dev->data->dev_private;
6219         int n;
6220         int err;
6221
6222         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6223                 dv = &dev_flow->dv;
6224                 n = dv->actions_n;
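                /*
                 * A drop in transfer (E-Switch) rules uses the shared
                 * E-Switch drop action; otherwise a dedicated drop hash
                 * Rx queue provides the drop target.
                 */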
6225                 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
6226                         if (flow->transfer) {
6227                                 dv->actions[n++] = priv->sh->esw_drop_action;
6228                         } else {
6229                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
6230                                 if (!dv->hrxq) {
6231                                         rte_flow_error_set
6232                                                 (error, errno,
6233                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6234                                                  NULL,
6235                                                  "cannot get drop hash queue");
6236                                         goto error;
6237                                 }
6238                                 dv->actions[n++] = dv->hrxq->action;
6239                         }
6240                 } else if (dev_flow->actions &
6241                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
6242                         struct mlx5_hrxq *hrxq;
6243
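                        /*
                         * Reuse a hash Rx queue already matching this RSS
                         * key, hash fields and queue list, or create a new
                         * one; tunnel offloading is requested when inner
                         * layers are matched.
                         */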
6244                         hrxq = mlx5_hrxq_get(dev, flow->key,
6245                                              MLX5_RSS_HASH_KEY_LEN,
6246                                              dv->hash_fields,
6247                                              (*flow->queue),
6248                                              flow->rss.queue_num);
6249                         if (!hrxq) {
6250                                 hrxq = mlx5_hrxq_new
6251                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
6252                                          dv->hash_fields, (*flow->queue),
6253                                          flow->rss.queue_num,
6254                                          !!(dev_flow->layers &
6255                                             MLX5_FLOW_LAYER_TUNNEL));
6256                         }
6257                         if (!hrxq) {
6258                                 rte_flow_error_set
6259                                         (error, rte_errno,
6260                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6261                                          "cannot get hash queue");
6262                                 goto error;
6263                         }
6264                         dv->hrxq = hrxq;
6265                         dv->actions[n++] = dv->hrxq->action;
6266                 }
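                /* Create the rule in hardware with the collected actions. */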
6267                 dv->flow =
6268                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
6269                                                   (void *)&dv->value, n,
6270                                                   dv->actions);
6271                 if (!dv->flow) {
6272                         rte_flow_error_set(error, errno,
6273                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6274                                            NULL,
6275                                            "hardware refuses to create flow");
6276                         goto error;
6277                 }
6278                 if (priv->vmwa_context &&
6279                     dev_flow->dv.vf_vlan.tag &&
6280                     !dev_flow->dv.vf_vlan.created) {
6281                         /*
6282                          * The rule contains the VLAN pattern.
6283                          * For a VF, a VLAN interface is created so
6284                          * that the hypervisor sets up the correct
6285                          * e-Switch vport context.
6286                          */
6287                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
6288                 }
6289         }
6290         return 0;
6291 error:
6292         err = rte_errno; /* Save rte_errno before cleanup. */
6293         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6294                 struct mlx5_flow_dv *dv = &dev_flow->dv;
6295                 if (dv->hrxq) {
6296                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6297                                 mlx5_hrxq_drop_release(dev);
6298                         else
6299                                 mlx5_hrxq_release(dev, dv->hrxq);
6300                         dv->hrxq = NULL;
6301                 }
6302                 if (dev_flow->dv.vf_vlan.tag &&
6303                     dev_flow->dv.vf_vlan.created)
6304                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6305         }
6306         rte_errno = err; /* Restore rte_errno. */
6307         return -rte_errno;
6308 }
6309
6310 /**
6311  * Release the flow matcher.
6312  *
6313  * @param dev
6314  *   Pointer to Ethernet device.
6315  * @param flow
6316  *   Pointer to mlx5_flow.
6317  *
6318  * @return
6319  *   1 while a reference on it exists, 0 when freed.
6320  */
6321 static int
6322 flow_dv_matcher_release(struct rte_eth_dev *dev,
6323                         struct mlx5_flow *flow)
6324 {
6325         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
6326         struct mlx5_priv *priv = dev->data->dev_private;
6327         struct mlx5_ibv_shared *sh = priv->sh;
6328         struct mlx5_flow_tbl_resource *tbl;
6329
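        /*
         * Matchers are shared between flows and reference-counted; the
         * last release also drops the reference held on the flow table.
         */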
6330         assert(matcher->matcher_object);
6331         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
6332                 dev->data->port_id, (void *)matcher,
6333                 rte_atomic32_read(&matcher->refcnt));
6334         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
6335                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
6336                            (matcher->matcher_object));
6337                 LIST_REMOVE(matcher, next);
6338                 if (matcher->egress)
6339                         tbl = &sh->tx_tbl[matcher->group];
6340                 else
6341                         tbl = &sh->rx_tbl[matcher->group];
6342                 flow_dv_tbl_resource_release(tbl);
6343                 rte_free(matcher);
6344                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
6345                         dev->data->port_id, (void *)matcher);
6346                 return 0;
6347         }
6348         return 1;
6349 }
6350
6351 /**
6352  * Release an encap/decap resource.
6353  *
6354  * @param flow
6355  *   Pointer to mlx5_flow.
6356  *
6357  * @return
6358  *   1 while a reference on it exists, 0 when freed.
6359  */
6360 static int
6361 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
6362 {
6363         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
6364                                                 flow->dv.encap_decap;
6365
6366         assert(cache_resource->verbs_action);
6367         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
6368                 (void *)cache_resource,
6369                 rte_atomic32_read(&cache_resource->refcnt));
6370         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6371                 claim_zero(mlx5_glue->destroy_flow_action
6372                                 (cache_resource->verbs_action));
6373                 LIST_REMOVE(cache_resource, next);
6374                 rte_free(cache_resource);
6375                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
6376                         (void *)cache_resource);
6377                 return 0;
6378         }
6379         return 1;
6380 }
6381
6382 /**
6383  * Release a jump to table action resource.
6384  *
6385  * @param flow
6386  *   Pointer to mlx5_flow.
6387  *
6388  * @return
6389  *   1 while a reference on it exists, 0 when freed.
6390  */
6391 static int
6392 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
6393 {
6394         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
6395                                                 flow->dv.jump;
6396
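        /*
         * Besides the action itself, the reference taken on the
         * destination table must be dropped as well.
         */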
6397         assert(cache_resource->action);
6398         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
6399                 (void *)cache_resource,
6400                 rte_atomic32_read(&cache_resource->refcnt));
6401         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6402                 claim_zero(mlx5_glue->destroy_flow_action
6403                                 (cache_resource->action));
6404                 LIST_REMOVE(cache_resource, next);
6405                 flow_dv_tbl_resource_release(cache_resource->tbl);
6406                 rte_free(cache_resource);
6407                 DRV_LOG(DEBUG, "jump table resource %p: removed",
6408                         (void *)cache_resource);
6409                 return 0;
6410         }
6411         return 1;
6412 }
6413
6414 /**
6415  * Release a modify-header resource.
6416  *
6417  * @param flow
6418  *   Pointer to mlx5_flow.
6419  *
6420  * @return
6421  *   1 while a reference on it exists, 0 when freed.
6422  */
6423 static int
6424 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
6425 {
6426         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
6427                                                 flow->dv.modify_hdr;
6428
6429         assert(cache_resource->verbs_action);
6430         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
6431                 (void *)cache_resource,
6432                 rte_atomic32_read(&cache_resource->refcnt));
6433         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6434                 claim_zero(mlx5_glue->destroy_flow_action
6435                                 (cache_resource->verbs_action));
6436                 LIST_REMOVE(cache_resource, next);
6437                 rte_free(cache_resource);
6438                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
6439                         (void *)cache_resource);
6440                 return 0;
6441         }
6442         return 1;
6443 }
6444
6445 /**
6446  * Release a port ID action resource.
6447  *
6448  * @param flow
6449  *   Pointer to mlx5_flow.
6450  *
6451  * @return
6452  *   1 while a reference on it exists, 0 when freed.
6453  */
6454 static int
6455 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
6456 {
6457         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
6458                 flow->dv.port_id_action;
6459
6460         assert(cache_resource->action);
6461         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
6462                 (void *)cache_resource,
6463                 rte_atomic32_read(&cache_resource->refcnt));
6464         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6465                 claim_zero(mlx5_glue->destroy_flow_action
6466                                 (cache_resource->action));
6467                 LIST_REMOVE(cache_resource, next);
6468                 rte_free(cache_resource);
6469                 DRV_LOG(DEBUG, "port id action resource %p: removed",
6470                         (void *)cache_resource);
6471                 return 0;
6472         }
6473         return 1;
6474 }
6475
6476 /**
6477  * Release a push VLAN action resource.
6478  *
6479  * @param flow
6480  *   Pointer to mlx5_flow.
6481  *
6482  * @return
6483  *   1 while a reference on it exists, 0 when freed.
6484  */
6485 static int
6486 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6487 {
6488         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6489                 flow->dv.push_vlan_res;
6490
6491         assert(cache_resource->action);
6492         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6493                 (void *)cache_resource,
6494                 rte_atomic32_read(&cache_resource->refcnt));
6495         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6496                 claim_zero(mlx5_glue->destroy_flow_action
6497                                 (cache_resource->action));
6498                 LIST_REMOVE(cache_resource, next);
6499                 rte_free(cache_resource);
6500                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
6501                         (void *)cache_resource);
6502                 return 0;
6503         }
6504         return 1;
6505 }
6506
6507 /**
6508  * Remove the flow from the NIC but keep it in memory.
6509  *
6510  * @param[in] dev
6511  *   Pointer to Ethernet device.
6512  * @param[in, out] flow
6513  *   Pointer to flow structure.
6514  */
6515 static void
6516 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6517 {
6518         struct mlx5_flow_dv *dv;
6519         struct mlx5_flow *dev_flow;
6520
6521         if (!flow)
6522                 return;
6523         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6524                 dv = &dev_flow->dv;
6525                 if (dv->flow) {
6526                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6527                         dv->flow = NULL;
6528                 }
6529                 if (dv->hrxq) {
6530                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6531                                 mlx5_hrxq_drop_release(dev);
6532                         else
6533                                 mlx5_hrxq_release(dev, dv->hrxq);
6534                         dv->hrxq = NULL;
6535                 }
6536                 if (dev_flow->dv.vf_vlan.tag &&
6537                     dev_flow->dv.vf_vlan.created)
6538                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6539         }
6540 }
6541
6542 /**
6543  * Remove the flow from the NIC and from memory.
6544  *
6545  * @param[in] dev
6546  *   Pointer to the Ethernet device structure.
6547  * @param[in, out] flow
6548  *   Pointer to flow structure.
6549  */
6550 static void
6551 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6552 {
6553         struct mlx5_flow *dev_flow;
6554
6555         if (!flow)
6556                 return;
6557         flow_dv_remove(dev, flow);
6558         if (flow->counter) {
6559                 flow_dv_counter_release(dev, flow->counter);
6560                 flow->counter = NULL;
6561         }
6562         if (flow->tag_resource) {
6563                 flow_dv_tag_release(dev, flow->tag_resource);
6564                 flow->tag_resource = NULL;
6565         }
6566         while (!LIST_EMPTY(&flow->dev_flows)) {
6567                 dev_flow = LIST_FIRST(&flow->dev_flows);
6568                 LIST_REMOVE(dev_flow, next);
6569                 if (dev_flow->dv.matcher)
6570                         flow_dv_matcher_release(dev, dev_flow);
6571                 if (dev_flow->dv.encap_decap)
6572                         flow_dv_encap_decap_resource_release(dev_flow);
6573                 if (dev_flow->dv.modify_hdr)
6574                         flow_dv_modify_hdr_resource_release(dev_flow);
6575                 if (dev_flow->dv.jump)
6576                         flow_dv_jump_tbl_resource_release(dev_flow);
6577                 if (dev_flow->dv.port_id_action)
6578                         flow_dv_port_id_action_resource_release(dev_flow);
6579                 if (dev_flow->dv.push_vlan_res)
6580                         flow_dv_push_vlan_action_resource_release(dev_flow);
6581                 rte_free(dev_flow);
6582         }
6583 }
6584
6585 /**
6586  * Query a DV flow rule for its statistics via DevX.
6587  *
6588  * @param[in] dev
6589  *   Pointer to Ethernet device.
6590  * @param[in] flow
6591  *   Pointer to the sub flow.
6592  * @param[out] data
6593  *   Data retrieved by the query.
6594  * @param[out] error
6595  *   Perform verbose error reporting if not NULL.
6596  *
6597  * @return
6598  *   0 on success, a negative errno value otherwise and rte_errno is set.
6599  */
6600 static int
6601 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
6602                     void *data, struct rte_flow_error *error)
6603 {
6604         struct mlx5_priv *priv = dev->data->dev_private;
6605         struct rte_flow_query_count *qc = data;
6606
6607         if (!priv->config.devx)
6608                 return rte_flow_error_set(error, ENOTSUP,
6609                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6610                                           NULL,
6611                                           "counters are not supported");
6612         if (flow->counter) {
6613                 uint64_t pkts, bytes;
6614                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
6615                                                &bytes);
6616
6617                 if (err)
6618                         return rte_flow_error_set(error, -err,
6619                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6620                                         NULL, "cannot read counters");
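                /*
                 * Hardware counters are free-running; report the delta
                 * against the value saved at the last reset and, when a
                 * reset is requested, move the baseline forward.
                 */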
6621                 qc->hits_set = 1;
6622                 qc->bytes_set = 1;
6623                 qc->hits = pkts - flow->counter->hits;
6624                 qc->bytes = bytes - flow->counter->bytes;
6625                 if (qc->reset) {
6626                         flow->counter->hits = pkts;
6627                         flow->counter->bytes = bytes;
6628                 }
6629                 return 0;
6630         }
6631         return rte_flow_error_set(error, EINVAL,
6632                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6633                                   NULL,
6634                                   "counters are not available");
6635 }
6636
6637 /**
6638  * Query a flow.
6639  *
6640  * @see rte_flow_query()
6641  * @see rte_flow_ops
6642  */
6643 static int
6644 flow_dv_query(struct rte_eth_dev *dev,
6645               struct rte_flow *flow,
6646               const struct rte_flow_action *actions,
6647               void *data,
6648               struct rte_flow_error *error)
6649 {
6650         int ret = -EINVAL;
6651
6652         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6653                 switch (actions->type) {
6654                 case RTE_FLOW_ACTION_TYPE_VOID:
6655                         break;
6656                 case RTE_FLOW_ACTION_TYPE_COUNT:
6657                         ret = flow_dv_query_count(dev, flow, data, error);
6658                         break;
6659                 default:
6660                         return rte_flow_error_set(error, ENOTSUP,
6661                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6662                                                   actions,
6663                                                   "action not supported");
6664                 }
6665         }
6666         return ret;
6667 }
6668
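/*
 * The wrappers below serialize the DV/DR calls: the underlying objects
 * (tables, matchers, cached actions) live in the IB context that may be
 * shared between several ports, so a single lock protects them all.
 */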
6669 /*
6670  * Mutex-protected thunk to flow_dv_translate().
6671  */
6672 static int
6673 flow_d_translate(struct rte_eth_dev *dev,
6674                  struct mlx5_flow *dev_flow,
6675                  const struct rte_flow_attr *attr,
6676                  const struct rte_flow_item items[],
6677                  const struct rte_flow_action actions[],
6678                  struct rte_flow_error *error)
6679 {
6680         int ret;
6681
6682         flow_d_shared_lock(dev);
6683         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
6684         flow_d_shared_unlock(dev);
6685         return ret;
6686 }
6687
6688 /*
6689  * Mutex-protected thunk to flow_dv_apply().
6690  */
6691 static int
6692 flow_d_apply(struct rte_eth_dev *dev,
6693              struct rte_flow *flow,
6694              struct rte_flow_error *error)
6695 {
6696         int ret;
6697
6698         flow_d_shared_lock(dev);
6699         ret = flow_dv_apply(dev, flow, error);
6700         flow_d_shared_unlock(dev);
6701         return ret;
6702 }
6703
6704 /*
6705  * Mutex-protected thunk to flow_dv_remove().
6706  */
6707 static void
6708 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6709 {
6710         flow_d_shared_lock(dev);
6711         flow_dv_remove(dev, flow);
6712         flow_d_shared_unlock(dev);
6713 }
6714
6715 /*
6716  * Mutex-protected thunk to flow_dv_destroy().
6717  */
6718 static void
6719 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6720 {
6721         flow_d_shared_lock(dev);
6722         flow_dv_destroy(dev, flow);
6723         flow_d_shared_unlock(dev);
6724 }
6725
6726 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
6727         .validate = flow_dv_validate,
6728         .prepare = flow_dv_prepare,
6729         .translate = flow_d_translate,
6730         .apply = flow_d_apply,
6731         .remove = flow_d_remove,
6732         .destroy = flow_d_destroy,
6733         .query = flow_dv_query,
6734 };
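
/*
 * Illustrative only (not part of the driver): a minimal application-side
 * rule that exercises the translate/apply path above. It assumes port 0
 * is started and queue 0 exists.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 */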
6735
6736 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */