net/mlx5: update meta register matcher set
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
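
/*
 * Illustrative sketch (not part of the driver logic): composing a host-order
 * VLAN TCI from PCP and VID with the masks above. The helper name is
 * hypothetical and only demonstrates the 3-bit PCP / 12-bit VID layout.
 */
static inline uint16_t __rte_unused
flow_dv_example_make_tci(uint16_t pcp, uint16_t vid)
{
        return (uint16_t)(((pcp << MLX5DV_FLOW_VLAN_PCP_SHIFT) &
                           MLX5DV_FLOW_VLAN_PCP_MASK) |
                          (vid & MLX5DV_FLOW_VLAN_VID_MASK));
}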

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
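
/*
 * Usage sketch (illustrative, not called by the driver): classifying an
 * eth / ipv4 / udp pattern. It assumes only the definitions above and shows
 * how flow_dv_attr_init() folds the item types into the bit-fields.
 */
static void __rte_unused
flow_dv_attr_init_example(void)
{
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        union flow_dv_attr attr = { .attr = 0 };

        flow_dv_attr_init(items, &attr);
        assert(attr.valid && attr.ipv4 && attr.udp && !attr.tcp);
}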

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size here is in bits, unlike the byte sizes in the other tables. */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
               item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}
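
/*
 * Usage sketch (illustrative, not called by the driver): an IPv4 item whose
 * next protocol is IP-in-IP marks the flow as tunneled and records the IPIP
 * layer bit. Assumes only the helper above and the mlx5 flow layer flags.
 */
static void __rte_unused
mlx5_flow_tunnel_ip_check_example(void)
{
        const struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        uint64_t item_flags = 0;
        int tunnel = 0;

        mlx5_flow_tunnel_ip_check(&item, IPPROTO_IPIP, &item_flags, &tunnel);
        assert(tunnel == 1 && (item_flags & MLX5_FLOW_LAYER_IPIP));
}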

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}
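
/*
 * Usage sketch (illustrative, not called by the driver): applying a set
 * VLAN PCP action through the helper above. With vlan_pcp = 5 the PCP bits
 * are rewritten to 5 << 13 while the VID bits stay intact.
 */
static void __rte_unused
mlx5_update_vlan_vid_pcp_example(void)
{
        const struct rte_flow_action_of_set_vlan_pcp conf = { .vlan_pcp = 5 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
                .conf = &conf,
        };
        struct rte_vlan_hdr vlan = { .vlan_tci = 100 };

        mlx5_update_vlan_vid_pcp(&action, &vlan);
        assert(vlan.vlan_tci == ((5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 100));
}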

/**
 * Fetch 1, 2, 3 or 4 byte field from the byte array
 * and return as unsigned integer in host-endian format.
 *
 * @param[in] data
 *   Pointer to data array.
 * @param[in] size
 *   Size of field to extract.
 *
 * @return
 *   Converted field in host-endian format.
 */
static inline uint32_t
flow_dv_fetch_field(const uint8_t *data, uint32_t size)
{
        uint32_t ret;

        switch (size) {
        case 1:
                ret = *data;
                break;
        case 2:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                break;
        case 3:
                ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
                ret = (ret << 8) | *(data + sizeof(uint16_t));
                break;
        case 4:
                ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
                break;
        default:
                assert(false);
                ret = 0;
                break;
        }
        return ret;
}
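
/*
 * Worked example (illustrative, not called by the driver): for the
 * big-endian byte sequence {0x12, 0x34, 0x56} and size 3, the leading two
 * bytes are converted as a 16-bit word and the third byte is appended in
 * the low bits, yielding 0x123456 on any host endianness.
 */
static void __rte_unused
flow_dv_fetch_field_example(void)
{
        static const uint8_t data[] = { 0x12, 0x34, 0x56 };

        assert(flow_dv_fetch_field(data, 3) == 0x123456);
}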

/**
 * Convert modify-header action to DV specification.
 *
 * Data length of each action is determined by provided field description
 * and the item mask. Data bit offset and width of each action is determined
 * by provided item mask.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 *     For MLX5_MODIFICATION_TYPE_SET specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_ADD specifies destination field.
 *     For MLX5_MODIFICATION_TYPE_COPY specifies source field.
 * @param[in] dcopy
 *   Destination field info for MLX5_MODIFICATION_TYPE_COPY in @type.
 *   Negative offset value sets the same offset as source offset.
 *   size field is ignored, value is taken from source field.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct field_modify_info *dcopy,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type, struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;

        /*
         * The item and mask are provided in big-endian format.
         * The fields should be presented in big-endian format as well.
         * Mask must always be present, it defines the actual field width.
         */
        assert(item->mask);
        assert(field->size);
        do {
                unsigned int size_b;
                unsigned int off_b;
                uint32_t mask;
                uint32_t data;

                if (i >= MLX5_MODIFY_NUM)
                        return rte_flow_error_set(error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                 "too many items to modify");
                /* Fetch variable byte size mask from the array. */
                mask = flow_dv_fetch_field((const uint8_t *)item->mask +
                                           field->offset, field->size);
                if (!mask) {
                        ++field;
                        continue;
                }
                /* Deduce actual data width in bits from mask value. */
                off_b = rte_bsf32(mask);
                size_b = sizeof(uint32_t) * CHAR_BIT -
                         off_b - __builtin_clz(mask);
                assert(size_b);
                /* A zero-length field encodes the full 32-bit width. */
                size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
                actions[i].action_type = type;
                actions[i].field = field->id;
                actions[i].offset = off_b;
                actions[i].length = size_b;
                /* Convert entire record to expected big-endian format. */
                actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                if (type == MLX5_MODIFICATION_TYPE_COPY) {
                        assert(dcopy);
                        actions[i].dst_field = dcopy->id;
                        actions[i].dst_offset =
                                (int)dcopy->offset < 0 ? off_b : dcopy->offset;
                        /* Convert entire record to big-endian format. */
                        actions[i].data1 = rte_cpu_to_be_32(actions[i].data1);
                } else {
                        assert(item->spec);
                        data = flow_dv_fetch_field((const uint8_t *)item->spec +
                                                   field->offset, field->size);
                        /* Shift out the trailing masked bits from data. */
                        data = (data & mask) >> off_b;
                        actions[i].data1 = rte_cpu_to_be_32(data);
                }
                ++i;
                ++field;
        } while (field->size);
        resource->actions_num = i;
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}
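
/*
 * Worked example (illustrative, not called by the driver) of the width
 * deduction above: a host-order mask of 0x00fff000 selects a 12-bit field
 * starting at bit 12, so the command is emitted with offset 12, length 12.
 */
static void __rte_unused
flow_dv_modify_width_example(void)
{
        uint32_t mask = 0x00fff000;
        unsigned int off_b = rte_bsf32(mask);
        unsigned int size_b = sizeof(uint32_t) * CHAR_BIT -
                              off_b - __builtin_clz(mask);

        assert(off_b == 12 && size_b == 12);
}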

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = field->id;
        actions[i].length = field->size;
        actions[i].offset = field->offset;
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                /* Adding 0xFF to the 8-bit TTL wraps around to TTL - 1. */
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
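
/*
 * Worked example (illustrative, not called by the driver) of the
 * decrement-by-addition trick above: with 32-bit wraparound, adding
 * UINT32_MAX once equals subtracting 1, so adding value * UINT32_MAX
 * subtracts `value`.
 */
static void __rte_unused
flow_dv_dec_by_add_example(void)
{
        uint32_t seq = 1000;
        uint64_t add = (uint64_t)3 * UINT32_MAX;

        assert((uint32_t)(seq + add) == seq - 3);
}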

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, NULL, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = action->conf;
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = reg_to_field[conf->id];
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->data;
        ++i;
        resource->actions_num = i;
        return 0;
}
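
/*
 * Usage sketch (illustrative, not called by the driver): feeding the
 * converter above with an internal set-tag action that writes 0xcafe into
 * register C2. The conf layout follows the fields the converter reads.
 */
static int __rte_unused
flow_dv_set_reg_example(struct mlx5_flow_dv_modify_hdr_resource *res,
                        struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag conf = {
                .id = REG_C_2,
                .data = 0xcafe,
        };
        const struct rte_flow_action action = { .conf = &conf };

        return flow_dv_convert_action_set_reg(res, &action, error);
}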

/**
 * Convert internal COPY_REG action to DV specification.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in,out] res
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_copy_mreg(struct rte_eth_dev *dev  __rte_unused,
                                 struct mlx5_flow_dv_modify_hdr_resource *res,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg *conf = action->conf;
        uint32_t mask = RTE_BE32(UINT32_MAX);
        struct rte_flow_item item = {
                .spec = NULL,
                .mask = &mask,
        };
        struct field_modify_info reg_src[] = {
                {4, 0, reg_to_field[conf->src]},
                {0, 0, 0},
        };
        struct field_modify_info reg_dst = {
                .offset = (uint32_t)-1, /* Same as src. */
                .id = reg_to_field[conf->dst],
        };
        return flow_dv_convert_modify_action(&item,
                                             reg_src, &reg_dst, res,
                                             MLX5_MODIFICATION_TYPE_COPY,
                                             error);
}
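
/*
 * Usage sketch (illustrative, not called by the driver): copying REG_C_0
 * into REG_C_1 with the converter above. The (uint32_t)-1 destination
 * offset in reg_dst requests the same bit offset as the source, and the
 * all-ones mask copies the full 32-bit register.
 */
static int __rte_unused
flow_dv_copy_mreg_example(struct rte_eth_dev *dev,
                          struct mlx5_flow_dv_modify_hdr_resource *res,
                          struct rte_flow_error *error)
{
        const struct mlx5_flow_action_copy_mreg conf = {
                .src = REG_C_0,
                .dst = REG_C_1,
        };
        const struct rte_flow_action action = { .conf = &conf };

        return flow_dv_convert_action_copy_mreg(dev, res, &action, error);
}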

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = UINT32_MAX
        };
        int ret;

        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                              const struct rte_flow_item *item,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_port_id *spec = item->spec;
        const struct rte_flow_item_port_id *mask = item->mask;
        const struct rte_flow_item_port_id switch_mask = {
                        .id = 0xffffffff,
        };
        struct mlx5_priv *esw_priv;
        struct mlx5_priv *dev_priv;
        int ret;

        if (!attr->transfer)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on port id is valid only"
                                          " when transfer flag is enabled");
        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple source ports are not"
                                          " supported");
        if (!mask)
                mask = &switch_mask;
        if (mask->id != 0xffffffff)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          mask,
                                          "no support for partial mask on"
                                          " \"id\" field");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 error);
        if (ret)
                return ret;
        if (!spec)
                return 0;
        esw_priv = mlx5_port_to_eswitch_info(spec->id);
        if (!esw_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "failed to obtain E-Switch info for"
                                          " port");
        dev_priv = mlx5_dev_to_eswitch_info(dev);
        if (!dev_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to obtain E-Switch info");
        if (esw_priv->domain_id != dev_priv->domain_id)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "cannot match on a port from a"
                                          " different E-Switch");
        return 0;
}

/**
 * Validate the pop VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the pop vlan action.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
                                 uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 uint64_t item_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        (void)action;
        (void)attr;
        if (!priv->sh->pop_vlan_action)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "pop vlan action is not supported");
        /*
         * Check for inconsistencies:
         *  fail strip_vlan in a flow that matches packets without VLAN tags,
         *  i.e. in a flow that does not explicitly match on a VLAN tag.
         */
        if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "no support for multiple vlan pop "
                                          "actions");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot pop vlan without a "
                                          "match on (outer) vlan in the flow");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after pop VLAN action");
        return 0;
}

/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to VLAN info to fill.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
                                  struct rte_vlan_hdr *vlan)
{
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
                                MLX5DV_FLOW_VLAN_VID_MASK),
                .inner_type = RTE_BE16(0xffff),
        };

        if (items == NULL)
                return;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
               items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
                ;
        if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                const struct rte_flow_item_vlan *vlan_m = items->mask;
                const struct rte_flow_item_vlan *vlan_v = items->spec;

                if (!vlan_m)
                        vlan_m = &nic_mask;
                /* Only full match values are accepted. */
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
                }
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_VID_MASK_BE);
                }
                if (vlan_m->inner_type == nic_mask.inner_type)
                        vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
                                                           vlan_m->inner_type);
        }
}
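
/*
 * Usage sketch (illustrative, not called by the driver): with no explicit
 * mask the nic_mask above applies, so a VLAN spec of TCI 0x2064 is a full
 * match and the helper fills PCP 1 and VID 0x064 into the header.
 */
static void __rte_unused
flow_dev_get_vlan_info_example(void)
{
        const struct rte_flow_item_vlan vlan_v = { .tci = RTE_BE16(0x2064) };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &vlan_v },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_vlan_hdr vlan = { .vlan_tci = 0 };

        flow_dev_get_vlan_info_from_items(items, &vlan);
        assert(vlan.vlan_tci == 0x2064);
}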

/**
 * Validate the push VLAN action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
                                  uint64_t item_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;

        if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
            push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "invalid vlan ethertype");
        if (action_flags &
                (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "no support for multiple VLAN "
                                          "actions");
        if (!mlx5_flow_find_action
                        (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION, action,
                                "push VLAN needs to match on VLAN in order to "
                                "get VLAN VID information because there is "
                                "no following set VLAN VID action");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after push VLAN");
        (void)attr;
        return 0;
}

/**
 * Validate the set VLAN PCP.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;

        if (conf->vlan_pcp > 7)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN PCP value is too big");
        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "set VLAN PCP action must follow "
                                          "the push VLAN action");
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN PCP modifications are "
                                          "not supported");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN PCP");
        return 0;
}

/**
 * Validate the set VLAN VID.
 *
 * @param[in] item_flags
 *   Holds the items detected in this rule.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
                                     uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

        if (conf->vlan_vid > RTE_BE16(0xFFE))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN VID value is too big");
        /* There is an of_push_vlan action before us. */
        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
                if (mlx5_flow_find_action(actions + 1,
                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                                        "Multiple VLAN VID modifications are "
                                        "not supported");
                else
                        return 0;
        }

        /*
         * Action is on an existing VLAN header:
         *    need to verify this is a single set VLAN VID action,
         *    and the rule must include a match on the outer VLAN.
         */
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN VID modifications are "
                                          "not supported");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "match on VLAN is required in order "
                                          "to set VLAN VID");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN VID");
        return 0;
}
1341
1342 /**
1343  * Validate the count action.
1344  *
1345  * @param[in] dev
1346  *   Pointer to rte_eth_dev structure.
1347  * @param[out] error
1348  *   Pointer to error structure.
1349  *
1350  * @return
1351  *   0 on success, a negative errno value otherwise and rte_errno is set.
1352  */
1353 static int
1354 flow_dv_validate_action_count(struct rte_eth_dev *dev,
1355                               struct rte_flow_error *error)
1356 {
1357         struct mlx5_priv *priv = dev->data->dev_private;
1358
1359         if (!priv->config.devx)
1360                 goto notsup_err;
1361 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
1362         return 0;
1363 #endif
1364 notsup_err:
1365         return rte_flow_error_set
1366                       (error, ENOTSUP,
1367                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1368                        NULL,
1369                        "count action not supported");
1370 }
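
/*
 * Illustrative usage sketch, application side (not part of the driver);
 * "port_id" and "flow" are hypothetical. Once a rule carrying a COUNT
 * action has been created, the counter is read back with rte_flow_query():
 *
 *   struct rte_flow_query_count count = { .reset = 0 };
 *   struct rte_flow_action count_action[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *
 *   if (!rte_flow_query(port_id, flow, count_action, &count, &err))
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n", count.hits,
 *                  count.bytes);
 */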
1371
1372 /**
1373  * Validate the L2 encap action.
1374  *
1375  * @param[in] action_flags
1376  *   Holds the actions detected until now.
1377  * @param[in] action
1378  *   Pointer to the encap action.
1379  * @param[in] attr
1380  *   Pointer to flow attributes
1381  * @param[out] error
1382  *   Pointer to error structure.
1383  *
1384  * @return
1385  *   0 on success, a negative errno value otherwise and rte_errno is set.
1386  */
1387 static int
1388 flow_dv_validate_action_l2_encap(uint64_t action_flags,
1389                                  const struct rte_flow_action *action,
1390                                  const struct rte_flow_attr *attr,
1391                                  struct rte_flow_error *error)
1392 {
1393         if (!(action->conf))
1394                 return rte_flow_error_set(error, EINVAL,
1395                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1396                                           "configuration cannot be null");
1397         if (action_flags & MLX5_FLOW_ACTION_DROP)
1398                 return rte_flow_error_set(error, EINVAL,
1399                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1400                                           "can't drop and encap in same flow");
1401         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1402                 return rte_flow_error_set(error, EINVAL,
1403                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1404                                           "can only have a single encap or"
1405                                           " decap action in a flow");
1406         if (!attr->transfer && attr->ingress)
1407                 return rte_flow_error_set(error, ENOTSUP,
1408                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1409                                           NULL,
1410                                           "encap action not supported for "
1411                                           "ingress");
1412         return 0;
1413 }
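
/*
 * Illustrative testpmd sketch (hypothetical values, syntax approximate):
 * L2 encap is accepted on egress or transfer rules only, e.g.:
 *
 *   set vxlan ip-version ipv4 vni 4 udp-src 4789 udp-dst 4789
 *       ip-src 127.0.0.1 ip-dst 128.0.0.1
 *       eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 *   flow create 0 egress pattern eth / end actions vxlan_encap / end
 */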
1414
1415 /**
1416  * Validate the L2 decap action.
1417  *
1418  * @param[in] action_flags
1419  *   Holds the actions detected until now.
1420  * @param[in] attr
1421  *   Pointer to flow attributes
1422  * @param[out] error
1423  *   Pointer to error structure.
1424  *
1425  * @return
1426  *   0 on success, a negative errno value otherwise and rte_errno is set.
1427  */
1428 static int
1429 flow_dv_validate_action_l2_decap(uint64_t action_flags,
1430                                  const struct rte_flow_attr *attr,
1431                                  struct rte_flow_error *error)
1432 {
1433         if (action_flags & MLX5_FLOW_ACTION_DROP)
1434                 return rte_flow_error_set(error, EINVAL,
1435                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1436                                           "can't drop and decap in same flow");
1437         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1438                 return rte_flow_error_set(error, EINVAL,
1439                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1440                                           "can only have a single encap or"
1441                                           " decap action in a flow");
1442         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1443                 return rte_flow_error_set(error, EINVAL,
1444                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1445                                           "can't have decap action after"
1446                                           " modify action");
1447         if (attr->egress)
1448                 return rte_flow_error_set(error, ENOTSUP,
1449                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1450                                           NULL,
1451                                           "decap action not supported for "
1452                                           "egress");
1453         return 0;
1454 }
1455
1456 /**
1457  * Validate the raw encap action.
1458  *
1459  * @param[in] action_flags
1460  *   Holds the actions detected until now.
1461  * @param[in] action
1462  *   Pointer to the encap action.
1463  * @param[in] attr
1464  *   Pointer to flow attributes
1465  * @param[out] error
1466  *   Pointer to error structure.
1467  *
1468  * @return
1469  *   0 on success, a negative errno value otherwise and rte_errno is set.
1470  */
1471 static int
1472 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1473                                   const struct rte_flow_action *action,
1474                                   const struct rte_flow_attr *attr,
1475                                   struct rte_flow_error *error)
1476 {
1477         const struct rte_flow_action_raw_encap *raw_encap =
1478                 (const struct rte_flow_action_raw_encap *)action->conf;
1479         if (!(action->conf))
1480                 return rte_flow_error_set(error, EINVAL,
1481                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1482                                           "configuration cannot be null");
1483         if (action_flags & MLX5_FLOW_ACTION_DROP)
1484                 return rte_flow_error_set(error, EINVAL,
1485                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1486                                           "can't drop and encap in same flow");
1487         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1488                 return rte_flow_error_set(error, EINVAL,
1489                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1490                                           "can only have a single encap"
1491                                           " action in a flow");
1492         /* Encap without preceding decap is not supported for ingress. */
1493         if (!attr->transfer && attr->ingress &&
1494             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1495                 return rte_flow_error_set(error, ENOTSUP,
1496                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1497                                           NULL,
1498                                           "encap action not supported for "
1499                                           "ingress");
1500         if (!raw_encap->size || !raw_encap->data)
1501                 return rte_flow_error_set(error, EINVAL,
1502                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1503                                           "raw encap data cannot be empty");
1504         return 0;
1505 }
1506
1507 /**
1508  * Validate the raw decap action.
1509  *
1510  * @param[in] action_flags
1511  *   Holds the actions detected until now.
1512  * @param[in] action
1513  *   Pointer to the decap action.
1514  * @param[in] attr
1515  *   Pointer to flow attributes
1516  * @param[out] error
1517  *   Pointer to error structure.
1518  *
1519  * @return
1520  *   0 on success, a negative errno value otherwise and rte_errno is set.
1521  */
1522 static int
1523 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1524                                   const struct rte_flow_action *action,
1525                                   const struct rte_flow_attr *attr,
1526                                   struct rte_flow_error *error)
1527 {
1528         if (action_flags & MLX5_FLOW_ACTION_DROP)
1529                 return rte_flow_error_set(error, EINVAL,
1530                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1531                                           "can't drop and decap in same flow");
1532         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1533                 return rte_flow_error_set(error, EINVAL,
1534                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1535                                           "can't have encap action before"
1536                                           " decap action");
1537         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1538                 return rte_flow_error_set(error, EINVAL,
1539                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1540                                           "can only have a single decap"
1541                                           " action in a flow");
1542         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1543                 return rte_flow_error_set(error, EINVAL,
1544                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1545                                           "can't have decap action after"
1546                                           " modify action");
1547         /* Decap action is valid on egress only if it is followed by encap. */
1548         if (attr->egress) {
1549                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1550                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1551                        action++) {
1552                 }
1553                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1554                         return rte_flow_error_set
1555                                         (error, ENOTSUP,
1556                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1557                                          NULL, "decap action not supported"
1558                                          " for egress");
1559         }
1560         return 0;
1561 }
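
/*
 * Illustrative testpmd sketch (hypothetical values, syntax approximate):
 * on egress, raw_decap must be followed by raw_encap later in the same
 * action list, e.g. stripping the original L2 header and prepending a
 * tunnel header:
 *
 *   set raw_decap eth / end_set
 *   set raw_encap eth / ipv4 / udp / vxlan vni is 5 / end_set
 *   flow create 0 egress pattern eth / end
 *        actions raw_decap / raw_encap / end
 */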
1562
1563 /**
1564  * Find existing encap/decap resource or create and register a new one.
1565  *
1566  * @param[in, out] dev
1567  *   Pointer to rte_eth_dev structure.
1568  * @param[in, out] resource
1569  *   Pointer to encap/decap resource.
1570  * @param[in, out] dev_flow
1571  *   Pointer to the dev_flow.
1572  * @param[out] error
1573  *   Pointer to error structure.
1574  *
1575  * @return
1576  *   0 on success, a negative errno value otherwise and rte_errno is set.
1577  */
1578 static int
1579 flow_dv_encap_decap_resource_register
1580                         (struct rte_eth_dev *dev,
1581                          struct mlx5_flow_dv_encap_decap_resource *resource,
1582                          struct mlx5_flow *dev_flow,
1583                          struct rte_flow_error *error)
1584 {
1585         struct mlx5_priv *priv = dev->data->dev_private;
1586         struct mlx5_ibv_shared *sh = priv->sh;
1587         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1588         struct mlx5dv_dr_domain *domain;
1589
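        /*
         * Group 0 is the root table; DR reformat actions on it need the
         * ROOT_LEVEL flag (1), non-root tables take 0.
         */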
1590         resource->flags = dev_flow->group ? 0 : 1;
1591         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1592                 domain = sh->fdb_domain;
1593         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1594                 domain = sh->rx_domain;
1595         else
1596                 domain = sh->tx_domain;
1597
1598         /* Lookup a matching resource from cache. */
1599         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1600                 if (resource->reformat_type == cache_resource->reformat_type &&
1601                     resource->ft_type == cache_resource->ft_type &&
1602                     resource->flags == cache_resource->flags &&
1603                     resource->size == cache_resource->size &&
1604                     !memcmp((const void *)resource->buf,
1605                             (const void *)cache_resource->buf,
1606                             resource->size)) {
1607                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1608                                 (void *)cache_resource,
1609                                 rte_atomic32_read(&cache_resource->refcnt));
1610                         rte_atomic32_inc(&cache_resource->refcnt);
1611                         dev_flow->dv.encap_decap = cache_resource;
1612                         return 0;
1613                 }
1614         }
1615         /* Register new encap/decap resource. */
1616         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1617         if (!cache_resource)
1618                 return rte_flow_error_set(error, ENOMEM,
1619                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1620                                           "cannot allocate resource memory");
1621         *cache_resource = *resource;
1622         cache_resource->verbs_action =
1623                 mlx5_glue->dv_create_flow_action_packet_reformat
1624                         (sh->ctx, cache_resource->reformat_type,
1625                          cache_resource->ft_type, domain, cache_resource->flags,
1626                          cache_resource->size,
1627                          (cache_resource->size ? cache_resource->buf : NULL));
1628         if (!cache_resource->verbs_action) {
1629                 rte_free(cache_resource);
1630                 return rte_flow_error_set(error, ENOMEM,
1631                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1632                                           NULL, "cannot create action");
1633         }
1634         rte_atomic32_init(&cache_resource->refcnt);
1635         rte_atomic32_inc(&cache_resource->refcnt);
1636         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1637         dev_flow->dv.encap_decap = cache_resource;
1638         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1639                 (void *)cache_resource,
1640                 rte_atomic32_read(&cache_resource->refcnt));
1641         return 0;
1642 }
1643
1644 /**
1645  * Find existing table jump resource or create and register a new one.
1646  *
1647  * @param[in, out] dev
1648  *   Pointer to rte_eth_dev structure.
1649  * @param[in, out] resource
1650  *   Pointer to jump table resource.
1651  * @param[in, out] dev_flow
1652  *   Pointer to the dev_flow.
1653  * @param[out] error
1654  *   Pointer to error structure.
1655  *
1656  * @return
1657  *   0 on success, a negative errno value otherwise and rte_errno is set.
1658  */
1659 static int
1660 flow_dv_jump_tbl_resource_register
1661                         (struct rte_eth_dev *dev,
1662                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1663                          struct mlx5_flow *dev_flow,
1664                          struct rte_flow_error *error)
1665 {
1666         struct mlx5_priv *priv = dev->data->dev_private;
1667         struct mlx5_ibv_shared *sh = priv->sh;
1668         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1669
1670         /* Lookup a matching resource from cache. */
1671         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1672                 if (resource->tbl == cache_resource->tbl) {
1673                         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1674                                 (void *)cache_resource,
1675                                 rte_atomic32_read(&cache_resource->refcnt));
1676                         rte_atomic32_inc(&cache_resource->refcnt);
1677                         dev_flow->dv.jump = cache_resource;
1678                         return 0;
1679                 }
1680         }
1681         /* Register new jump table resource. */
1682         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1683         if (!cache_resource)
1684                 return rte_flow_error_set(error, ENOMEM,
1685                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1686                                           "cannot allocate resource memory");
1687         *cache_resource = *resource;
1688         cache_resource->action =
1689                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1690                 (resource->tbl->obj);
1691         if (!cache_resource->action) {
1692                 rte_free(cache_resource);
1693                 return rte_flow_error_set(error, ENOMEM,
1694                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1695                                           NULL, "cannot create action");
1696         }
1697         rte_atomic32_init(&cache_resource->refcnt);
1698         rte_atomic32_inc(&cache_resource->refcnt);
1699         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1700         dev_flow->dv.jump = cache_resource;
1701         DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1702                 (void *)cache_resource,
1703                 rte_atomic32_read(&cache_resource->refcnt));
1704         return 0;
1705 }
1706
1707 /**
1708  * Find existing table port ID resource or create and register a new one.
1709  *
1710  * @param[in, out] dev
1711  *   Pointer to rte_eth_dev structure.
1712  * @param[in, out] resource
1713  *   Pointer to port ID action resource.
1714  * @param[in, out] dev_flow
1715  *   Pointer to the dev_flow.
1716  * @param[out] error
1717  *   Pointer to error structure.
1718  *
1719  * @return
1720  *   0 on success, a negative errno value otherwise and rte_errno is set.
1721  */
1722 static int
1723 flow_dv_port_id_action_resource_register
1724                         (struct rte_eth_dev *dev,
1725                          struct mlx5_flow_dv_port_id_action_resource *resource,
1726                          struct mlx5_flow *dev_flow,
1727                          struct rte_flow_error *error)
1728 {
1729         struct mlx5_priv *priv = dev->data->dev_private;
1730         struct mlx5_ibv_shared *sh = priv->sh;
1731         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1732
1733         /* Lookup a matching resource from cache. */
1734         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1735                 if (resource->port_id == cache_resource->port_id) {
1736                         DRV_LOG(DEBUG, "port id action resource %p: "
1737                                 "refcnt %d++",
1738                                 (void *)cache_resource,
1739                                 rte_atomic32_read(&cache_resource->refcnt));
1740                         rte_atomic32_inc(&cache_resource->refcnt);
1741                         dev_flow->dv.port_id_action = cache_resource;
1742                         return 0;
1743                 }
1744         }
1745         /* Register new port id action resource. */
1746         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1747         if (!cache_resource)
1748                 return rte_flow_error_set(error, ENOMEM,
1749                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1750                                           "cannot allocate resource memory");
1751         *cache_resource = *resource;
1752         cache_resource->action =
1753                 mlx5_glue->dr_create_flow_action_dest_vport
1754                         (priv->sh->fdb_domain, resource->port_id);
1755         if (!cache_resource->action) {
1756                 rte_free(cache_resource);
1757                 return rte_flow_error_set(error, ENOMEM,
1758                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1759                                           NULL, "cannot create action");
1760         }
1761         rte_atomic32_init(&cache_resource->refcnt);
1762         rte_atomic32_inc(&cache_resource->refcnt);
1763         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1764         dev_flow->dv.port_id_action = cache_resource;
1765         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1766                 (void *)cache_resource,
1767                 rte_atomic32_read(&cache_resource->refcnt));
1768         return 0;
1769 }
1770
1771 /**
1772  * Find existing push vlan resource or create and register a new one.
1773  *
1774  * @param[in, out] dev
1775  *   Pointer to rte_eth_dev structure.
1776  * @param[in, out] resource
1777  *   Pointer to push VLAN action resource.
1778  * @param[in, out] dev_flow
1779  *   Pointer to the dev_flow.
1780  * @param[out] error
1781  *   Pointer to error structure.
1782  *
1783  * @return
1784  *   0 on success, a negative errno value otherwise and rte_errno is set.
1785  */
1786 static int
1787 flow_dv_push_vlan_action_resource_register
1788                        (struct rte_eth_dev *dev,
1789                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1790                         struct mlx5_flow *dev_flow,
1791                         struct rte_flow_error *error)
1792 {
1793         struct mlx5_priv *priv = dev->data->dev_private;
1794         struct mlx5_ibv_shared *sh = priv->sh;
1795         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1796         struct mlx5dv_dr_domain *domain;
1797
1798         /* Lookup a matching resource from cache. */
1799         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1800                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1801                     resource->ft_type == cache_resource->ft_type) {
1802                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
1803                                 "refcnt %d++",
1804                                 (void *)cache_resource,
1805                                 rte_atomic32_read(&cache_resource->refcnt));
1806                         rte_atomic32_inc(&cache_resource->refcnt);
1807                         dev_flow->dv.push_vlan_res = cache_resource;
1808                         return 0;
1809                 }
1810         }
1811         /* Register new push_vlan action resource. */
1812         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1813         if (!cache_resource)
1814                 return rte_flow_error_set(error, ENOMEM,
1815                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1816                                           "cannot allocate resource memory");
1817         *cache_resource = *resource;
1818         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1819                 domain = sh->fdb_domain;
1820         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1821                 domain = sh->rx_domain;
1822         else
1823                 domain = sh->tx_domain;
1824         cache_resource->action =
1825                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1826                                                            resource->vlan_tag);
1827         if (!cache_resource->action) {
1828                 rte_free(cache_resource);
1829                 return rte_flow_error_set(error, ENOMEM,
1830                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1831                                           NULL, "cannot create action");
1832         }
1833         rte_atomic32_init(&cache_resource->refcnt);
1834         rte_atomic32_inc(&cache_resource->refcnt);
1835         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1836         dev_flow->dv.push_vlan_res = cache_resource;
1837         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1838                 (void *)cache_resource,
1839                 rte_atomic32_read(&cache_resource->refcnt));
1840         return 0;
1841 }

1842 /**
1843  * Get the size of a specific rte_flow_item_type.
1844  *
1845  * @param[in] item_type
1846  *   Tested rte_flow_item_type.
1847  *
1848  * @return
1849  *   Size of the item type structure, 0 if void or unsupported.
1850  */
1851 static size_t
1852 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1853 {
1854         size_t retval;
1855
1856         switch (item_type) {
1857         case RTE_FLOW_ITEM_TYPE_ETH:
1858                 retval = sizeof(struct rte_flow_item_eth);
1859                 break;
1860         case RTE_FLOW_ITEM_TYPE_VLAN:
1861                 retval = sizeof(struct rte_flow_item_vlan);
1862                 break;
1863         case RTE_FLOW_ITEM_TYPE_IPV4:
1864                 retval = sizeof(struct rte_flow_item_ipv4);
1865                 break;
1866         case RTE_FLOW_ITEM_TYPE_IPV6:
1867                 retval = sizeof(struct rte_flow_item_ipv6);
1868                 break;
1869         case RTE_FLOW_ITEM_TYPE_UDP:
1870                 retval = sizeof(struct rte_flow_item_udp);
1871                 break;
1872         case RTE_FLOW_ITEM_TYPE_TCP:
1873                 retval = sizeof(struct rte_flow_item_tcp);
1874                 break;
1875         case RTE_FLOW_ITEM_TYPE_VXLAN:
1876                 retval = sizeof(struct rte_flow_item_vxlan);
1877                 break;
1878         case RTE_FLOW_ITEM_TYPE_GRE:
1879                 retval = sizeof(struct rte_flow_item_gre);
1880                 break;
1881         case RTE_FLOW_ITEM_TYPE_NVGRE:
1882                 retval = sizeof(struct rte_flow_item_nvgre);
1883                 break;
1884         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1885                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1886                 break;
1887         case RTE_FLOW_ITEM_TYPE_MPLS:
1888                 retval = sizeof(struct rte_flow_item_mpls);
1889                 break;
1890         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1891         default:
1892                 retval = 0;
1893                 break;
1894         }
1895         return retval;
1896 }
1897
1898 #define MLX5_ENCAP_IPV4_VERSION         0x40
1899 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1900 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1901 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1902 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1903 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1904 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1905
1906 /**
1907  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
1908  *
1909  * @param[in] items
1910  *   Pointer to rte_flow_item objects list.
1911  * @param[out] buf
1912  *   Pointer to the output buffer.
1913  * @param[out] size
1914  *   Pointer to the output buffer size.
1915  * @param[out] error
1916  *   Pointer to the error structure.
1917  *
1918  * @return
1919  *   0 on success, a negative errno value otherwise and rte_errno is set.
1920  */
1921 static int
1922 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1923                            size_t *size, struct rte_flow_error *error)
1924 {
1925         struct rte_ether_hdr *eth = NULL;
1926         struct rte_vlan_hdr *vlan = NULL;
1927         struct rte_ipv4_hdr *ipv4 = NULL;
1928         struct rte_ipv6_hdr *ipv6 = NULL;
1929         struct rte_udp_hdr *udp = NULL;
1930         struct rte_vxlan_hdr *vxlan = NULL;
1931         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1932         struct rte_gre_hdr *gre = NULL;
1933         size_t len;
1934         size_t temp_size = 0;
1935
1936         if (!items)
1937                 return rte_flow_error_set(error, EINVAL,
1938                                           RTE_FLOW_ERROR_TYPE_ACTION,
1939                                           NULL, "invalid empty data");
1940         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1941                 len = flow_dv_get_item_len(items->type);
1942                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1943                         return rte_flow_error_set(error, EINVAL,
1944                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1945                                                   (void *)items->type,
1946                                                   "items total size is too big"
1947                                                   " for encap action");
1948                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1949                 switch (items->type) {
1950                 case RTE_FLOW_ITEM_TYPE_ETH:
1951                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1952                         break;
1953                 case RTE_FLOW_ITEM_TYPE_VLAN:
1954                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1955                         if (!eth)
1956                                 return rte_flow_error_set(error, EINVAL,
1957                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1958                                                 (void *)items->type,
1959                                                 "eth header not found");
1960                         if (!eth->ether_type)
1961                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1962                         break;
1963                 case RTE_FLOW_ITEM_TYPE_IPV4:
1964                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1965                         if (!vlan && !eth)
1966                                 return rte_flow_error_set(error, EINVAL,
1967                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1968                                                 (void *)items->type,
1969                                                 "neither eth nor vlan"
1970                                                 " header found");
1971                         if (vlan && !vlan->eth_proto)
1972                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1973                         else if (eth && !eth->ether_type)
1974                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1975                         if (!ipv4->version_ihl)
1976                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1977                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1978                         if (!ipv4->time_to_live)
1979                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1980                         break;
1981                 case RTE_FLOW_ITEM_TYPE_IPV6:
1982                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1983                         if (!vlan && !eth)
1984                                 return rte_flow_error_set(error, EINVAL,
1985                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1986                                                 (void *)items->type,
1987                                                 "neither eth nor vlan"
1988                                                 " header found");
1989                         if (vlan && !vlan->eth_proto)
1990                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1991                         else if (eth && !eth->ether_type)
1992                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1993                         if (!ipv6->vtc_flow)
1994                                 ipv6->vtc_flow =
1995                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1996                         if (!ipv6->hop_limits)
1997                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1998                         break;
1999                 case RTE_FLOW_ITEM_TYPE_UDP:
2000                         udp = (struct rte_udp_hdr *)&buf[temp_size];
2001                         if (!ipv4 && !ipv6)
2002                                 return rte_flow_error_set(error, EINVAL,
2003                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2004                                                 (void *)items->type,
2005                                                 "ip header not found");
2006                         if (ipv4 && !ipv4->next_proto_id)
2007                                 ipv4->next_proto_id = IPPROTO_UDP;
2008                         else if (ipv6 && !ipv6->proto)
2009                                 ipv6->proto = IPPROTO_UDP;
2010                         break;
2011                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2012                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
2013                         if (!udp)
2014                                 return rte_flow_error_set(error, EINVAL,
2015                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2016                                                 (void *)items->type,
2017                                                 "udp header not found");
2018                         if (!udp->dst_port)
2019                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
2020                         if (!vxlan->vx_flags)
2021                                 vxlan->vx_flags =
2022                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
2023                         break;
2024                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2025                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
2026                         if (!udp)
2027                                 return rte_flow_error_set(error, EINVAL,
2028                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2029                                                 (void *)items->type,
2030                                                 "udp header not found");
2031                         if (!vxlan_gpe->proto)
2032                                 return rte_flow_error_set(error, EINVAL,
2033                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2034                                                 (void *)items->type,
2035                                                 "next protocol not found");
2036                         if (!udp->dst_port)
2037                                 udp->dst_port =
2038                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
2039                         if (!vxlan_gpe->vx_flags)
2040                                 vxlan_gpe->vx_flags =
2041                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
2042                         break;
2043                 case RTE_FLOW_ITEM_TYPE_GRE:
2044                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2045                         gre = (struct rte_gre_hdr *)&buf[temp_size];
2046                         if (!gre->proto)
2047                                 return rte_flow_error_set(error, EINVAL,
2048                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2049                                                 (void *)items->type,
2050                                                 "next protocol not found");
2051                         if (!ipv4 && !ipv6)
2052                                 return rte_flow_error_set(error, EINVAL,
2053                                                 RTE_FLOW_ERROR_TYPE_ACTION,
2054                                                 (void *)items->type,
2055                                                 "ip header not found");
2056                         if (ipv4 && !ipv4->next_proto_id)
2057                                 ipv4->next_proto_id = IPPROTO_GRE;
2058                         else if (ipv6 && !ipv6->proto)
2059                                 ipv6->proto = IPPROTO_GRE;
2060                         break;
2061                 case RTE_FLOW_ITEM_TYPE_VOID:
2062                         break;
2063                 default:
2064                         return rte_flow_error_set(error, EINVAL,
2065                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2066                                                   (void *)items->type,
2067                                                   "unsupported item type");
2068                         break;
2069                 }
2070                 temp_size += len;
2071         }
2072         *size = temp_size;
2073         return 0;
2074 }
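
/*
 * Illustrative sketch (hypothetical spec values): the items list below is
 * converted into a 50-byte VXLAN encapsulation header (14 eth + 20 ipv4 +
 * 8 udp + 8 vxlan); fields left at zero, e.g. ipv4.hdr.time_to_live and
 * udp.hdr.dst_port, receive the defaults applied above.
 *
 *   struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */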
2075
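/**
 * Zero the UDP checksum field of an IPv6/UDP encapsulation header, since it
 * is not recalculated by the device; IPv4 checksums are computed by HW, so
 * IPv4 buffers are returned untouched. Non-IP headers are rejected.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */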
2076 static int
2077 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
2078 {
2079         struct rte_ether_hdr *eth = NULL;
2080         struct rte_vlan_hdr *vlan = NULL;
2081         struct rte_ipv6_hdr *ipv6 = NULL;
2082         struct rte_udp_hdr *udp = NULL;
2083         char *next_hdr;
2084         uint16_t proto;
2085
2086         eth = (struct rte_ether_hdr *)data;
2087         next_hdr = (char *)(eth + 1);
2088         proto = rte_be_to_cpu_16(eth->ether_type);
2089
2090         /* VLAN skipping */
2091         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
2092                 vlan = (struct rte_vlan_hdr *)next_hdr;
2093                 proto = rte_be_to_cpu_16(vlan->eth_proto);
2094                 next_hdr += sizeof(struct rte_vlan_hdr);
2095         }
2096
2097         /* HW calculates the IPv4 checksum; no need to proceed. */
2098         if (proto == RTE_ETHER_TYPE_IPV4)
2099                 return 0;
2100
2101         /* Non-IPv4/IPv6 header, not supported. */
2102         if (proto != RTE_ETHER_TYPE_IPV6) {
2103                 return rte_flow_error_set(error, ENOTSUP,
2104                                           RTE_FLOW_ERROR_TYPE_ACTION,
2105                                           NULL, "Cannot offload non IPv4/IPv6");
2106         }
2107
2108         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
2109
2110         /* Ignore non-UDP traffic. */
2111         if (ipv6->proto != IPPROTO_UDP)
2112                 return 0;
2113
2114         udp = (struct rte_udp_hdr *)(ipv6 + 1);
2115         udp->dgram_cksum = 0;
2116
2117         return 0;
2118 }
2119
2120 /**
2121  * Convert L2 encap action to DV specification.
2122  *
2123  * @param[in] dev
2124  *   Pointer to rte_eth_dev structure.
2125  * @param[in] action
2126  *   Pointer to action structure.
2127  * @param[in, out] dev_flow
2128  *   Pointer to the mlx5_flow.
2129  * @param[in] transfer
2130  *   Mark if the flow is E-Switch flow.
2131  * @param[out] error
2132  *   Pointer to the error structure.
2133  *
2134  * @return
2135  *   0 on success, a negative errno value otherwise and rte_errno is set.
2136  */
2137 static int
2138 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2139                                const struct rte_flow_action *action,
2140                                struct mlx5_flow *dev_flow,
2141                                uint8_t transfer,
2142                                struct rte_flow_error *error)
2143 {
2144         const struct rte_flow_item *encap_data;
2145         const struct rte_flow_action_raw_encap *raw_encap_data;
2146         struct mlx5_flow_dv_encap_decap_resource res = {
2147                 .reformat_type =
2148                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2149                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2150                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2151         };
2152
2153         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2154                 raw_encap_data =
2155                         (const struct rte_flow_action_raw_encap *)action->conf;
2156                 res.size = raw_encap_data->size;
2157                 memcpy(res.buf, raw_encap_data->data, res.size);
2158                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2159                         return -rte_errno;
2160         } else {
2161                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2162                         encap_data =
2163                                 ((const struct rte_flow_action_vxlan_encap *)
2164                                                 action->conf)->definition;
2165                 else
2166                         encap_data =
2167                                 ((const struct rte_flow_action_nvgre_encap *)
2168                                                 action->conf)->definition;
2169                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2170                                                &res.size, error))
2171                         return -rte_errno;
2172         }
2173         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2174                 return rte_flow_error_set(error, EINVAL,
2175                                           RTE_FLOW_ERROR_TYPE_ACTION,
2176                                           NULL, "can't create L2 encap action");
2177         return 0;
2178 }
2179
2180 /**
2181  * Convert L2 decap action to DV specification.
2182  *
2183  * @param[in] dev
2184  *   Pointer to rte_eth_dev structure.
2185  * @param[in, out] dev_flow
2186  *   Pointer to the mlx5_flow.
2187  * @param[in] transfer
2188  *   Mark if the flow is E-Switch flow.
2189  * @param[out] error
2190  *   Pointer to the error structure.
2191  *
2192  * @return
2193  *   0 on success, a negative errno value otherwise and rte_errno is set.
2194  */
2195 static int
2196 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2197                                struct mlx5_flow *dev_flow,
2198                                uint8_t transfer,
2199                                struct rte_flow_error *error)
2200 {
2201         struct mlx5_flow_dv_encap_decap_resource res = {
2202                 .size = 0,
2203                 .reformat_type =
2204                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2205                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2206                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2207         };
2208
2209         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2210                 return rte_flow_error_set(error, EINVAL,
2211                                           RTE_FLOW_ERROR_TYPE_ACTION,
2212                                           NULL, "can't create L2 decap action");
2213         return 0;
2214 }
2215
2216 /**
2217  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2218  *
2219  * @param[in] dev
2220  *   Pointer to rte_eth_dev structure.
2221  * @param[in] action
2222  *   Pointer to action structure.
2223  * @param[in, out] dev_flow
2224  *   Pointer to the mlx5_flow.
2225  * @param[in] attr
2226  *   Pointer to the flow attributes.
2227  * @param[out] error
2228  *   Pointer to the error structure.
2229  *
2230  * @return
2231  *   0 on success, a negative errno value otherwise and rte_errno is set.
2232  */
2233 static int
2234 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2235                                 const struct rte_flow_action *action,
2236                                 struct mlx5_flow *dev_flow,
2237                                 const struct rte_flow_attr *attr,
2238                                 struct rte_flow_error *error)
2239 {
2240         const struct rte_flow_action_raw_encap *encap_data;
2241         struct mlx5_flow_dv_encap_decap_resource res;
2242
2243         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2244         res.size = encap_data->size;
2245         memcpy(res.buf, encap_data->data, res.size);
2246         res.reformat_type = attr->egress ?
2247                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2248                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2249         if (attr->transfer)
2250                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2251         else
2252                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2253                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2254         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2255                 return rte_flow_error_set(error, EINVAL,
2256                                           RTE_FLOW_ERROR_TYPE_ACTION,
2257                                           NULL, "can't create encap action");
2258         return 0;
2259 }
2260
2261 /**
2262  * Create action push VLAN.
2263  *
2264  * @param[in] dev
2265  *   Pointer to rte_eth_dev structure.
2266  * @param[in] attr
2267  *   Pointer to the flow attributes.
2268  * @param[in] vlan
2269  *   Pointer to the VLAN header (protocol and TCI) to push.
2270  * @param[in, out] dev_flow
2271  *   Pointer to the mlx5_flow.
2272  * @param[out] error
2273  *   Pointer to the error structure.
2274  *
2275  * @return
2276  *   0 on success, a negative errno value otherwise and rte_errno is set.
2277  */
2278 static int
2279 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2280                                 const struct rte_flow_attr *attr,
2281                                 const struct rte_vlan_hdr *vlan,
2282                                 struct mlx5_flow *dev_flow,
2283                                 struct rte_flow_error *error)
2284 {
2285         struct mlx5_flow_dv_push_vlan_action_resource res;
2286
2287         res.vlan_tag =
2288                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2289                                  vlan->vlan_tci);
2290         if (attr->transfer)
2291                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2292         else
2293                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2294                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2295         return flow_dv_push_vlan_action_resource_register
2296                                             (dev, &res, dev_flow, error);
2297 }
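
/*
 * Illustrative testpmd sketch (hypothetical values): a pushed VLAN header
 * is typically completed by VID/PCP set actions in the same rule:
 *
 *   flow create 0 egress pattern eth / end
 *        actions of_push_vlan ethertype 0x8100 / of_set_vlan_vid vlan_vid 100
 *                / of_set_vlan_pcp vlan_pcp 3 / end
 */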
2298
2299 /**
2300  * Validate the modify-header actions.
2301  *
2302  * @param[in] action_flags
2303  *   Holds the actions detected until now.
2304  * @param[in] action
2305  *   Pointer to the modify action.
2306  * @param[out] error
2307  *   Pointer to error structure.
2308  *
2309  * @return
2310  *   0 on success, a negative errno value otherwise and rte_errno is set.
2311  */
2312 static int
2313 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2314                                    const struct rte_flow_action *action,
2315                                    struct rte_flow_error *error)
2316 {
2317         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2318                 return rte_flow_error_set(error, EINVAL,
2319                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2320                                           NULL, "action configuration not set");
2321         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2322                 return rte_flow_error_set(error, EINVAL,
2323                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2324                                           "can't have encap action before"
2325                                           " modify action");
2326         return 0;
2327 }
2328
2329 /**
2330  * Validate the modify-header MAC address actions.
2331  *
2332  * @param[in] action_flags
2333  *   Holds the actions detected until now.
2334  * @param[in] action
2335  *   Pointer to the modify action.
2336  * @param[in] item_flags
2337  *   Holds the items detected.
2338  * @param[out] error
2339  *   Pointer to error structure.
2340  *
2341  * @return
2342  *   0 on success, a negative errno value otherwise and rte_errno is set.
2343  */
2344 static int
2345 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2346                                    const struct rte_flow_action *action,
2347                                    const uint64_t item_flags,
2348                                    struct rte_flow_error *error)
2349 {
2350         int ret = 0;
2351
2352         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2353         if (!ret) {
2354                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2355                         return rte_flow_error_set(error, EINVAL,
2356                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2357                                                   NULL,
2358                                                   "no L2 item in pattern");
2359         }
2360         return ret;
2361 }
2362
2363 /**
2364  * Validate the modify-header IPv4 address actions.
2365  *
2366  * @param[in] action_flags
2367  *   Holds the actions detected until now.
2368  * @param[in] action
2369  *   Pointer to the modify action.
2370  * @param[in] item_flags
2371  *   Holds the items detected.
2372  * @param[out] error
2373  *   Pointer to error structure.
2374  *
2375  * @return
2376  *   0 on success, a negative errno value otherwise and rte_errno is set.
2377  */
2378 static int
2379 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2380                                     const struct rte_flow_action *action,
2381                                     const uint64_t item_flags,
2382                                     struct rte_flow_error *error)
2383 {
2384         int ret = 0;
2385
2386         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2387         if (!ret) {
2388                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2389                         return rte_flow_error_set(error, EINVAL,
2390                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2391                                                   NULL,
2392                                                   "no ipv4 item in pattern");
2393         }
2394         return ret;
2395 }
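
/*
 * Illustrative testpmd sketch (hypothetical values): modify-header actions
 * require the corresponding layer in the pattern, e.g. set_ipv4_src needs
 * an IPv4 item:
 *
 *   flow create 0 ingress pattern eth / ipv4 / end
 *        actions set_ipv4_src ipv4_addr 10.0.0.1 / queue index 0 / end
 */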
2396
2397 /**
2398  * Validate the modify-header IPv6 address actions.
2399  *
2400  * @param[in] action_flags
2401  *   Holds the actions detected until now.
2402  * @param[in] action
2403  *   Pointer to the modify action.
2404  * @param[in] item_flags
2405  *   Holds the items detected.
2406  * @param[out] error
2407  *   Pointer to error structure.
2408  *
2409  * @return
2410  *   0 on success, a negative errno value otherwise and rte_errno is set.
2411  */
2412 static int
2413 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2414                                     const struct rte_flow_action *action,
2415                                     const uint64_t item_flags,
2416                                     struct rte_flow_error *error)
2417 {
2418         int ret = 0;
2419
2420         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2421         if (!ret) {
2422                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2423                         return rte_flow_error_set(error, EINVAL,
2424                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2425                                                   NULL,
2426                                                   "no ipv6 item in pattern");
2427         }
2428         return ret;
2429 }
2430
2431 /**
2432  * Validate the modify-header TP actions.
2433  *
2434  * @param[in] action_flags
2435  *   Holds the actions detected until now.
2436  * @param[in] action
2437  *   Pointer to the modify action.
2438  * @param[in] item_flags
2439  *   Holds the items detected.
2440  * @param[out] error
2441  *   Pointer to error structure.
2442  *
2443  * @return
2444  *   0 on success, a negative errno value otherwise and rte_errno is set.
2445  */
2446 static int
2447 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2448                                   const struct rte_flow_action *action,
2449                                   const uint64_t item_flags,
2450                                   struct rte_flow_error *error)
2451 {
2452         int ret = 0;
2453
2454         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2455         if (!ret) {
2456                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2457                         return rte_flow_error_set(error, EINVAL,
2458                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2459                                                   NULL, "no transport layer "
2460                                                   "in pattern");
2461         }
2462         return ret;
2463 }
2464
2465 /**
2466  * Validate the modify-header actions of increment/decrement
2467  * TCP Sequence-number.
2468  *
2469  * @param[in] action_flags
2470  *   Holds the actions detected until now.
2471  * @param[in] action
2472  *   Pointer to the modify action.
2473  * @param[in] item_flags
2474  *   Holds the items detected.
2475  * @param[out] error
2476  *   Pointer to error structure.
2477  *
2478  * @return
2479  *   0 on success, a negative errno value otherwise and rte_errno is set.
2480  */
2481 static int
2482 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2483                                        const struct rte_flow_action *action,
2484                                        const uint64_t item_flags,
2485                                        struct rte_flow_error *error)
2486 {
2487         int ret = 0;
2488
2489         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2490         if (!ret) {
2491                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2492                         return rte_flow_error_set(error, EINVAL,
2493                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2494                                                   NULL, "no TCP item in"
2495                                                   " pattern");
2496                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2497                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2498                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2499                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2500                         return rte_flow_error_set(error, EINVAL,
2501                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2502                                                   NULL,
2503                                                   "cannot decrease and increase"
2504                                                   " TCP sequence number"
2505                                                   " at the same time");
2506         }
2507         return ret;
2508 }
2509
2510 /**
2511  * Validate the modify-header actions of increment/decrement
2512  * TCP Acknowledgment number.
2513  *
2514  * @param[in] action_flags
2515  *   Holds the actions detected until now.
2516  * @param[in] action
2517  *   Pointer to the modify action.
2518  * @param[in] item_flags
2519  *   Holds the items detected.
2520  * @param[out] error
2521  *   Pointer to error structure.
2522  *
2523  * @return
2524  *   0 on success, a negative errno value otherwise and rte_errno is set.
2525  */
2526 static int
2527 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2528                                        const struct rte_flow_action *action,
2529                                        const uint64_t item_flags,
2530                                        struct rte_flow_error *error)
2531 {
2532         int ret = 0;
2533
2534         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2535         if (!ret) {
2536                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2537                         return rte_flow_error_set(error, EINVAL,
2538                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2539                                                   NULL, "no TCP item in"
2540                                                   " pattern");
2541                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2542                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2543                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2544                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2545                         return rte_flow_error_set(error, EINVAL,
2546                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2547                                                   NULL,
2548                                                   "cannot decrease and increase"
2549                                                   " TCP acknowledgment number"
2550                                                   " at the same time");
2551         }
2552         return ret;
2553 }
2554
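/*
 * A minimal usage sketch of what the two validators above accept: an
 * outer TCP item in the pattern plus a single INC_TCP_SEQ action. The
 * action conf is assumed to be a pointer to a big-endian 32-bit
 * increment, per rte_flow.h; the function name is illustrative and port
 * setup/error handling are omitted.
 */
static struct rte_flow *
example_inc_tcp_seq_rule(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        rte_be32_t inc = RTE_BE32(1);
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ, .conf = &inc },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                  .conf = &(struct rte_flow_action_queue){ .index = 0 } },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
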
2555 /**
2556  * Validate the modify-header TTL actions.
2557  *
2558  * @param[in] action_flags
2559  *   Holds the actions detected until now.
2560  * @param[in] action
2561  *   Pointer to the modify action.
2562  * @param[in] item_flags
2563  *   Holds the items detected.
2564  * @param[out] error
2565  *   Pointer to error structure.
2566  *
2567  * @return
2568  *   0 on success, a negative errno value otherwise and rte_errno is set.
2569  */
2570 static int
2571 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2572                                    const struct rte_flow_action *action,
2573                                    const uint64_t item_flags,
2574                                    struct rte_flow_error *error)
2575 {
2576         int ret = 0;
2577
2578         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2579         if (!ret) {
2580                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2581                         return rte_flow_error_set(error, EINVAL,
2582                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2583                                                   NULL,
2584                                                   "no IP protocol in pattern");
2585         }
2586         return ret;
2587 }
2588
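/*
 * A sketch of inputs the TTL validator above accepts (illustrative
 * arrays only): the pattern carries an L3 item, satisfying the
 * MLX5_FLOW_LAYER_L3 check, and the rule decrements TTL. DEC_TTL needs
 * no conf; a SET_TTL action would take a struct rte_flow_action_set_ttl
 * instead.
 */
static const struct rte_flow_item ttl_pattern[] __rte_unused = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action ttl_actions[] __rte_unused = {
        { .type = RTE_FLOW_ACTION_TYPE_DEC_TTL },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
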
2589 /**
2590  * Validate jump action.
2591  *
2592  * @param[in] action
2593  *   Pointer to the jump action.
2594  * @param[in] action_flags
2595  *   Holds the actions detected until now.
2596  * @param[in] attributes
2597  *   Pointer to flow attributes.
2598  * @param[in] external
2599  *   Action belongs to a flow rule created by a request external to the PMD.
2600  * @param[out] error
2601  *   Pointer to error structure.
2602  *
2603  * @return
2604  *   0 on success, a negative errno value otherwise and rte_errno is set.
2605  */
2606 static int
2607 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2608                              uint64_t action_flags,
2609                              const struct rte_flow_attr *attributes,
2610                              bool external, struct rte_flow_error *error)
2611 {
2612         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2613                                                     MLX5_MAX_TABLES;
2614         uint32_t target_group, table;
2615         int ret = 0;
2616
2617         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2618                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2619                 return rte_flow_error_set(error, EINVAL,
2620                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2621                                           "can't have 2 fate actions in"
2622                                           " the same flow");
2623         if (!action->conf)
2624                 return rte_flow_error_set(error, EINVAL,
2625                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2626                                           NULL, "action configuration not set");
2627         target_group =
2628                 ((const struct rte_flow_action_jump *)action->conf)->group;
2629         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2630                                        &table, error);
2631         if (ret)
2632                 return ret;
2633         if (table >= max_group)
2634                 return rte_flow_error_set(error, EINVAL,
2635                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2636                                           "target group index out of range");
2637         if (attributes->group >= target_group)
2638                 return rte_flow_error_set(error, EINVAL,
2639                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2640                                           "target group must be higher than"
2641                                           " the current flow group");
2642         return 0;
2643 }
2644
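/*
 * A usage sketch that satisfies the jump validation above (illustrative
 * names): the rule lives in group 0 and jumps to the strictly higher
 * group 1, which must also fall inside the device table range.
 */
static int
example_jump_to_group(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1, .group = 0 };
        struct rte_flow_action_jump jump = { .group = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_create(port_id, &attr, pattern, actions, &err) ?
               0 : -rte_errno;
}
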
2645 /**
2646  * Validate the port_id action.
2647  *
2648  * @param[in] dev
2649  *   Pointer to rte_eth_dev structure.
2650  * @param[in] action_flags
2651  *   Bit-fields that hold the actions detected until now.
2652  * @param[in] action
2653  *   Port_id RTE action structure.
2654  * @param[in] attr
2655  *   Attributes of flow that includes this action.
2656  * @param[out] error
2657  *   Pointer to error structure.
2658  *
2659  * @return
2660  *   0 on success, a negative errno value otherwise and rte_errno is set.
2661  */
2662 static int
2663 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2664                                 uint64_t action_flags,
2665                                 const struct rte_flow_action *action,
2666                                 const struct rte_flow_attr *attr,
2667                                 struct rte_flow_error *error)
2668 {
2669         const struct rte_flow_action_port_id *port_id;
2670         struct mlx5_priv *act_priv;
2671         struct mlx5_priv *dev_priv;
2672         uint16_t port;
2673
2674         if (!attr->transfer)
2675                 return rte_flow_error_set(error, ENOTSUP,
2676                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2677                                           NULL,
2678                                           "port id action is valid in transfer"
2679                                           " mode only");
2680         if (!action || !action->conf)
2681                 return rte_flow_error_set(error, ENOTSUP,
2682                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2683                                           NULL,
2684                                           "port id action parameters must be"
2685                                           " specified");
2686         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2687                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2688                 return rte_flow_error_set(error, EINVAL,
2689                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2690                                           "can have only one fate action in"
2691                                           " a flow");
2692         dev_priv = mlx5_dev_to_eswitch_info(dev);
2693         if (!dev_priv)
2694                 return rte_flow_error_set(error, rte_errno,
2695                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2696                                           NULL,
2697                                           "failed to obtain E-Switch info");
2698         port_id = action->conf;
2699         port = port_id->original ? dev->data->port_id : port_id->id;
2700         act_priv = mlx5_port_to_eswitch_info(port);
2701         if (!act_priv)
2702                 return rte_flow_error_set
2703                                 (error, rte_errno,
2704                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2705                                  "failed to obtain E-Switch port id for port");
2706         if (act_priv->domain_id != dev_priv->domain_id)
2707                 return rte_flow_error_set
2708                                 (error, EINVAL,
2709                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2710                                  "port does not belong to"
2711                                  " E-Switch being configured");
2712         return 0;
2713 }
2714
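/*
 * A sketch of a port_id action conf the validator above accepts
 * (illustrative values): the attributes must carry transfer, the conf
 * must be present, and the destination port must share the E-Switch
 * domain of the device the rule is created on.
 */
static const struct rte_flow_attr transfer_attr __rte_unused = {
        .ingress = 1,
        .transfer = 1, /* port_id is valid in transfer mode only */
};
static const struct rte_flow_action_port_id dst_port __rte_unused = {
        .original = 0, /* use .id below rather than the incoming port */
        .id = 1,       /* peer DPDK port on the same E-Switch */
};
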
2715 /**
2716  * Find existing modify-header resource or create and register a new one.
2717  *
2718  * @param[in, out] dev
2719  *   Pointer to rte_eth_dev structure.
2720  * @param[in, out] resource
2721  *   Pointer to modify-header resource.
2722  * @param[in, out] dev_flow
2723  *   Pointer to the dev_flow.
2724  * @param[out] error
2725  *   Pointer to error structure.
2726  *
2727  * @return
2728  *   0 on success, a negative errno value otherwise and rte_errno is set.
2729  */
2730 static int
2731 flow_dv_modify_hdr_resource_register
2732                         (struct rte_eth_dev *dev,
2733                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2734                          struct mlx5_flow *dev_flow,
2735                          struct rte_flow_error *error)
2736 {
2737         struct mlx5_priv *priv = dev->data->dev_private;
2738         struct mlx5_ibv_shared *sh = priv->sh;
2739         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2740         struct mlx5dv_dr_domain *ns;
2741
2742         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2743                 ns = sh->fdb_domain;
2744         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2745                 ns = sh->tx_domain;
2746         else
2747                 ns = sh->rx_domain;
2748         resource->flags =
2749                 dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2750         /* Lookup a matching resource from cache. */
2751         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2752                 if (resource->ft_type == cache_resource->ft_type &&
2753                     resource->actions_num == cache_resource->actions_num &&
2754                     resource->flags == cache_resource->flags &&
2755                     !memcmp((const void *)resource->actions,
2756                             (const void *)cache_resource->actions,
2757                             (resource->actions_num *
2758                                             sizeof(resource->actions[0])))) {
2759                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2760                                 (void *)cache_resource,
2761                                 rte_atomic32_read(&cache_resource->refcnt));
2762                         rte_atomic32_inc(&cache_resource->refcnt);
2763                         dev_flow->dv.modify_hdr = cache_resource;
2764                         return 0;
2765                 }
2766         }
2767         /* Register new modify-header resource. */
2768         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2769         if (!cache_resource)
2770                 return rte_flow_error_set(error, ENOMEM,
2771                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2772                                           "cannot allocate resource memory");
2773         *cache_resource = *resource;
2774         cache_resource->verbs_action =
2775                 mlx5_glue->dv_create_flow_action_modify_header
2776                                         (sh->ctx, cache_resource->ft_type,
2777                                          ns, cache_resource->flags,
2778                                          cache_resource->actions_num *
2779                                          sizeof(cache_resource->actions[0]),
2780                                          (uint64_t *)cache_resource->actions);
2781         if (!cache_resource->verbs_action) {
2782                 rte_free(cache_resource);
2783                 return rte_flow_error_set(error, ENOMEM,
2784                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2785                                           NULL, "cannot create action");
2786         }
2787         rte_atomic32_init(&cache_resource->refcnt);
2788         rte_atomic32_inc(&cache_resource->refcnt);
2789         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2790         dev_flow->dv.modify_hdr = cache_resource;
2791         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2792                 (void *)cache_resource,
2793                 rte_atomic32_read(&cache_resource->refcnt));
2794         return 0;
2795 }
2796
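/*
 * The function above follows a pattern that recurs throughout this file:
 * look an immutable resource up in a per-device cache, take a reference
 * on a hit, create-and-insert on a miss. A self-contained model with
 * simplified types (all names hypothetical; <sys/queue.h> is already
 * included at the top of this file):
 */
#include <stdlib.h>

struct ex_res {
        LIST_ENTRY(ex_res) next;
        int refcnt;
        int key; /* stands in for ft_type/flags/actions */
};
LIST_HEAD(ex_res_list, ex_res);

static struct ex_res *
ex_res_register(struct ex_res_list *cache, const struct ex_res *tmpl)
{
        struct ex_res *r;

        LIST_FOREACH(r, cache, next) {
                if (r->key == tmpl->key) {
                        /* Cache hit: share the existing resource. */
                        r->refcnt++;
                        return r;
                }
        }
        /* Miss: create a new resource and insert it. */
        r = calloc(1, sizeof(*r));
        if (!r)
                return NULL;
        *r = *tmpl;
        r->refcnt = 1;
        LIST_INSERT_HEAD(cache, r, next);
        return r;
}
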
2797 #define MLX5_CNT_CONTAINER_RESIZE 64
2798
2799 /**
2800  * Get or create a flow counter.
2801  *
2802  * @param[in] dev
2803  *   Pointer to the Ethernet device structure.
2804  * @param[in] shared
2805  *   Indicate if this counter is shared with other flows.
2806  * @param[in] id
2807  *   Counter identifier.
2808  *
2809  * @return
2810  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2811  */
2812 static struct mlx5_flow_counter *
2813 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2814                                uint32_t id)
2815 {
2816         struct mlx5_priv *priv = dev->data->dev_private;
2817         struct mlx5_flow_counter *cnt = NULL;
2818         struct mlx5_devx_obj *dcs = NULL;
2819
2820         if (!priv->config.devx) {
2821                 rte_errno = ENOTSUP;
2822                 return NULL;
2823         }
2824         if (shared) {
2825                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2826                         if (cnt->shared && cnt->id == id) {
2827                                 cnt->ref_cnt++;
2828                                 return cnt;
2829                         }
2830                 }
2831         }
2832         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2833         if (!dcs)
2834                 return NULL;
2835         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2836         if (!cnt) {
2837                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2838                 rte_errno = ENOMEM;
2839                 return NULL;
2840         }
2841         struct mlx5_flow_counter tmpl = {
2842                 .shared = shared,
2843                 .ref_cnt = 1,
2844                 .id = id,
2845                 .dcs = dcs,
2846         };
2847         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2848         if (!tmpl.action) {
2849                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2850                 rte_errno = errno;
2851                 rte_free(cnt);
2852                 return NULL;
2853         }
2854         *cnt = tmpl;
2855         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2856         return cnt;
2857 }
2858
2859 /**
2860  * Release a flow counter.
2861  *
2862  * @param[in] dev
2863  *   Pointer to the Ethernet device structure.
2864  * @param[in] counter
2865  *   Pointer to the counter handler.
2866  */
2867 static void
2868 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2869                                  struct mlx5_flow_counter *counter)
2870 {
2871         struct mlx5_priv *priv = dev->data->dev_private;
2872
2873         if (!counter)
2874                 return;
2875         if (--counter->ref_cnt == 0) {
2876                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2877                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2878                 rte_free(counter);
2879         }
2880 }
2881
2882 /**
2883  * Query a devx flow counter.
2884  *
2885  * @param[in] dev
2886  *   Pointer to the Ethernet device structure.
2887  * @param[in] cnt
2888  *   Pointer to the flow counter.
2889  * @param[out] pkts
2890  *   The statistics value of packets.
2891  * @param[out] bytes
2892  *   The statistics value of bytes.
2893  *
2894  * @return
2895  *   0 on success, otherwise a negative errno value and rte_errno is set.
2896  */
2897 static inline int
2898 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2899                               struct mlx5_flow_counter *cnt, uint64_t *pkts,
2900                               uint64_t *bytes)
2901 {
2902         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2903                                                 0, NULL, NULL, 0);
2904 }
2905
2906 /**
2907  * Get a pool by a counter.
2908  *
2909  * @param[in] cnt
2910  *   Pointer to the counter.
2911  *
2912  * @return
2913  *   The counter pool.
2914  */
2915 static struct mlx5_flow_counter_pool *
2916 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2917 {
2918         if (!cnt->batch) {
2919                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2920                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2921         }
2922         return cnt->pool;
2923 }
2924
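/*
 * Why the pointer arithmetic above works: flow_dv_pool_create() below
 * allocates the pool header and its counters_raw[] array in one block,
 * so the array starts right after the header. Stepping a counter pointer
 * back to index 0 and then back over one header recovers the pool. A
 * self-contained model (simplified types, hypothetical names):
 */
#include <assert.h>
#include <stdlib.h>

struct ex_counter { int id; };
struct ex_pool {
        int hdr;                          /* stands in for the pool header */
        struct ex_counter counters_raw[]; /* trails the header, as in PMD */
};

static void
ex_pool_layout_check(void)
{
        struct ex_pool *p = calloc(1, sizeof(*p) +
                                   4 * sizeof(struct ex_counter));
        int idx = 2;                          /* dcs->id % pool size */
        struct ex_counter *c;

        if (!p)
                return;
        c = &p->counters_raw[idx];
        c -= idx;                             /* back to counters_raw[0] */
        assert((struct ex_pool *)c - 1 == p); /* back over the header */
        free(p);
}
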
2925 /**
2926  * Get a pool by devx counter ID.
2927  *
2928  * @param[in] cont
2929  *   Pointer to the counter container.
2930  * @param[in] id
2931  *   The counter devx ID.
2932  *
2933  * @return
2934  *   The counter pool pointer if it exists, NULL otherwise.
2935  */
2936 static struct mlx5_flow_counter_pool *
2937 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2938 {
2939         struct mlx5_flow_counter_pool *pool;
2940
2941         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2942                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2943                                 MLX5_COUNTERS_PER_POOL;
2944
2945                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2946                         return pool;
2947         }
2948         return NULL;
2949 }
2950
2951 /**
2952  * Allocate memory for the counter values, wrapped by all the needed
2953  * management structures.
2954  *
2955  * @param[in] dev
2956  *   Pointer to the Ethernet device structure.
2957  * @param[in] raws_n
2958  *   Number of raw memory areas, each holding MLX5_COUNTERS_PER_POOL counters.
2959  *
2960  * @return
2961  *   The new memory management pointer on success, otherwise NULL and rte_errno
2962  *   is set.
2963  */
2964 static struct mlx5_counter_stats_mem_mng *
2965 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2966 {
2967         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2968                                         (dev->data->dev_private))->sh;
2969         struct mlx5_devx_mkey_attr mkey_attr;
2970         struct mlx5_counter_stats_mem_mng *mem_mng;
2971         volatile struct flow_counter_stats *raw_data;
2972         int size = (sizeof(struct flow_counter_stats) *
2973                         MLX5_COUNTERS_PER_POOL +
2974                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2975                         sizeof(struct mlx5_counter_stats_mem_mng);
2976         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2977         int i;
2978
2979         if (!mem) {
2980                 rte_errno = ENOMEM;
2981                 return NULL;
2982         }
2983         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2984         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2985         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2986                                                  IBV_ACCESS_LOCAL_WRITE);
2987         if (!mem_mng->umem) {
2988                 rte_errno = errno;
2989                 rte_free(mem);
2990                 return NULL;
2991         }
2992         mkey_attr.addr = (uintptr_t)mem;
2993         mkey_attr.size = size;
2994         mkey_attr.umem_id = mem_mng->umem->umem_id;
2995         mkey_attr.pd = sh->pdn;
2996         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2997         if (!mem_mng->dm) {
2998                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2999                 rte_errno = errno;
3000                 rte_free(mem);
3001                 return NULL;
3002         }
3003         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
3004         raw_data = (volatile struct flow_counter_stats *)mem;
3005         for (i = 0; i < raws_n; ++i) {
3006                 mem_mng->raws[i].mem_mng = mem_mng;
3007                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
3008         }
3009         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
3010         return mem_mng;
3011 }
3012
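/*
 * Layout of the single allocation made above, as implied by the size
 * arithmetic in the function (one contiguous, page-aligned block with
 * three regions; CPP = MLX5_COUNTERS_PER_POOL):
 *
 *   mem
 *   +---------------------------------------------+
 *   | raws_n * CPP * sizeof(flow_counter_stats)   |  raw counter values;
 *   |                                             |  only this region is
 *   |                                             |  registered as umem/mkey
 *   +---------------------------------------------+
 *   | raws_n * sizeof(mlx5_counter_stats_raw)     |  mem_mng->raws[]
 *   +---------------------------------------------+
 *   | sizeof(mlx5_counter_stats_mem_mng)          |  mem_mng itself
 *   +---------------------------------------------+
 *
 * The raw descriptors and the management struct ride in the tail of the
 * same allocation, after the device-visible data.
 */
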
3013 /**
3014  * Resize a counter container.
3015  *
3016  * @param[in] dev
3017  *   Pointer to the Ethernet device structure.
3018  * @param[in] batch
3019  *   Whether the pool holds counters that were allocated by a batch command.
3020  *
3021  * @return
3022  *   The new container pointer on success, otherwise NULL and rte_errno is set.
3023  */
3024 static struct mlx5_pools_container *
3025 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
3026 {
3027         struct mlx5_priv *priv = dev->data->dev_private;
3028         struct mlx5_pools_container *cont =
3029                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
3030         struct mlx5_pools_container *new_cont =
3031                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
3032         struct mlx5_counter_stats_mem_mng *mem_mng;
3033         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
3034         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
3035         int i;
3036
3037         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
3038                 /* The last resize hasn't been detected by the host thread yet. */
3039                 rte_errno = EAGAIN;
3040                 return NULL;
3041         }
3042         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
3043         if (!new_cont->pools) {
3044                 rte_errno = ENOMEM;
3045                 return NULL;
3046         }
3047         if (cont->n)
3048                 memcpy(new_cont->pools, cont->pools, cont->n *
3049                        sizeof(struct mlx5_flow_counter_pool *));
3050         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
3051                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
3052         if (!mem_mng) {
3053                 rte_free(new_cont->pools);
3054                 return NULL;
3055         }
3056         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
3057                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
3058                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
3059                                  i, next);
3060         new_cont->n = resize;
3061         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
3062         TAILQ_INIT(&new_cont->pool_list);
3063         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
3064         new_cont->init_mem_mng = mem_mng;
3065         rte_cio_wmb();
3066         /* Flip the master container. */
3067         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
3068         return new_cont;
3069 }
3070
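/*
 * A reduced model of the master/unused container flip performed above
 * (hypothetical layout; the PMD hides the copy selection behind its
 * MLX5_CNT_CONTAINER() macros): writers fill the unused copy, publish it
 * with a write barrier, then flip one bit so readers pick up the new
 * copy.
 */
#include <stdint.h>

struct ex_container { int n; /* ... */ };

struct ex_cmng {
        uint8_t mhi[2];                 /* master copy index per batch type */
        struct ex_container cont[2][2]; /* two copies per batch type */
};

static __rte_unused struct ex_container *
ex_master(struct ex_cmng *c, int batch)
{
        return &c->cont[batch][c->mhi[batch] & 1];
}

static __rte_unused void
ex_flip(struct ex_cmng *c, int batch)
{
        /* All stores to the unused copy must be visible before the flip
         * (rte_cio_wmb() in the function above). */
        __atomic_thread_fence(__ATOMIC_RELEASE);
        c->mhi[batch] ^= (uint8_t)1;
}
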
3071 /**
3072  * Query a devx flow counter.
3073  *
3074  * @param[in] dev
3075  *   Pointer to the Ethernet device structure.
3076  * @param[in] cnt
3077  *   Pointer to the flow counter.
3078  * @param[out] pkts
3079  *   The statistics value of packets.
3080  * @param[out] bytes
3081  *   The statistics value of bytes.
3082  *
3083  * @return
3084  *   0 on success, otherwise a negative errno value and rte_errno is set.
3085  */
3086 static inline int
3087 _flow_dv_query_count(struct rte_eth_dev *dev,
3088                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
3089                      uint64_t *bytes)
3090 {
3091         struct mlx5_priv *priv = dev->data->dev_private;
3092         struct mlx5_flow_counter_pool *pool =
3093                         flow_dv_counter_pool_get(cnt);
3094         int offset = cnt - &pool->counters_raw[0];
3095
3096         if (priv->counter_fallback)
3097                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
3098
3099         rte_spinlock_lock(&pool->sl);
3100         /*
3101          * A single-counter allocation may return an ID smaller than the
3102          * ones currently being read by the host thread for this pool.
3103          * In that case the new counter's values must be reported as 0.
3104          */
3105         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
3106                 *pkts = 0;
3107                 *bytes = 0;
3108         } else {
3109                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
3110                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
3111         }
3112         rte_spinlock_unlock(&pool->sl);
3113         return 0;
3114 }
3115
3116 /**
3117  * Create and initialize a new counter pool.
3118  *
3119  * @param[in] dev
3120  *   Pointer to the Ethernet device structure.
3121  * @param[in] dcs
3122  *   The devX counter handle.
3123  * @param[in] batch
3124  *   Whether the pool holds counters that were allocated by a batch command.
3125  *
3126  * @return
3127  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
3128  */
3129 static struct mlx5_flow_counter_pool *
3130 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
3131                     uint32_t batch)
3132 {
3133         struct mlx5_priv *priv = dev->data->dev_private;
3134         struct mlx5_flow_counter_pool *pool;
3135         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3136                                                                0);
3137         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
3138         uint32_t size;
3139
3140         if (cont->n == n_valid) {
3141                 cont = flow_dv_container_resize(dev, batch);
3142                 if (!cont)
3143                         return NULL;
3144         }
3145         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
3146                         sizeof(struct mlx5_flow_counter);
3147         pool = rte_calloc(__func__, 1, size, 0);
3148         if (!pool) {
3149                 rte_errno = ENOMEM;
3150                 return NULL;
3151         }
3152         pool->min_dcs = dcs;
3153         pool->raw = cont->init_mem_mng->raws + n_valid %
3154                                                      MLX5_CNT_CONTAINER_RESIZE;
3155         pool->raw_hw = NULL;
3156         rte_spinlock_init(&pool->sl);
3157         /*
3158          * Newly allocated (zeroed) counters carry query generation 0; a
3159          * pool generation of 2 makes them all valid for allocation.
3160          */
3161         rte_atomic64_set(&pool->query_gen, 0x2);
3162         TAILQ_INIT(&pool->counters);
3163         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3164         cont->pools[n_valid] = pool;
3165         /* Pool initialization must be updated before host thread access. */
3166         rte_cio_wmb();
3167         rte_atomic16_add(&cont->n_valid, 1);
3168         return pool;
3169 }
3170
3171 /**
3172  * Prepare a new counter and/or a new counter pool.
3173  *
3174  * @param[in] dev
3175  *   Pointer to the Ethernet device structure.
3176  * @param[out] cnt_free
3177  *   Where to put the pointer of a new counter.
3178  * @param[in] batch
3179  *   Whether the pool holds counters that were allocated by a batch command.
3180  *
3181  * @return
3182  *   The free counter's pool pointer, with @p cnt_free set, on success,
3183  *   NULL otherwise and rte_errno is set.
3184  */
3185 static struct mlx5_flow_counter_pool *
3186 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
3187                              struct mlx5_flow_counter **cnt_free,
3188                              uint32_t batch)
3189 {
3190         struct mlx5_priv *priv = dev->data->dev_private;
3191         struct mlx5_flow_counter_pool *pool;
3192         struct mlx5_devx_obj *dcs = NULL;
3193         struct mlx5_flow_counter *cnt;
3194         uint32_t i;
3195
3196         if (!batch) {
3197                 /* bulk_bitmap must be 0 for single counter allocation. */
3198                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3199                 if (!dcs)
3200                         return NULL;
3201                 pool = flow_dv_find_pool_by_id
3202                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
3203                 if (!pool) {
3204                         pool = flow_dv_pool_create(dev, dcs, batch);
3205                         if (!pool) {
3206                                 mlx5_devx_cmd_destroy(dcs);
3207                                 return NULL;
3208                         }
3209                 } else if (dcs->id < pool->min_dcs->id) {
3210                         rte_atomic64_set(&pool->a64_dcs,
3211                                          (int64_t)(uintptr_t)dcs);
3212                 }
3213                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
3214                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3215                 cnt->dcs = dcs;
3216                 *cnt_free = cnt;
3217                 return pool;
3218         }
3219         /* bulk_bitmap is in units of 128 counters; 0x4 requests 4 * 128. */
3220         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
3221                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
3222         if (!dcs) {
3223                 rte_errno = ENODATA;
3224                 return NULL;
3225         }
3226         pool = flow_dv_pool_create(dev, dcs, batch);
3227         if (!pool) {
3228                 mlx5_devx_cmd_destroy(dcs);
3229                 return NULL;
3230         }
3231         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3232                 cnt = &pool->counters_raw[i];
3233                 cnt->pool = pool;
3234                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3235         }
3236         *cnt_free = &pool->counters_raw[0];
3237         return pool;
3238 }
3239
3240 /**
3241  * Search for an existing shared counter.
3242  *
3243  * @param[in] cont
3244  *   Pointer to the relevant counter pool container.
3245  * @param[in] id
3246  *   The shared counter ID to search.
3247  *
3248  * @return
3249  *   NULL if not found, otherwise pointer to the shared counter.
3250  */
3251 static struct mlx5_flow_counter *
3252 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3253                               uint32_t id)
3254 {
3255         struct mlx5_flow_counter *cnt;
3256         struct mlx5_flow_counter_pool *pool;
3257         int i;
3258
3259         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3260                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3261                         cnt = &pool->counters_raw[i];
3262                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3263                                 return cnt;
3264                 }
3265         }
3266         return NULL;
3267 }
3268
3269 /**
3270  * Allocate a flow counter.
3271  *
3272  * @param[in] dev
3273  *   Pointer to the Ethernet device structure.
3274  * @param[in] shared
3275  *   Indicate if this counter is shared with other flows.
3276  * @param[in] id
3277  *   Counter identifier.
3278  * @param[in] group
3279  *   Counter flow group.
3280  *
3281  * @return
3282  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
3283  */
3284 static struct mlx5_flow_counter *
3285 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3286                       uint16_t group)
3287 {
3288         struct mlx5_priv *priv = dev->data->dev_private;
3289         struct mlx5_flow_counter_pool *pool = NULL;
3290         struct mlx5_flow_counter *cnt_free = NULL;
3291         /*
3292          * Currently a group 0 flow counter cannot be assigned to a flow if it
3293          * is not the first one in the batch counter allocation, so it is
3294          * better to allocate counters one by one for these flows in a
3295          * separate container.
3296          * A counter can be shared between different groups, so shared
3297          * counters must be taken from the single-counter container.
3298          */
3299         uint32_t batch = (group && !shared) ? 1 : 0;
3300         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3301                                                                0);
3302
3303         if (priv->counter_fallback)
3304                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3305         if (!priv->config.devx) {
3306                 rte_errno = ENOTSUP;
3307                 return NULL;
3308         }
3309         if (shared) {
3310                 cnt_free = flow_dv_counter_shared_search(cont, id);
3311                 if (cnt_free) {
3312                         if (cnt_free->ref_cnt + 1 == 0) {
3313                                 rte_errno = E2BIG;
3314                                 return NULL;
3315                         }
3316                         cnt_free->ref_cnt++;
3317                         return cnt_free;
3318                 }
3319         }
3320         /* Pools that have free counters are at the start of the list. */
3321         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3322                 /*
3323          * A free counter's reset values must be refreshed between its
3324          * release and its next allocation, so at least one asynchronous
3325          * query must complete in that window; this is ensured by saving
3326          * the pool's query generation at release time.
3327          * The free list is sorted by generation, so if the first
3328          * counter is not yet refreshed, neither are any of the others
3329          * behind it.
3330                  */
3331                 cnt_free = TAILQ_FIRST(&pool->counters);
3332                 if (cnt_free && cnt_free->query_gen + 1 <
3333                     rte_atomic64_read(&pool->query_gen))
3334                         break;
3335                 cnt_free = NULL;
3336         }
3337         if (!cnt_free) {
3338                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3339                 if (!pool)
3340                         return NULL;
3341         }
3342         cnt_free->batch = batch;
3343         /* Create a DV counter action only on the first use. */
3344         if (!cnt_free->action) {
3345                 uint16_t offset;
3346                 struct mlx5_devx_obj *dcs;
3347
3348                 if (batch) {
3349                         offset = cnt_free - &pool->counters_raw[0];
3350                         dcs = pool->min_dcs;
3351                 } else {
3352                         offset = 0;
3353                         dcs = cnt_free->dcs;
3354                 }
3355                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3356                                         (dcs->obj, offset);
3357                 if (!cnt_free->action) {
3358                         rte_errno = errno;
3359                         return NULL;
3360                 }
3361         }
3362         /* Update the counter reset values. */
3363         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3364                                  &cnt_free->bytes))
3365                 return NULL;
3366         cnt_free->shared = shared;
3367         cnt_free->ref_cnt = 1;
3368         cnt_free->id = id;
3369         if (!priv->sh->cmng.query_thread_on)
3370                 /* Start the asynchronous batch query by the host thread. */
3371                 mlx5_set_query_alarm(priv->sh);
3372         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3373         if (TAILQ_EMPTY(&pool->counters)) {
3374                 /* Move the pool to the end of the container pool list. */
3375                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3376                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3377         }
3378         return cnt_free;
3379 }
3380
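/*
 * The reuse rule applied in the allocation loop above, in isolation: a
 * counter released while the pool was at query generation g carries
 * query_gen == g and may be handed out again only once the pool has
 * advanced past g + 1, i.e. after at least one full asynchronous query
 * has refreshed its reset values (illustrative helper):
 */
static inline int
ex_counter_is_reusable(uint64_t cnt_query_gen, uint64_t pool_query_gen)
{
        return cnt_query_gen + 1 < pool_query_gen;
}
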
3381 /**
3382  * Release a flow counter.
3383  *
3384  * @param[in] dev
3385  *   Pointer to the Ethernet device structure.
3386  * @param[in] counter
3387  *   Pointer to the counter handler.
3388  */
3389 static void
3390 flow_dv_counter_release(struct rte_eth_dev *dev,
3391                         struct mlx5_flow_counter *counter)
3392 {
3393         struct mlx5_priv *priv = dev->data->dev_private;
3394
3395         if (!counter)
3396                 return;
3397         if (priv->counter_fallback) {
3398                 flow_dv_counter_release_fallback(dev, counter);
3399                 return;
3400         }
3401         if (--counter->ref_cnt == 0) {
3402                 struct mlx5_flow_counter_pool *pool =
3403                                 flow_dv_counter_pool_get(counter);
3404
3405                 /* Put the counter in the end - the last updated one. */
3406                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3407                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3408         }
3409 }
3410
3411 /**
3412  * Verify the @p attributes will be correctly understood by the NIC and store
3413  * them in the @p flow if everything is correct.
3414  *
3415  * @param[in] dev
3416  *   Pointer to dev struct.
3417  * @param[in] attributes
3418  *   Pointer to flow attributes.
3419  * @param[in] external
3420  *   This flow rule is created by a request external to the PMD.
3421  * @param[out] error
3422  *   Pointer to error structure.
3423  *
3424  * @return
3425  *   0 on success, a negative errno value otherwise and rte_errno is set.
3426  */
3427 static int
3428 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3429                             const struct rte_flow_attr *attributes,
3430                             bool external __rte_unused,
3431                             struct rte_flow_error *error)
3432 {
3433         struct mlx5_priv *priv = dev->data->dev_private;
3434         uint32_t priority_max = priv->config.flow_prio - 1;
3435
3436 #ifndef HAVE_MLX5DV_DR
3437         if (attributes->group)
3438                 return rte_flow_error_set(error, ENOTSUP,
3439                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3440                                           NULL,
3441                                           "groups are not supported");
3442 #else
3443         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3444                                                     MLX5_MAX_TABLES;
3445         uint32_t table;
3446         int ret;
3447
3448         ret = mlx5_flow_group_to_table(attributes, external,
3449                                        attributes->group,
3450                                        &table, error);
3451         if (ret)
3452                 return ret;
3453         if (table >= max_group)
3454                 return rte_flow_error_set(error, EINVAL,
3455                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3456                                           "group index out of range");
3457 #endif
3458         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3459             attributes->priority >= priority_max)
3460                 return rte_flow_error_set(error, ENOTSUP,
3461                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3462                                           NULL,
3463                                           "priority out of range");
3464         if (attributes->transfer) {
3465                 if (!priv->config.dv_esw_en)
3466                         return rte_flow_error_set
3467                                 (error, ENOTSUP,
3468                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3469                                  "E-Switch dr is not supported");
3470                 if (!(priv->representor || priv->master))
3471                         return rte_flow_error_set
3472                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3473                                  NULL, "E-Switch configuration can only be"
3474                                  " done by a master or a representor device");
3475                 if (attributes->egress)
3476                         return rte_flow_error_set
3477                                 (error, ENOTSUP,
3478                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3479                                  "egress is not supported");
3480         }
3481         if (!(attributes->egress ^ attributes->ingress))
3482                 return rte_flow_error_set(error, ENOTSUP,
3483                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3484                                           "must specify exactly one of "
3485                                           "ingress or egress");
3486         return 0;
3487 }
3488
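/*
 * Attribute combinations against the checks above (illustrative
 * initializers; group and priority ranges also depend on the device
 * configuration at runtime):
 */
static const struct rte_flow_attr ex_ok_ingress __rte_unused = {
        .group = 0, .priority = 0, .ingress = 1, /* accepted */
};
static const struct rte_flow_attr ex_bad_both __rte_unused = {
        .ingress = 1, .egress = 1, /* rejected: exactly one direction */
};
static const struct rte_flow_attr ex_bad_transfer __rte_unused = {
        .transfer = 1, .egress = 1, /* rejected: no egress in transfer */
};
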
3489 /**
3490  * Internal validation function. For validating both actions and items.
3491  *
3492  * @param[in] dev
3493  *   Pointer to the rte_eth_dev structure.
3494  * @param[in] attr
3495  *   Pointer to the flow attributes.
3496  * @param[in] items
3497  *   Pointer to the list of items.
3498  * @param[in] actions
3499  *   Pointer to the list of actions.
3500  * @param[in] external
3501  *   This flow rule is created by a request external to the PMD.
3502  * @param[out] error
3503  *   Pointer to the error structure.
3504  *
3505  * @return
3506  *   0 on success, a negative errno value otherwise and rte_errno is set.
3507  */
3508 static int
3509 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3510                  const struct rte_flow_item items[],
3511                  const struct rte_flow_action actions[],
3512                  bool external, struct rte_flow_error *error)
3513 {
3514         int ret;
3515         uint64_t action_flags = 0;
3516         uint64_t item_flags = 0;
3517         uint64_t last_item = 0;
3518         uint8_t next_protocol = 0xff;
3519         uint16_t ether_type = 0;
3520         int actions_n = 0;
3521         const struct rte_flow_item *gre_item = NULL;
3522         struct rte_flow_item_tcp nic_tcp_mask = {
3523                 .hdr = {
3524                         .tcp_flags = 0xFF,
3525                         .src_port = RTE_BE16(UINT16_MAX),
3526                         .dst_port = RTE_BE16(UINT16_MAX),
3527                 }
3528         };
3529
3530         if (items == NULL)
3531                 return -1;
3532         ret = flow_dv_validate_attributes(dev, attr, external, error);
3533         if (ret < 0)
3534                 return ret;
3535         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3536                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3537                 int type = items->type;
3538
3539                 switch (type) {
3540                 case RTE_FLOW_ITEM_TYPE_VOID:
3541                         break;
3542                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3543                         ret = flow_dv_validate_item_port_id
3544                                         (dev, items, attr, item_flags, error);
3545                         if (ret < 0)
3546                                 return ret;
3547                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3548                         break;
3549                 case RTE_FLOW_ITEM_TYPE_ETH:
3550                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3551                                                           error);
3552                         if (ret < 0)
3553                                 return ret;
3554                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3555                                              MLX5_FLOW_LAYER_OUTER_L2;
3556                         if (items->mask != NULL && items->spec != NULL) {
3557                                 ether_type =
3558                                         ((const struct rte_flow_item_eth *)
3559                                          items->spec)->type;
3560                                 ether_type &=
3561                                         ((const struct rte_flow_item_eth *)
3562                                          items->mask)->type;
3563                                 ether_type = rte_be_to_cpu_16(ether_type);
3564                         } else {
3565                                 ether_type = 0;
3566                         }
3567                         break;
3568                 case RTE_FLOW_ITEM_TYPE_VLAN:
3569                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3570                                                            dev, error);
3571                         if (ret < 0)
3572                                 return ret;
3573                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3574                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3575                         if (items->mask != NULL && items->spec != NULL) {
3576                                 ether_type =
3577                                         ((const struct rte_flow_item_vlan *)
3578                                          items->spec)->inner_type;
3579                                 ether_type &=
3580                                         ((const struct rte_flow_item_vlan *)
3581                                          items->mask)->inner_type;
3582                                 ether_type = rte_be_to_cpu_16(ether_type);
3583                         } else {
3584                                 ether_type = 0;
3585                         }
3586                         break;
3587                 case RTE_FLOW_ITEM_TYPE_IPV4:
3588                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3589                                                   &item_flags, &tunnel);
3590                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3591                                                            last_item,
3592                                                            ether_type, NULL,
3593                                                            error);
3594                         if (ret < 0)
3595                                 return ret;
3596                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3597                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3598                         if (items->mask != NULL &&
3599                             ((const struct rte_flow_item_ipv4 *)
3600                              items->mask)->hdr.next_proto_id) {
3601                                 next_protocol =
3602                                         ((const struct rte_flow_item_ipv4 *)
3603                                          (items->spec))->hdr.next_proto_id;
3604                                 next_protocol &=
3605                                         ((const struct rte_flow_item_ipv4 *)
3606                                          (items->mask))->hdr.next_proto_id;
3607                         } else {
3608                                 /* Reset for inner layer. */
3609                                 next_protocol = 0xff;
3610                         }
3611                         break;
3612                 case RTE_FLOW_ITEM_TYPE_IPV6:
3613                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3614                                                   &item_flags, &tunnel);
3615                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3616                                                            last_item,
3617                                                            ether_type, NULL,
3618                                                            error);
3619                         if (ret < 0)
3620                                 return ret;
3621                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3622                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3623                         if (items->mask != NULL &&
3624                             ((const struct rte_flow_item_ipv6 *)
3625                              items->mask)->hdr.proto) {
3626                                 next_protocol =
3627                                         ((const struct rte_flow_item_ipv6 *)
3628                                          items->spec)->hdr.proto;
3629                                 next_protocol &=
3630                                         ((const struct rte_flow_item_ipv6 *)
3631                                          items->mask)->hdr.proto;
3632                         } else {
3633                                 /* Reset for inner layer. */
3634                                 next_protocol = 0xff;
3635                         }
3636                         break;
3637                 case RTE_FLOW_ITEM_TYPE_TCP:
3638                         ret = mlx5_flow_validate_item_tcp
3639                                                 (items, item_flags,
3640                                                  next_protocol,
3641                                                  &nic_tcp_mask,
3642                                                  error);
3643                         if (ret < 0)
3644                                 return ret;
3645                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3646                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3647                         break;
3648                 case RTE_FLOW_ITEM_TYPE_UDP:
3649                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3650                                                           next_protocol,
3651                                                           error);
3652                         if (ret < 0)
3653                                 return ret;
3654                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3655                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3656                         break;
3657                 case RTE_FLOW_ITEM_TYPE_GRE:
3658                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3659                                                           next_protocol, error);
3660                         if (ret < 0)
3661                                 return ret;
3662                         gre_item = items;
3663                         last_item = MLX5_FLOW_LAYER_GRE;
3664                         break;
3665                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3666                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3667                                                             next_protocol,
3668                                                             error);
3669                         if (ret < 0)
3670                                 return ret;
3671                         last_item = MLX5_FLOW_LAYER_NVGRE;
3672                         break;
3673                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3674                         ret = mlx5_flow_validate_item_gre_key
3675                                 (items, item_flags, gre_item, error);
3676                         if (ret < 0)
3677                                 return ret;
3678                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3679                         break;
3680                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3681                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3682                                                             error);
3683                         if (ret < 0)
3684                                 return ret;
3685                         last_item = MLX5_FLOW_LAYER_VXLAN;
3686                         break;
3687                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3688                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3689                                                                 item_flags, dev,
3690                                                                 error);
3691                         if (ret < 0)
3692                                 return ret;
3693                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3694                         break;
3695                 case RTE_FLOW_ITEM_TYPE_GENEVE:
3696                         ret = mlx5_flow_validate_item_geneve(items,
3697                                                              item_flags, dev,
3698                                                              error);
3699                         if (ret < 0)
3700                                 return ret;
3701                         last_item = MLX5_FLOW_LAYER_GENEVE;
3702                         break;
3703                 case RTE_FLOW_ITEM_TYPE_MPLS:
3704                         ret = mlx5_flow_validate_item_mpls(dev, items,
3705                                                            item_flags,
3706                                                            last_item, error);
3707                         if (ret < 0)
3708                                 return ret;
3709                         last_item = MLX5_FLOW_LAYER_MPLS;
3710                         break;
3711                 case RTE_FLOW_ITEM_TYPE_META:
3712                         ret = flow_dv_validate_item_meta(dev, items, attr,
3713                                                          error);
3714                         if (ret < 0)
3715                                 return ret;
3716                         last_item = MLX5_FLOW_ITEM_METADATA;
3717                         break;
3718                 case RTE_FLOW_ITEM_TYPE_ICMP:
3719                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3720                                                            next_protocol,
3721                                                            error);
3722                         if (ret < 0)
3723                                 return ret;
3724                         last_item = MLX5_FLOW_LAYER_ICMP;
3725                         break;
3726                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3727                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3728                                                             next_protocol,
3729                                                             error);
3730                         if (ret < 0)
3731                                 return ret;
3732                         last_item = MLX5_FLOW_LAYER_ICMP6;
3733                         break;
3734                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3735                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3736                         break;
3737                 default:
3738                         return rte_flow_error_set(error, ENOTSUP,
3739                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3740                                                   NULL, "item not supported");
3741                 }
3742                 item_flags |= last_item;
3743         }
3744         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3745                 int type = actions->type;
3746                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3747                         return rte_flow_error_set(error, ENOTSUP,
3748                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3749                                                   actions, "too many actions");
3750                 switch (type) {
3751                 case RTE_FLOW_ACTION_TYPE_VOID:
3752                         break;
3753                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3754                         ret = flow_dv_validate_action_port_id(dev,
3755                                                               action_flags,
3756                                                               actions,
3757                                                               attr,
3758                                                               error);
3759                         if (ret)
3760                                 return ret;
3761                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3762                         ++actions_n;
3763                         break;
3764                 case RTE_FLOW_ACTION_TYPE_FLAG:
3765                         ret = mlx5_flow_validate_action_flag(action_flags,
3766                                                              attr, error);
3767                         if (ret < 0)
3768                                 return ret;
3769                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3770                         ++actions_n;
3771                         break;
3772                 case RTE_FLOW_ACTION_TYPE_MARK:
3773                         ret = mlx5_flow_validate_action_mark(actions,
3774                                                              action_flags,
3775                                                              attr, error);
3776                         if (ret < 0)
3777                                 return ret;
3778                         action_flags |= MLX5_FLOW_ACTION_MARK;
3779                         ++actions_n;
3780                         break;
3781                 case RTE_FLOW_ACTION_TYPE_DROP:
3782                         ret = mlx5_flow_validate_action_drop(action_flags,
3783                                                              attr, error);
3784                         if (ret < 0)
3785                                 return ret;
3786                         action_flags |= MLX5_FLOW_ACTION_DROP;
3787                         ++actions_n;
3788                         break;
3789                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3790                         ret = mlx5_flow_validate_action_queue(actions,
3791                                                               action_flags, dev,
3792                                                               attr, error);
3793                         if (ret < 0)
3794                                 return ret;
3795                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3796                         ++actions_n;
3797                         break;
3798                 case RTE_FLOW_ACTION_TYPE_RSS:
3799                         ret = mlx5_flow_validate_action_rss(actions,
3800                                                             action_flags, dev,
3801                                                             attr, item_flags,
3802                                                             error);
3803                         if (ret < 0)
3804                                 return ret;
3805                         action_flags |= MLX5_FLOW_ACTION_RSS;
3806                         ++actions_n;
3807                         break;
3808                 case RTE_FLOW_ACTION_TYPE_COUNT:
3809                         ret = flow_dv_validate_action_count(dev, error);
3810                         if (ret < 0)
3811                                 return ret;
3812                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3813                         ++actions_n;
3814                         break;
3815                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3816                         if (flow_dv_validate_action_pop_vlan(dev,
3817                                                              action_flags,
3818                                                              actions,
3819                                                              item_flags, attr,
3820                                                              error))
3821                                 return -rte_errno;
3822                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3823                         ++actions_n;
3824                         break;
3825                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3826                         ret = flow_dv_validate_action_push_vlan(action_flags,
3827                                                                 item_flags,
3828                                                                 actions, attr,
3829                                                                 error);
3830                         if (ret < 0)
3831                                 return ret;
3832                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3833                         ++actions_n;
3834                         break;
3835                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3836                         ret = flow_dv_validate_action_set_vlan_pcp
3837                                                 (action_flags, actions, error);
3838                         if (ret < 0)
3839                                 return ret;
3840                         /* Count PCP with push_vlan command. */
3841                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
3842                         break;
3843                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3844                         ret = flow_dv_validate_action_set_vlan_vid
3845                                                 (item_flags, action_flags,
3846                                                  actions, error);
3847                         if (ret < 0)
3848                                 return ret;
3849                         /* Count VID with push_vlan command. */
3850                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
3851                         break;
3852                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3853                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3854                         ret = flow_dv_validate_action_l2_encap(action_flags,
3855                                                                actions, attr,
3856                                                                error);
3857                         if (ret < 0)
3858                                 return ret;
3859                         action_flags |= actions->type ==
3860                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3861                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3862                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3863                         ++actions_n;
3864                         break;
3865                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3866                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3867                         ret = flow_dv_validate_action_l2_decap(action_flags,
3868                                                                attr, error);
3869                         if (ret < 0)
3870                                 return ret;
3871                         action_flags |= actions->type ==
3872                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3873                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3874                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3875                         ++actions_n;
3876                         break;
3877                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3878                         ret = flow_dv_validate_action_raw_encap(action_flags,
3879                                                                 actions, attr,
3880                                                                 error);
3881                         if (ret < 0)
3882                                 return ret;
3883                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3884                         ++actions_n;
3885                         break;
3886                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3887                         ret = flow_dv_validate_action_raw_decap(action_flags,
3888                                                                 actions, attr,
3889                                                                 error);
3890                         if (ret < 0)
3891                                 return ret;
3892                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3893                         ++actions_n;
3894                         break;
3895                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3896                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3897                         ret = flow_dv_validate_action_modify_mac(action_flags,
3898                                                                  actions,
3899                                                                  item_flags,
3900                                                                  error);
3901                         if (ret < 0)
3902                                 return ret;
3903                         /* Count all modify-header actions as one action. */
3904                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3905                                 ++actions_n;
3906                         action_flags |= actions->type ==
3907                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3908                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3909                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3910                         break;
3912                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3913                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3914                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3915                                                                   actions,
3916                                                                   item_flags,
3917                                                                   error);
3918                         if (ret < 0)
3919                                 return ret;
3920                         /* Count all modify-header actions as one action. */
3921                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3922                                 ++actions_n;
3923                         action_flags |= actions->type ==
3924                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3925                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3926                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3927                         break;
3928                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3929                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3930                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3931                                                                   actions,
3932                                                                   item_flags,
3933                                                                   error);
3934                         if (ret < 0)
3935                                 return ret;
3936                         /* Count all modify-header actions as one action. */
3937                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3938                                 ++actions_n;
3939                         action_flags |= actions->type ==
3940                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3941                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3942                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3943                         break;
3944                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3945                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3946                         ret = flow_dv_validate_action_modify_tp(action_flags,
3947                                                                 actions,
3948                                                                 item_flags,
3949                                                                 error);
3950                         if (ret < 0)
3951                                 return ret;
3952                         /* Count all modify-header actions as one action. */
3953                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3954                                 ++actions_n;
3955                         action_flags |= actions->type ==
3956                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3957                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3958                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3959                         break;
3960                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3961                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3962                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3963                                                                  actions,
3964                                                                  item_flags,
3965                                                                  error);
3966                         if (ret < 0)
3967                                 return ret;
3968                         /* Count all modify-header actions as one action. */
3969                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3970                                 ++actions_n;
3971                         action_flags |= actions->type ==
3972                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3973                                                 MLX5_FLOW_ACTION_SET_TTL :
3974                                                 MLX5_FLOW_ACTION_DEC_TTL;
3975                         break;
3976                 case RTE_FLOW_ACTION_TYPE_JUMP:
3977                         ret = flow_dv_validate_action_jump(actions,
3978                                                            action_flags,
3979                                                            attr, external,
3980                                                            error);
3981                         if (ret)
3982                                 return ret;
3983                         ++actions_n;
3984                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3985                         break;
3986                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3987                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3988                         ret = flow_dv_validate_action_modify_tcp_seq
3989                                                                 (action_flags,
3990                                                                  actions,
3991                                                                  item_flags,
3992                                                                  error);
3993                         if (ret < 0)
3994                                 return ret;
3995                         /* Count all modify-header actions as one action. */
3996                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3997                                 ++actions_n;
3998                         action_flags |= actions->type ==
3999                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4000                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
4001                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4002                         break;
4003                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4004                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4005                         ret = flow_dv_validate_action_modify_tcp_ack
4006                                                                 (action_flags,
4007                                                                  actions,
4008                                                                  item_flags,
4009                                                                  error);
4010                         if (ret < 0)
4011                                 return ret;
4012                         /* Count all modify-header actions as one action. */
4013                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
4014                                 ++actions_n;
4015                         action_flags |= actions->type ==
4016                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4017                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
4018                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
4019                         break;
4020                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
4021                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
4022                         break;
4023                 default:
4024                         return rte_flow_error_set(error, ENOTSUP,
4025                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4026                                                   actions,
4027                                                   "action not supported");
4028                 }
4029         }
4030         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
4031             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
4032                 return rte_flow_error_set(error, ENOTSUP,
4033                                           RTE_FLOW_ERROR_TYPE_ACTION,
4034                                           actions,
4035                                           "can't combine tunnel matching"
4036                                           " with vlan actions");
4037         /* E-Switch has a few restrictions on using items and actions. */
4038         if (attr->transfer) {
4039                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
4040                         return rte_flow_error_set(error, ENOTSUP,
4041                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4042                                                   NULL,
4043                                                   "unsupported action FLAG");
4044                 if (action_flags & MLX5_FLOW_ACTION_MARK)
4045                         return rte_flow_error_set(error, ENOTSUP,
4046                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4047                                                   NULL,
4048                                                   "unsupported action MARK");
4049                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
4050                         return rte_flow_error_set(error, ENOTSUP,
4051                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4052                                                   NULL,
4053                                                   "unsupported action QUEUE");
4054                 if (action_flags & MLX5_FLOW_ACTION_RSS)
4055                         return rte_flow_error_set(error, ENOTSUP,
4056                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4057                                                   NULL,
4058                                                   "unsupported action RSS");
4059                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
4060                         return rte_flow_error_set(error, EINVAL,
4061                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4062                                                   actions,
4063                                                   "no fate action is found");
4064         } else {
4065                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
4066                         return rte_flow_error_set(error, EINVAL,
4067                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4068                                                   actions,
4069                                                   "no fate action is found");
4070         }
4071         return 0;
4072 }
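
/*
 * Illustration only: all modify-header actions above are counted as a
 * single device action, since the hardware merges them into one
 * modify-header command. A hypothetical helper (example_count_action()
 * is not part of the driver) modeling that counting:
 */
static int __rte_unused
example_count_action(uint64_t *action_flags, uint64_t flag, int actions_n)
{
	/* Increment only for the first modify-header action seen. */
	if (!(flag & MLX5_FLOW_MODIFY_HDR_ACTIONS) ||
	    !(*action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
		++actions_n;
	*action_flags |= flag;
	return actions_n;
}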
4073
4074 /**
4075  * Internal preparation function. Allocates the DV flow structure,
4076  * whose size is constant.
4077  *
4078  * @param[in] attr
4079  *   Pointer to the flow attributes.
4080  * @param[in] items
4081  *   Pointer to the list of items.
4082  * @param[in] actions
4083  *   Pointer to the list of actions.
4084  * @param[out] error
4085  *   Pointer to the error structure.
4086  *
4087  * @return
4088  *   Pointer to mlx5_flow object on success,
4089  *   otherwise NULL and rte_errno is set.
4090  */
4091 static struct mlx5_flow *
4092 flow_dv_prepare(const struct rte_flow_attr *attr,
4093                 const struct rte_flow_item items[] __rte_unused,
4094                 const struct rte_flow_action actions[] __rte_unused,
4095                 struct rte_flow_error *error)
4096 {
4097         size_t size = sizeof(struct mlx5_flow);
4098         struct mlx5_flow *dev_flow;
4099
4100         dev_flow = rte_calloc(__func__, 1, size, 0);
4101         if (!dev_flow) {
4102                 rte_flow_error_set(error, ENOMEM,
4103                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4104                                    "not enough memory to create flow");
4105                 return NULL;
4106         }
4107         dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
4108         dev_flow->ingress = attr->ingress;
4109         dev_flow->transfer = attr->transfer;
4110         return dev_flow;
4111 }
4112
4113 #ifndef NDEBUG
4114 /**
4115  * Sanity check for match mask and value. Similar to check_valid_spec() in
4116  * kernel driver. If an unmasked bit is present in the value, it fails.
4117  *
4118  * @param match_mask
4119  *   Pointer to match mask buffer.
4120  * @param match_value
4121  *   Pointer to match value buffer.
4122  *
4123  * @return
4124  *   0 if valid, -EINVAL otherwise.
4125  */
4126 static int
4127 flow_dv_check_valid_spec(void *match_mask, void *match_value)
4128 {
4129         uint8_t *m = match_mask;
4130         uint8_t *v = match_value;
4131         unsigned int i;
4132
4133         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
4134                 if (v[i] & ~m[i]) {
4135                         DRV_LOG(ERR,
4136                                 "match_value differs from match_criteria"
4137                                 " %p[%u] != %p[%u]",
4138                                 match_value, i, match_mask, i);
4139                         return -EINVAL;
4140                 }
4141         }
4142         return 0;
4143 }
4144 #endif
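
/*
 * Illustration only: the check above enforces (value & ~mask) == 0 for
 * every byte, i.e. the value may only set bits the mask matches on. The
 * translation helpers below keep this invariant by ANDing each spec with
 * its mask, as in this hypothetical sketch (example_masked_copy() is not
 * part of the driver):
 */
static void __rte_unused
example_masked_copy(uint8_t *value, const uint8_t *spec,
		    const uint8_t *mask, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		value[i] = spec[i] & mask[i];
}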
4145
4146 /**
4147  * Add Ethernet item to matcher and to the value.
4148  *
4149  * @param[in, out] matcher
4150  *   Flow matcher.
4151  * @param[in, out] key
4152  *   Flow matcher value.
4153  * @param[in] item
4154  *   Flow pattern to translate.
4155  * @param[in] inner
4156  *   Item is inner pattern.
4157  */
4158 static void
4159 flow_dv_translate_item_eth(void *matcher, void *key,
4160                            const struct rte_flow_item *item, int inner)
4161 {
4162         const struct rte_flow_item_eth *eth_m = item->mask;
4163         const struct rte_flow_item_eth *eth_v = item->spec;
4164         const struct rte_flow_item_eth nic_mask = {
4165                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4166                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4167                 .type = RTE_BE16(0xffff),
4168         };
4169         void *headers_m;
4170         void *headers_v;
4171         char *l24_v;
4172         unsigned int i;
4173
4174         if (!eth_v)
4175                 return;
4176         if (!eth_m)
4177                 eth_m = &nic_mask;
4178         if (inner) {
4179                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4180                                          inner_headers);
4181                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4182         } else {
4183                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4184                                          outer_headers);
4185                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4186         }
4187         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
4188                &eth_m->dst, sizeof(eth_m->dst));
4189         /* The value must be in the range of the mask. */
4190         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
4191         for (i = 0; i < sizeof(eth_m->dst); ++i)
4192                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
4193         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
4194                &eth_m->src, sizeof(eth_m->src));
4195         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
4196         /* The value must be in the range of the mask. */
4197         for (i = 0; i < sizeof(eth_m->src); ++i)
4198                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
4199         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4200                  rte_be_to_cpu_16(eth_m->type));
4201         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
4202         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
4203 }
4204
4205 /**
4206  * Add VLAN item to matcher and to the value.
4207  *
4208  * @param[in, out] dev_flow
4209  *   Flow descriptor.
4210  * @param[in, out] matcher
4211  *   Flow matcher.
4212  * @param[in, out] key
4213  *   Flow matcher value.
4214  * @param[in] item
4215  *   Flow pattern to translate.
4216  * @param[in] inner
4217  *   Item is inner pattern.
4218  */
4219 static void
4220 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
4221                             void *matcher, void *key,
4222                             const struct rte_flow_item *item,
4223                             int inner)
4224 {
4225         const struct rte_flow_item_vlan *vlan_m = item->mask;
4226         const struct rte_flow_item_vlan *vlan_v = item->spec;
4227         void *headers_m;
4228         void *headers_v;
4229         uint16_t tci_m;
4230         uint16_t tci_v;
4231
4232         if (!vlan_v)
4233                 return;
4234         if (!vlan_m)
4235                 vlan_m = &rte_flow_item_vlan_mask;
4236         if (inner) {
4237                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4238                                          inner_headers);
4239                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4240         } else {
4241                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4242                                          outer_headers);
4243                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4244                 /*
4245                  * This is a workaround: masks are not supported
4246                  * here and have been pre-validated.
4247                  */
4248                 dev_flow->dv.vf_vlan.tag =
4249                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
4250         }
4251         tci_m = rte_be_to_cpu_16(vlan_m->tci);
4252         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
4253         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
4254         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
4255         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
4256         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
4257         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
4258         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
4259         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
4260         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
4261         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4262                  rte_be_to_cpu_16(vlan_m->inner_type));
4263         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
4264                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
4265 }
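
/*
 * Illustration only: the TCI layout is PCP(3) | CFI(1) | VID(12), which
 * is why the code above shifts by 13 for the priority and by 12 for the
 * CFI bit; MLX5_SET() truncates each write to the field width. A
 * hypothetical decomposition (example_split_tci() is not driver code):
 */
static void __rte_unused
example_split_tci(uint16_t tci, uint16_t *prio, uint16_t *cfi, uint16_t *vid)
{
	*vid = tci & 0x0fff;       /* Bits 0-11: VLAN identifier. */
	*cfi = (tci >> 12) & 0x1;  /* Bit 12: CFI/DEI bit. */
	*prio = (tci >> 13) & 0x7; /* Bits 13-15: PCP priority. */
}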
4266
4267 /**
4268  * Add IPV4 item to matcher and to the value.
4269  *
4270  * @param[in, out] matcher
4271  *   Flow matcher.
4272  * @param[in, out] key
4273  *   Flow matcher value.
4274  * @param[in] item
4275  *   Flow pattern to translate.
4276  * @param[in] inner
4277  *   Item is inner pattern.
4278  * @param[in] group
4279  *   The group to insert the rule.
4280  */
4281 static void
4282 flow_dv_translate_item_ipv4(void *matcher, void *key,
4283                             const struct rte_flow_item *item,
4284                             int inner, uint32_t group)
4285 {
4286         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4287         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4288         const struct rte_flow_item_ipv4 nic_mask = {
4289                 .hdr = {
4290                         .src_addr = RTE_BE32(0xffffffff),
4291                         .dst_addr = RTE_BE32(0xffffffff),
4292                         .type_of_service = 0xff,
4293                         .next_proto_id = 0xff,
4294                 },
4295         };
4296         void *headers_m;
4297         void *headers_v;
4298         char *l24_m;
4299         char *l24_v;
4300         uint8_t tos;
4301
4302         if (inner) {
4303                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4304                                          inner_headers);
4305                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4306         } else {
4307                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4308                                          outer_headers);
4309                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4310         }
4311         if (group == 0)
4312                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4313         else
4314                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4315         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4316         if (!ipv4_v)
4317                 return;
4318         if (!ipv4_m)
4319                 ipv4_m = &nic_mask;
4320         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4321                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4322         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4323                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4324         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4325         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4326         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4327                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4328         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4329                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4330         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4331         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4332         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4333         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4334                  ipv4_m->hdr.type_of_service);
4335         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4336         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4337                  ipv4_m->hdr.type_of_service >> 2);
4338         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4339         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4340                  ipv4_m->hdr.next_proto_id);
4341         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4342                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4343 }
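
/*
 * Illustration only: the IPv4 ToS byte is DSCP(6) | ECN(2). MLX5_SET()
 * truncates to the field width, so writing the whole ToS into ip_ecn
 * keeps its low two bits, while "tos >> 2" yields the DSCP. A
 * hypothetical split (example_split_tos() is not driver code):
 */
static void __rte_unused
example_split_tos(uint8_t tos, uint8_t *dscp, uint8_t *ecn)
{
	*ecn = tos & 0x3; /* Low 2 bits: ECN. */
	*dscp = tos >> 2; /* High 6 bits: DSCP. */
}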
4344
4345 /**
4346  * Add IPV6 item to matcher and to the value.
4347  *
4348  * @param[in, out] matcher
4349  *   Flow matcher.
4350  * @param[in, out] key
4351  *   Flow matcher value.
4352  * @param[in] item
4353  *   Flow pattern to translate.
4354  * @param[in] inner
4355  *   Item is inner pattern.
4356  * @param[in] group
4357  *   The group to insert the rule.
4358  */
4359 static void
4360 flow_dv_translate_item_ipv6(void *matcher, void *key,
4361                             const struct rte_flow_item *item,
4362                             int inner, uint32_t group)
4363 {
4364         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4365         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4366         const struct rte_flow_item_ipv6 nic_mask = {
4367                 .hdr = {
4368                         .src_addr =
4369                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4370                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4371                         .dst_addr =
4372                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4373                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4374                         .vtc_flow = RTE_BE32(0xffffffff),
4375                         .proto = 0xff,
4376                         .hop_limits = 0xff,
4377                 },
4378         };
4379         void *headers_m;
4380         void *headers_v;
4381         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4382         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4383         char *l24_m;
4384         char *l24_v;
4385         uint32_t vtc_m;
4386         uint32_t vtc_v;
4387         int i;
4388         int size;
4389
4390         if (inner) {
4391                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4392                                          inner_headers);
4393                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4394         } else {
4395                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4396                                          outer_headers);
4397                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4398         }
4399         if (group == 0)
4400                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4401         else
4402                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4403         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4404         if (!ipv6_v)
4405                 return;
4406         if (!ipv6_m)
4407                 ipv6_m = &nic_mask;
4408         size = sizeof(ipv6_m->hdr.dst_addr);
4409         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4410                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4411         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4412                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4413         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4414         for (i = 0; i < size; ++i)
4415                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4416         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4417                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4418         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4419                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4420         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4421         for (i = 0; i < size; ++i)
4422                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4423         /* TOS. */
4424         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4425         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
4426         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4427         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4428         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4429         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4430         /* Label. */
4431         if (inner) {
4432                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4433                          vtc_m);
4434                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4435                          vtc_v);
4436         } else {
4437                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4438                          vtc_m);
4439                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4440                          vtc_v);
4441         }
4442         /* Protocol. */
4443         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4444                  ipv6_m->hdr.proto);
4445         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4446                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4447 }
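
/*
 * Illustration only: vtc_flow packs version(4) | traffic class(8) |
 * flow label(20). After byte swapping, ECN is at bits 20-21 and DSCP at
 * bits 22-27, matching the ">> 20" and ">> 22" shifts above (MLX5_SET()
 * truncates). A hypothetical split (not driver code):
 */
static void __rte_unused
example_split_vtc_flow(uint32_t vtc, uint8_t *dscp, uint8_t *ecn,
		       uint32_t *label)
{
	*ecn = (vtc >> 20) & 0x3;   /* ECN: low 2 bits of traffic class. */
	*dscp = (vtc >> 22) & 0x3f; /* DSCP: high 6 bits of traffic class. */
	*label = vtc & 0xfffff;     /* Flow label: bits 0-19. */
}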
4448
4449 /**
4450  * Add TCP item to matcher and to the value.
4451  *
4452  * @param[in, out] matcher
4453  *   Flow matcher.
4454  * @param[in, out] key
4455  *   Flow matcher value.
4456  * @param[in] item
4457  *   Flow pattern to translate.
4458  * @param[in] inner
4459  *   Item is inner pattern.
4460  */
4461 static void
4462 flow_dv_translate_item_tcp(void *matcher, void *key,
4463                            const struct rte_flow_item *item,
4464                            int inner)
4465 {
4466         const struct rte_flow_item_tcp *tcp_m = item->mask;
4467         const struct rte_flow_item_tcp *tcp_v = item->spec;
4468         void *headers_m;
4469         void *headers_v;
4470
4471         if (inner) {
4472                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4473                                          inner_headers);
4474                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4475         } else {
4476                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4477                                          outer_headers);
4478                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4479         }
4480         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4481         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4482         if (!tcp_v)
4483                 return;
4484         if (!tcp_m)
4485                 tcp_m = &rte_flow_item_tcp_mask;
4486         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4487                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4488         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4489                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4490         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4491                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4492         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4493                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4494         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4495                  tcp_m->hdr.tcp_flags);
4496         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4497                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4498 }
4499
4500 /**
4501  * Add UDP item to matcher and to the value.
4502  *
4503  * @param[in, out] matcher
4504  *   Flow matcher.
4505  * @param[in, out] key
4506  *   Flow matcher value.
4507  * @param[in] item
4508  *   Flow pattern to translate.
4509  * @param[in] inner
4510  *   Item is inner pattern.
4511  */
4512 static void
4513 flow_dv_translate_item_udp(void *matcher, void *key,
4514                            const struct rte_flow_item *item,
4515                            int inner)
4516 {
4517         const struct rte_flow_item_udp *udp_m = item->mask;
4518         const struct rte_flow_item_udp *udp_v = item->spec;
4519         void *headers_m;
4520         void *headers_v;
4521
4522         if (inner) {
4523                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4524                                          inner_headers);
4525                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4526         } else {
4527                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4528                                          outer_headers);
4529                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4530         }
4531         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4532         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4533         if (!udp_v)
4534                 return;
4535         if (!udp_m)
4536                 udp_m = &rte_flow_item_udp_mask;
4537         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4538                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4539         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4540                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4541         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4542                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4543         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4544                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4545 }
4546
4547 /**
4548  * Add GRE optional key item to matcher and to the value.
4549  *
4550  * @param[in, out] matcher
4551  *   Flow matcher.
4552  * @param[in, out] key
4553  *   Flow matcher value.
4554  * @param[in] item
4555  *   Flow pattern to translate.
4558  */
4559 static void
4560 flow_dv_translate_item_gre_key(void *matcher, void *key,
4561                                const struct rte_flow_item *item)
4562 {
4563         const rte_be32_t *key_m = item->mask;
4564         const rte_be32_t *key_v = item->spec;
4565         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4566         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4567         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4568
4569         if (!key_v)
4570                 return;
4571         if (!key_m)
4572                 key_m = &gre_key_default_mask;
4573         /* GRE K bit must be on and should already be validated */
4574         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4575         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
4576         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4577                  rte_be_to_cpu_32(*key_m) >> 8);
4578         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4579                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4580         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4581                  rte_be_to_cpu_32(*key_m) & 0xFF);
4582         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4583                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4584 }
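
/*
 * Illustration only: the device splits the 32-bit GRE key into
 * gre_key_h (upper 24 bits) and gre_key_l (lower 8 bits), hence the
 * ">> 8" and "& 0xFF" above. A hypothetical split of a CPU-order key
 * (example_split_gre_key() is not driver code):
 */
static void __rte_unused
example_split_gre_key(uint32_t key, uint32_t *key_h, uint32_t *key_l)
{
	*key_h = key >> 8;   /* Upper 24 bits. */
	*key_l = key & 0xFF; /* Lower 8 bits. */
}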
4585
4586 /**
4587  * Add GRE item to matcher and to the value.
4588  *
4589  * @param[in, out] matcher
4590  *   Flow matcher.
4591  * @param[in, out] key
4592  *   Flow matcher value.
4593  * @param[in] item
4594  *   Flow pattern to translate.
4595  * @param[in] inner
4596  *   Item is inner pattern.
4597  */
4598 static void
4599 flow_dv_translate_item_gre(void *matcher, void *key,
4600                            const struct rte_flow_item *item,
4601                            int inner)
4602 {
4603         const struct rte_flow_item_gre *gre_m = item->mask;
4604         const struct rte_flow_item_gre *gre_v = item->spec;
4605         void *headers_m;
4606         void *headers_v;
4607         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4608         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4609         struct {
4610                 union {
4611                         __extension__
4612                         struct {
4613                                 uint16_t version:3;
4614                                 uint16_t rsvd0:9;
4615                                 uint16_t s_present:1;
4616                                 uint16_t k_present:1;
4617                                 uint16_t rsvd_bit1:1;
4618                                 uint16_t c_present:1;
4619                         };
4620                         uint16_t value;
4621                 };
4622         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4623
4624         if (inner) {
4625                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4626                                          inner_headers);
4627                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4628         } else {
4629                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4630                                          outer_headers);
4631                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4632         }
4633         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4634         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4635         if (!gre_v)
4636                 return;
4637         if (!gre_m)
4638                 gre_m = &rte_flow_item_gre_mask;
4639         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4640                  rte_be_to_cpu_16(gre_m->protocol));
4641         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4642                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4643         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4644         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4645         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4646                  gre_crks_rsvd0_ver_m.c_present);
4647         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4648                  gre_crks_rsvd0_ver_v.c_present &
4649                  gre_crks_rsvd0_ver_m.c_present);
4650         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4651                  gre_crks_rsvd0_ver_m.k_present);
4652         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4653                  gre_crks_rsvd0_ver_v.k_present &
4654                  gre_crks_rsvd0_ver_m.k_present);
4655         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4656                  gre_crks_rsvd0_ver_m.s_present);
4657         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4658                  gre_crks_rsvd0_ver_v.s_present &
4659                  gre_crks_rsvd0_ver_m.s_present);
4660 }
4661
4662 /**
4663  * Add NVGRE item to matcher and to the value.
4664  *
4665  * @param[in, out] matcher
4666  *   Flow matcher.
4667  * @param[in, out] key
4668  *   Flow matcher value.
4669  * @param[in] item
4670  *   Flow pattern to translate.
4671  * @param[in] inner
4672  *   Item is inner pattern.
4673  */
4674 static void
4675 flow_dv_translate_item_nvgre(void *matcher, void *key,
4676                              const struct rte_flow_item *item,
4677                              int inner)
4678 {
4679         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4680         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4681         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4682         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4683         const char *tni_flow_id_m;
4684         const char *tni_flow_id_v;
4685         char *gre_key_m;
4686         char *gre_key_v;
4687         int size;
4688         int i;
4689
4690         /* For NVGRE, GRE header fields must be set with defined values. */
4691         const struct rte_flow_item_gre gre_spec = {
4692                 .c_rsvd0_ver = RTE_BE16(0x2000),
4693                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4694         };
4695         const struct rte_flow_item_gre gre_mask = {
4696                 .c_rsvd0_ver = RTE_BE16(0xB000),
4697                 .protocol = RTE_BE16(UINT16_MAX),
4698         };
4699         const struct rte_flow_item gre_item = {
4700                 .spec = &gre_spec,
4701                 .mask = &gre_mask,
4702                 .last = NULL,
4703         };
4704         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4705         if (!nvgre_v)
4706                 return;
4707         if (!nvgre_m)
4708                 nvgre_m = &rte_flow_item_nvgre_mask;
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
4709         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4710         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4711         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4712         memcpy(gre_key_m, tni_flow_id_m, size);
4713         for (i = 0; i < size; ++i)
4714                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4715 }
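
/*
 * Illustration only: NVGRE reuses the GRE key field as TNI(24) |
 * flow_id(8), so the 4 bytes copied above land on gre_key_h/gre_key_l.
 * A hypothetical reconstruction of the wire key (not driver code):
 */
static uint32_t __rte_unused
example_nvgre_key(const uint8_t tni[3], uint8_t flow_id)
{
	/* TNI fills the upper 24 bits, flow_id the lower 8. */
	return ((uint32_t)tni[0] << 24) | ((uint32_t)tni[1] << 16) |
	       ((uint32_t)tni[2] << 8) | flow_id;
}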
4716
4717 /**
4718  * Add VXLAN item to matcher and to the value.
4719  *
4720  * @param[in, out] matcher
4721  *   Flow matcher.
4722  * @param[in, out] key
4723  *   Flow matcher value.
4724  * @param[in] item
4725  *   Flow pattern to translate.
4726  * @param[in] inner
4727  *   Item is inner pattern.
4728  */
4729 static void
4730 flow_dv_translate_item_vxlan(void *matcher, void *key,
4731                              const struct rte_flow_item *item,
4732                              int inner)
4733 {
4734         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4735         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4736         void *headers_m;
4737         void *headers_v;
4738         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4739         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4740         char *vni_m;
4741         char *vni_v;
4742         uint16_t dport;
4743         int size;
4744         int i;
4745
4746         if (inner) {
4747                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4748                                          inner_headers);
4749                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4750         } else {
4751                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4752                                          outer_headers);
4753                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4754         }
4755         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4756                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4757         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4758                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4759                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4760         }
4761         if (!vxlan_v)
4762                 return;
4763         if (!vxlan_m)
4764                 vxlan_m = &rte_flow_item_vxlan_mask;
4765         size = sizeof(vxlan_m->vni);
4766         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4767         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4768         memcpy(vni_m, vxlan_m->vni, size);
4769         for (i = 0; i < size; ++i)
4770                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4771 }
4772
4773 /**
4774  * Add Geneve item to matcher and to the value.
4775  *
4776  * @param[in, out] matcher
4777  *   Flow matcher.
4778  * @param[in, out] key
4779  *   Flow matcher value.
4780  * @param[in] item
4781  *   Flow pattern to translate.
4782  * @param[in] inner
4783  *   Item is inner pattern.
4784  */
4786 static void
4787 flow_dv_translate_item_geneve(void *matcher, void *key,
4788                               const struct rte_flow_item *item, int inner)
4789 {
4790         const struct rte_flow_item_geneve *geneve_m = item->mask;
4791         const struct rte_flow_item_geneve *geneve_v = item->spec;
4792         void *headers_m;
4793         void *headers_v;
4794         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4795         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4796         uint16_t dport;
4797         uint16_t gbhdr_m;
4798         uint16_t gbhdr_v;
4799         char *vni_m;
4800         char *vni_v;
4801         size_t size, i;
4802
4803         if (inner) {
4804                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4805                                          inner_headers);
4806                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4807         } else {
4808                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4809                                          outer_headers);
4810                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4811         }
4812         dport = MLX5_UDP_PORT_GENEVE;
4813         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4814                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4815                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4816         }
4817         if (!geneve_v)
4818                 return;
4819         if (!geneve_m)
4820                 geneve_m = &rte_flow_item_geneve_mask;
4821         size = sizeof(geneve_m->vni);
4822         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
4823         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
4824         memcpy(vni_m, geneve_m->vni, size);
4825         for (i = 0; i < size; ++i)
4826                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
4827         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
4828                  rte_be_to_cpu_16(geneve_m->protocol));
4829         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
4830                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
4831         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
4832         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
4833         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
4834                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4835         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
4836                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4837         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
4838                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4839         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
4840                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
4841                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4842 }
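
/*
 * Illustration only: the first 16 Geneve header bits are ver(2) |
 * opt_len(6) | OAM(1) | critical(1) | rsvd(6); MLX5_GENEVE_OAMF_VAL()
 * and MLX5_GENEVE_OPTLEN_VAL() extract the OAM flag and option length
 * written above. A hypothetical extraction (not driver code):
 */
static void __rte_unused
example_split_geneve_hdr(uint16_t w0, uint8_t *opt_len, uint8_t *oam)
{
	*opt_len = (w0 >> 8) & 0x3f; /* Option length, 4-byte multiples. */
	*oam = (w0 >> 7) & 0x1;      /* OAM packet flag. */
}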
4843
4844 /**
4845  * Add MPLS item to matcher and to the value.
4846  *
4847  * @param[in, out] matcher
4848  *   Flow matcher.
4849  * @param[in, out] key
4850  *   Flow matcher value.
4851  * @param[in] item
4852  *   Flow pattern to translate.
4853  * @param[in] prev_layer
4854  *   The protocol layer indicated in previous item.
4855  * @param[in] inner
4856  *   Item is inner pattern.
4857  */
4858 static void
4859 flow_dv_translate_item_mpls(void *matcher, void *key,
4860                             const struct rte_flow_item *item,
4861                             uint64_t prev_layer,
4862                             int inner)
4863 {
4864         const uint32_t *in_mpls_m = item->mask;
4865         const uint32_t *in_mpls_v = item->spec;
4866         uint32_t *out_mpls_m = NULL;
4867         uint32_t *out_mpls_v = NULL;
4868         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4869         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4870         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4871                                      misc_parameters_2);
4872         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4873         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4874         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4875
4876         switch (prev_layer) {
4877         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4878                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4879                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4880                          MLX5_UDP_PORT_MPLS);
4881                 break;
4882         case MLX5_FLOW_LAYER_GRE:
4883                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4884                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4885                          RTE_ETHER_TYPE_MPLS);
4886                 break;
4887         default:
4888                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4889                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4890                          IPPROTO_MPLS);
4891                 break;
4892         }
4893         if (!in_mpls_v)
4894                 return;
4895         if (!in_mpls_m)
4896                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4897         switch (prev_layer) {
4898         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4899                 out_mpls_m =
4900                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4901                                                  outer_first_mpls_over_udp);
4902                 out_mpls_v =
4903                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4904                                                  outer_first_mpls_over_udp);
4905                 break;
4906         case MLX5_FLOW_LAYER_GRE:
4907                 out_mpls_m =
4908                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4909                                                  outer_first_mpls_over_gre);
4910                 out_mpls_v =
4911                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4912                                                  outer_first_mpls_over_gre);
4913                 break;
4914         default:
4915                 /* Inner MPLS not over GRE or UDP is not supported. */
4916                 if (!inner) {
4917                         out_mpls_m =
4918                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4919                                                          misc2_m,
4920                                                          outer_first_mpls);
4921                         out_mpls_v =
4922                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4923                                                          misc2_v,
4924                                                          outer_first_mpls);
4925                 }
4926                 break;
4927         }
4928         if (out_mpls_m && out_mpls_v) {
4929                 *out_mpls_m = *in_mpls_m;
4930                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4931         }
4932 }
4933
/**
 * Add metadata register item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] reg_type
 *   Type of device metadata register.
 * @param[in] data
 *   Register value to match.
 * @param[in] mask
 *   Register mask.
 */
4948 static void
4949 flow_dv_match_meta_reg(void *matcher, void *key,
4950                        enum modify_reg reg_type,
4951                        uint32_t data, uint32_t mask)
4952 {
4953         void *misc2_m =
4954                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4955         void *misc2_v =
4956                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4957
4958         data &= mask;
4959         switch (reg_type) {
4960         case REG_A:
4961                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
4962                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
4963                 break;
4964         case REG_B:
4965                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
4966                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
4967                 break;
4968         case REG_C_0:
4969                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
4970                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, data);
4971                 break;
4972         case REG_C_1:
4973                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
4974                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
4975                 break;
4976         case REG_C_2:
4977                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
4978                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
4979                 break;
4980         case REG_C_3:
4981                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
4982                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
4983                 break;
4984         case REG_C_4:
4985                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
4986                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
4987                 break;
4988         case REG_C_5:
4989                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
4990                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
4991                 break;
4992         case REG_C_6:
4993                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
4994                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
4995                 break;
4996         case REG_C_7:
4997                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
4998                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
4999                 break;
5000         default:
5001                 assert(false);
5002                 break;
5003         }
5004 }
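
/*
 * Illustrative sketch, not part of the driver (register and values are
 * hypothetical): match packets whose REG_C_1 metadata register carries
 * 0x1234 in its low 16 bits. The value is masked before being written,
 * so only bits covered by the mask take part in the match.
 */
static __rte_unused void
flow_dv_example_match_reg_c1(void *matcher, void *key)
{
        flow_dv_match_meta_reg(matcher, key, REG_C_1, 0x1234, 0xffff);
}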
5005
/**
 * Add META item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
5018 static void
5019 flow_dv_translate_item_meta(void *matcher, void *key,
5020                             const struct rte_flow_item *item)
5021 {
5022         const struct rte_flow_item_meta *meta_m;
5023         const struct rte_flow_item_meta *meta_v;
5024
5025         meta_m = (const void *)item->mask;
5026         if (!meta_m)
5027                 meta_m = &rte_flow_item_meta_mask;
5028         meta_v = (const void *)item->spec;
5029         if (meta_v)
5030                 flow_dv_match_meta_reg(matcher, key, REG_A,
5031                                        rte_cpu_to_be_32(meta_v->data),
5032                                        rte_cpu_to_be_32(meta_m->data));
5033 }
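
/*
 * Illustrative sketch, not part of the driver (item values are
 * hypothetical): a META item matching metadata value 42 exactly. The
 * translation above converts the value to big-endian and matches it on
 * register REG_A.
 */
static __rte_unused void
flow_dv_example_meta_match(void *matcher, void *key)
{
        const struct rte_flow_item_meta spec = { .data = 42 };
        const struct rte_flow_item_meta mask = { .data = UINT32_MAX };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_META,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_meta(matcher, key, &item);
}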
5034
/**
 * Add vport metadata Reg C0 item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] value
 *   Register value to match.
 * @param[in] mask
 *   Register mask.
 */
5045 static void
5046 flow_dv_translate_item_meta_vport(void *matcher, void *key,
5047                                   uint32_t value, uint32_t mask)
5048 {
5049         flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
5050 }
5051
/**
 * Add tag item to matcher.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate. Both spec and mask are mandatory for this
 *   internal item and are dereferenced unconditionally.
 */
5062 static void
5063 flow_dv_translate_mlx5_item_tag(void *matcher, void *key,
5064                                 const struct rte_flow_item *item)
5065 {
5066         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
5067         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
5068         enum modify_reg reg = tag_v->id;
5069
5070         flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
5071 }
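
/*
 * Illustrative sketch, not part of the driver (register and values are
 * hypothetical): an internal TAG item requesting a match on REG_C_2.
 * Note that both spec and mask must be supplied for this item.
 */
static __rte_unused void
flow_dv_example_tag_match(void *matcher, void *key)
{
        const struct mlx5_rte_flow_item_tag spec = {
                .id = REG_C_2,
                .data = 0xdead,
        };
        const struct mlx5_rte_flow_item_tag mask = {
                .data = UINT32_MAX,
        };
        const struct rte_flow_item item = {
                .type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_mlx5_item_tag(matcher, key, &item);
}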
5072
5073 /**
5074  * Add source vport match to the specified matcher.
5075  *
5076  * @param[in, out] matcher
5077  *   Flow matcher.
5078  * @param[in, out] key
5079  *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask to apply.
5084  */
5085 static void
5086 flow_dv_translate_item_source_vport(void *matcher, void *key,
5087                                     int16_t port, uint16_t mask)
5088 {
5089         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5090         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5091
5092         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
5093         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
5094 }
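
/*
 * Illustrative sketch, not part of the driver (values are hypothetical):
 * match traffic coming from source vport 1 exactly.
 */
static __rte_unused void
flow_dv_example_source_vport(void *matcher, void *key)
{
        flow_dv_translate_item_source_vport(matcher, key, 1, 0xffff);
}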
5095
5096 /**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
5101  * @param[in, out] matcher
5102  *   Flow matcher.
5103  * @param[in, out] key
5104  *   Flow matcher value.
5105  * @param[in] item
5106  *   Flow pattern to translate.
5107  *
5108  * @return
5109  *   0 on success, a negative errno value otherwise.
5110  */
5111 static int
5112 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
5113                                void *key, const struct rte_flow_item *item)
5114 {
5115         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
5116         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
5117         struct mlx5_priv *priv;
5118         uint16_t mask, id;
5119
5120         mask = pid_m ? pid_m->id : 0xffff;
5121         id = pid_v ? pid_v->id : dev->data->port_id;
5122         priv = mlx5_port_to_eswitch_info(id);
5123         if (!priv)
5124                 return -rte_errno;
5125         /* Translate to vport field or to metadata, depending on mode. */
5126         if (priv->vport_meta_mask)
5127                 flow_dv_translate_item_meta_vport(matcher, key,
5128                                                   priv->vport_meta_tag,
5129                                                   priv->vport_meta_mask);
5130         else
5131                 flow_dv_translate_item_source_vport(matcher, key,
5132                                                     priv->vport_id, mask);
5133         return 0;
5134 }
5135
5136 /**
5137  * Add ICMP6 item to matcher and to the value.
5138  *
5139  * @param[in, out] matcher
5140  *   Flow matcher.
5141  * @param[in, out] key
5142  *   Flow matcher value.
5143  * @param[in] item
5144  *   Flow pattern to translate.
5145  * @param[in] inner
5146  *   Item is inner pattern.
5147  */
5148 static void
5149 flow_dv_translate_item_icmp6(void *matcher, void *key,
5150                               const struct rte_flow_item *item,
5151                               int inner)
5152 {
5153         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
5154         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
5155         void *headers_m;
5156         void *headers_v;
5157         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5158                                      misc_parameters_3);
5159         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5160         if (inner) {
5161                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5162                                          inner_headers);
5163                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5164         } else {
5165                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5166                                          outer_headers);
5167                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5168         }
5169         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5170         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
5171         if (!icmp6_v)
5172                 return;
5173         if (!icmp6_m)
5174                 icmp6_m = &rte_flow_item_icmp6_mask;
5175         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
5176         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
5177                  icmp6_v->type & icmp6_m->type);
5178         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
5179         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
5180                  icmp6_v->code & icmp6_m->code);
5181 }
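
/*
 * Illustrative sketch, not part of the driver (values are hypothetical):
 * match outer ICMPv6 neighbor solicitation packets (type 135), any code.
 */
static __rte_unused void
flow_dv_example_icmp6_match(void *matcher, void *key)
{
        const struct rte_flow_item_icmp6 spec = { .type = 135 };
        const struct rte_flow_item_icmp6 mask = { .type = 0xff };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ICMP6,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_icmp6(matcher, key, &item, 0);
}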
5182
5183 /**
5184  * Add ICMP item to matcher and to the value.
5185  *
5186  * @param[in, out] matcher
5187  *   Flow matcher.
5188  * @param[in, out] key
5189  *   Flow matcher value.
5190  * @param[in] item
5191  *   Flow pattern to translate.
5192  * @param[in] inner
5193  *   Item is inner pattern.
5194  */
5195 static void
5196 flow_dv_translate_item_icmp(void *matcher, void *key,
5197                             const struct rte_flow_item *item,
5198                             int inner)
5199 {
5200         const struct rte_flow_item_icmp *icmp_m = item->mask;
5201         const struct rte_flow_item_icmp *icmp_v = item->spec;
5202         void *headers_m;
5203         void *headers_v;
5204         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5205                                      misc_parameters_3);
5206         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5207         if (inner) {
5208                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5209                                          inner_headers);
5210                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5211         } else {
5212                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5213                                          outer_headers);
5214                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5215         }
5216         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5217         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
5218         if (!icmp_v)
5219                 return;
5220         if (!icmp_m)
5221                 icmp_m = &rte_flow_item_icmp_mask;
5222         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
5223                  icmp_m->hdr.icmp_type);
5224         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
5225                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
5226         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
5227                  icmp_m->hdr.icmp_code);
5228         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
5229                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
5230 }
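
/*
 * Illustrative sketch, not part of the driver (values are hypothetical):
 * match outer ICMP echo request packets (type 8), any code.
 */
static __rte_unused void
flow_dv_example_icmp_match(void *matcher, void *key)
{
        const struct rte_flow_item_icmp spec = {
                .hdr.icmp_type = 8,
        };
        const struct rte_flow_item_icmp mask = {
                .hdr.icmp_type = 0xff,
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ICMP,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_icmp(matcher, key, &item, 0);
}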
5231
5232 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
5233
5234 #define HEADER_IS_ZERO(match_criteria, headers)                              \
5235         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
5237
5238 /**
5239  * Calculate flow matcher enable bitmap.
5240  *
5241  * @param match_criteria
5242  *   Pointer to flow matcher criteria.
5243  *
5244  * @return
5245  *   Bitmap of enabled fields.
5246  */
5247 static uint8_t
5248 flow_dv_matcher_enable(uint32_t *match_criteria)
5249 {
5250         uint8_t match_criteria_enable;
5251
5252         match_criteria_enable =
5253                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
5254                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
5255         match_criteria_enable |=
5256                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
5257                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
5258         match_criteria_enable |=
5259                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
5260                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
5261         match_criteria_enable |=
5262                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
5263                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
5264         match_criteria_enable |=
5265                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
5266                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
5267         return match_criteria_enable;
5268 }
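
/*
 * Worked example (illustrative): for a matcher mask that only sets fields
 * in outer_headers and misc_parameters_2, HEADER_IS_ZERO() is false for
 * those two blocks only, so flow_dv_matcher_enable() returns a bitmap
 * with just bits MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT and
 * MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT set.
 */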
5269
/**
5272  * Get a flow table.
5273  *
 * @param[in, out] dev
5275  *   Pointer to rte_eth_dev structure.
5276  * @param[in] table_id
5277  *   Table id to use.
5278  * @param[in] egress
5279  *   Direction of the table.
5280  * @param[in] transfer
5281  *   E-Switch or NIC flow.
5282  * @param[out] error
 *   Pointer to the error structure.
5284  *
5285  * @return
 *   Returns the table resource based on the index, NULL in case of failure.
5287  */
5288 static struct mlx5_flow_tbl_resource *
5289 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
5290                          uint32_t table_id, uint8_t egress,
5291                          uint8_t transfer,
5292                          struct rte_flow_error *error)
5293 {
5294         struct mlx5_priv *priv = dev->data->dev_private;
5295         struct mlx5_ibv_shared *sh = priv->sh;
5296         struct mlx5_flow_tbl_resource *tbl;
5297
5298 #ifdef HAVE_MLX5DV_DR
5299         if (transfer) {
5300                 tbl = &sh->fdb_tbl[table_id];
5301                 if (!tbl->obj)
5302                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5303                                 (sh->fdb_domain, table_id);
5304         } else if (egress) {
5305                 tbl = &sh->tx_tbl[table_id];
5306                 if (!tbl->obj)
5307                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5308                                 (sh->tx_domain, table_id);
5309         } else {
5310                 tbl = &sh->rx_tbl[table_id];
5311                 if (!tbl->obj)
5312                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5313                                 (sh->rx_domain, table_id);
5314         }
5315         if (!tbl->obj) {
5316                 rte_flow_error_set(error, ENOMEM,
5317                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5318                                    NULL, "cannot create table");
5319                 return NULL;
5320         }
5321         rte_atomic32_inc(&tbl->refcnt);
5322         return tbl;
5323 #else
5324         (void)error;
5325         (void)tbl;
5326         if (transfer)
5327                 return &sh->fdb_tbl[table_id];
5328         else if (egress)
5329                 return &sh->tx_tbl[table_id];
5330         else
5331                 return &sh->rx_tbl[table_id];
5332 #endif
5333 }
5334
5335 /**
5336  * Release a flow table.
5337  *
5338  * @param[in] tbl
5339  *   Table resource to be released.
5340  *
5341  * @return
 *   Returns 0 if the table was released, 1 otherwise.
5343  */
5344 static int
5345 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
5346 {
5347         if (!tbl)
5348                 return 0;
5349         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
5350                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
5351                 tbl->obj = NULL;
5352                 return 0;
5353         }
5354         return 1;
5355 }
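
/*
 * Illustrative sketch, not part of the driver (table id is hypothetical):
 * acquire and release an ingress NIC table. Every successful get must be
 * paired with a release to keep the reference count balanced.
 */
static __rte_unused void
flow_dv_example_tbl_get_put(struct rte_eth_dev *dev,
                            struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 1, 0, 0, error);
        if (tbl)
                flow_dv_tbl_resource_release(tbl);
}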
5356
5357 /**
5358  * Register the flow matcher.
5359  *
 * @param[in, out] dev
5361  *   Pointer to rte_eth_dev structure.
5362  * @param[in, out] matcher
5363  *   Pointer to flow matcher.
 * @param[in, out] dev_flow
5365  *   Pointer to the dev_flow.
5366  * @param[out] error
 *   Pointer to the error structure.
5368  *
5369  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
5371  */
5372 static int
5373 flow_dv_matcher_register(struct rte_eth_dev *dev,
5374                          struct mlx5_flow_dv_matcher *matcher,
5375                          struct mlx5_flow *dev_flow,
5376                          struct rte_flow_error *error)
5377 {
5378         struct mlx5_priv *priv = dev->data->dev_private;
5379         struct mlx5_ibv_shared *sh = priv->sh;
5380         struct mlx5_flow_dv_matcher *cache_matcher;
5381         struct mlx5dv_flow_matcher_attr dv_attr = {
5382                 .type = IBV_FLOW_ATTR_NORMAL,
5383                 .match_mask = (void *)&matcher->mask,
5384         };
5385         struct mlx5_flow_tbl_resource *tbl = NULL;
5386
5387         /* Lookup from cache. */
5388         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
5389                 if (matcher->crc == cache_matcher->crc &&
5390                     matcher->priority == cache_matcher->priority &&
5391                     matcher->egress == cache_matcher->egress &&
5392                     matcher->group == cache_matcher->group &&
5393                     matcher->transfer == cache_matcher->transfer &&
5394                     !memcmp((const void *)matcher->mask.buf,
5395                             (const void *)cache_matcher->mask.buf,
5396                             cache_matcher->mask.size)) {
5397                         DRV_LOG(DEBUG,
5398                                 "priority %hd use %s matcher %p: refcnt %d++",
5399                                 cache_matcher->priority,
5400                                 cache_matcher->egress ? "tx" : "rx",
5401                                 (void *)cache_matcher,
5402                                 rte_atomic32_read(&cache_matcher->refcnt));
5403                         rte_atomic32_inc(&cache_matcher->refcnt);
5404                         dev_flow->dv.matcher = cache_matcher;
5405                         return 0;
5406                 }
5407         }
5408         /* Register new matcher. */
5409         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
5410         if (!cache_matcher)
5411                 return rte_flow_error_set(error, ENOMEM,
5412                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5413                                           "cannot allocate matcher memory");
5414         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
5415                                        matcher->egress, matcher->transfer,
5416                                        error);
5417         if (!tbl) {
5418                 rte_free(cache_matcher);
5419                 return rte_flow_error_set(error, ENOMEM,
5420                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5421                                           NULL, "cannot create table");
5422         }
5423         *cache_matcher = *matcher;
5424         dv_attr.match_criteria_enable =
5425                 flow_dv_matcher_enable(cache_matcher->mask.buf);
5426         dv_attr.priority = matcher->priority;
5427         if (matcher->egress)
5428                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
5429         cache_matcher->matcher_object =
5430                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
5431         if (!cache_matcher->matcher_object) {
5432                 rte_free(cache_matcher);
5433 #ifdef HAVE_MLX5DV_DR
5434                 flow_dv_tbl_resource_release(tbl);
5435 #endif
5436                 return rte_flow_error_set(error, ENOMEM,
5437                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5438                                           NULL, "cannot create matcher");
5439         }
5440         rte_atomic32_inc(&cache_matcher->refcnt);
5441         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
5442         dev_flow->dv.matcher = cache_matcher;
5443         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
5444                 cache_matcher->priority,
5445                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
5446                 rte_atomic32_read(&cache_matcher->refcnt));
5447         rte_atomic32_inc(&tbl->refcnt);
5448         return 0;
5449 }
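
/*
 * Note: the matcher cache above is keyed on (crc, priority, egress, group,
 * transfer) plus a byte-wise comparison of the mask, so flows using the
 * same match template share a single hardware matcher object and only its
 * reference counter grows.
 */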
5450
5451 /**
5452  * Find existing tag resource or create and register a new one.
5453  *
 * @param[in, out] dev
5455  *   Pointer to rte_eth_dev structure.
5456  * @param[in, out] resource
5457  *   Pointer to tag resource.
 * @param[in, out] dev_flow
5459  *   Pointer to the dev_flow.
5460  * @param[out] error
 *   Pointer to the error structure.
5462  *
5463  * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
5465  */
5466 static int
5467 flow_dv_tag_resource_register
5468                         (struct rte_eth_dev *dev,
5469                          struct mlx5_flow_dv_tag_resource *resource,
5470                          struct mlx5_flow *dev_flow,
5471                          struct rte_flow_error *error)
5472 {
5473         struct mlx5_priv *priv = dev->data->dev_private;
5474         struct mlx5_ibv_shared *sh = priv->sh;
5475         struct mlx5_flow_dv_tag_resource *cache_resource;
5476
5477         /* Lookup a matching resource from cache. */
5478         LIST_FOREACH(cache_resource, &sh->tags, next) {
5479                 if (resource->tag == cache_resource->tag) {
5480                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5481                                 (void *)cache_resource,
5482                                 rte_atomic32_read(&cache_resource->refcnt));
5483                         rte_atomic32_inc(&cache_resource->refcnt);
5484                         dev_flow->dv.tag_resource = cache_resource;
5485                         return 0;
5486                 }
5487         }
        /* Register new resource. */
5489         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5490         if (!cache_resource)
5491                 return rte_flow_error_set(error, ENOMEM,
5492                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5493                                           "cannot allocate resource memory");
5494         *cache_resource = *resource;
5495         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5496                 (resource->tag);
5497         if (!cache_resource->action) {
5498                 rte_free(cache_resource);
5499                 return rte_flow_error_set(error, ENOMEM,
5500                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5501                                           NULL, "cannot create action");
5502         }
5503         rte_atomic32_init(&cache_resource->refcnt);
5504         rte_atomic32_inc(&cache_resource->refcnt);
5505         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5506         dev_flow->dv.tag_resource = cache_resource;
5507         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5508                 (void *)cache_resource,
5509                 rte_atomic32_read(&cache_resource->refcnt));
5510         return 0;
5511 }
5512
5513 /**
5514  * Release the tag.
5515  *
5516  * @param dev
5517  *   Pointer to Ethernet device.
5518  * @param flow
5519  *   Pointer to mlx5_flow.
5520  *
5521  * @return
5522  *   1 while a reference on it exists, 0 when freed.
5523  */
5524 static int
5525 flow_dv_tag_release(struct rte_eth_dev *dev,
5526                     struct mlx5_flow_dv_tag_resource *tag)
5527 {
5528         assert(tag);
5529         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5530                 dev->data->port_id, (void *)tag,
5531                 rte_atomic32_read(&tag->refcnt));
5532         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5533                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5534                 LIST_REMOVE(tag, next);
5535                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5536                         dev->data->port_id, (void *)tag);
5537                 rte_free(tag);
5538                 return 0;
5539         }
5540         return 1;
5541 }
5542
5543 /**
5544  * Translate port ID action to vport.
5545  *
5546  * @param[in] dev
5547  *   Pointer to rte_eth_dev structure.
5548  * @param[in] action
5549  *   Pointer to the port ID action.
5550  * @param[out] dst_port_id
5551  *   The target port ID.
5552  * @param[out] error
5553  *   Pointer to the error structure.
5554  *
5555  * @return
5556  *   0 on success, a negative errno value otherwise and rte_errno is set.
5557  */
5558 static int
5559 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5560                                  const struct rte_flow_action *action,
5561                                  uint32_t *dst_port_id,
5562                                  struct rte_flow_error *error)
5563 {
5564         uint32_t port;
5565         struct mlx5_priv *priv;
5566         const struct rte_flow_action_port_id *conf =
5567                         (const struct rte_flow_action_port_id *)action->conf;
5568
5569         port = conf->original ? dev->data->port_id : conf->id;
5570         priv = mlx5_port_to_eswitch_info(port);
5571         if (!priv)
                return rte_flow_error_set(error, rte_errno,
5573                                           RTE_FLOW_ERROR_TYPE_ACTION,
5574                                           NULL,
5575                                           "No eswitch info was found for port");
5576         if (priv->vport_meta_mask)
5577                 *dst_port_id = priv->vport_meta_tag;
5578         else
5579                 *dst_port_id = priv->vport_id;
5580         return 0;
5581 }
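
/*
 * Illustrative sketch, not part of the driver (port id is hypothetical):
 * resolve the E-Switch destination for a PORT_ID action targeting DPDK
 * port 1.
 */
static __rte_unused int
flow_dv_example_port_id(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        const struct rte_flow_action_port_id conf = { .id = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = &conf,
        };
        uint32_t dst_port_id = 0;

        return flow_dv_translate_action_port_id(dev, &action, &dst_port_id,
                                                error);
}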
5582
/**
 * Add Tx queue matcher.
 *
 * @param[in] dev
 *   Pointer to the dev struct.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
5597 static void
5598 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
5599                                 void *matcher, void *key,
5600                                 const struct rte_flow_item *item)
5601 {
5602         const struct mlx5_rte_flow_item_tx_queue *queue_m;
5603         const struct mlx5_rte_flow_item_tx_queue *queue_v;
5604         void *misc_m =
5605                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5606         void *misc_v =
5607                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5608         struct mlx5_txq_ctrl *txq;
5609         uint32_t queue;
5610
        queue_m = (const void *)item->mask;
5613         if (!queue_m)
5614                 return;
5615         queue_v = (const void *)item->spec;
5616         if (!queue_v)
5617                 return;
5618         txq = mlx5_txq_get(dev, queue_v->queue);
5619         if (!txq)
5620                 return;
5621         queue = txq->obj->sq->id;
5622         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
5623         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
5624                  queue & queue_m->queue);
5625         mlx5_txq_release(dev, queue_v->queue);
5626 }
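
/*
 * Illustrative sketch, not part of the driver (queue index is
 * hypothetical): an internal TX_QUEUE item matching packets sent through
 * Tx queue 0.
 */
static __rte_unused void
flow_dv_example_tx_queue_match(struct rte_eth_dev *dev, void *matcher,
                               void *key)
{
        const struct mlx5_rte_flow_item_tx_queue spec = { .queue = 0 };
        const struct mlx5_rte_flow_item_tx_queue mask = {
                .queue = UINT32_MAX,
        };
        const struct rte_flow_item item = {
                .type = (enum rte_flow_item_type)
                        MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_tx_queue(dev, matcher, key, &item);
}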
5627
5628 /**
5629  * Fill the flow with DV spec.
5630  *
5631  * @param[in] dev
5632  *   Pointer to rte_eth_dev structure.
5633  * @param[in, out] dev_flow
5634  *   Pointer to the sub flow.
5635  * @param[in] attr
5636  *   Pointer to the flow attributes.
5637  * @param[in] items
5638  *   Pointer to the list of items.
5639  * @param[in] actions
5640  *   Pointer to the list of actions.
5641  * @param[out] error
5642  *   Pointer to the error structure.
5643  *
5644  * @return
5645  *   0 on success, a negative errno value otherwise and rte_errno is set.
5646  */
5647 static int
5648 flow_dv_translate(struct rte_eth_dev *dev,
5649                   struct mlx5_flow *dev_flow,
5650                   const struct rte_flow_attr *attr,
5651                   const struct rte_flow_item items[],
5652                   const struct rte_flow_action actions[],
5653                   struct rte_flow_error *error)
5654 {
5655         struct mlx5_priv *priv = dev->data->dev_private;
5656         struct rte_flow *flow = dev_flow->flow;
5657         uint64_t item_flags = 0;
5658         uint64_t last_item = 0;
5659         uint64_t action_flags = 0;
5660         uint64_t priority = attr->priority;
5661         struct mlx5_flow_dv_matcher matcher = {
5662                 .mask = {
5663                         .size = sizeof(matcher.mask.buf),
5664                 },
5665         };
5666         int actions_n = 0;
5667         bool actions_end = false;
5668         struct mlx5_flow_dv_modify_hdr_resource res = {
5669                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5670                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5671         };
5672         union flow_dv_attr flow_attr = { .attr = 0 };
5673         struct mlx5_flow_dv_tag_resource tag_resource;
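        /*
         * All modify-header actions below are accumulated into a single
         * modify header resource; a slot for it in dv.actions[] is
         * reserved at the position of the first such action and filled
         * in when the END action is reached.
         */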
5674         uint32_t modify_action_position = UINT32_MAX;
5675         void *match_mask = matcher.mask.buf;
5676         void *match_value = dev_flow->dv.value.buf;
5677         uint8_t next_protocol = 0xff;
5678         struct rte_vlan_hdr vlan = { 0 };
5679         uint32_t table;
5680         int ret = 0;
5681
5682         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5683                                        &table, error);
5684         if (ret)
5685                 return ret;
5686         dev_flow->group = table;
5687         if (attr->transfer)
5688                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5689         if (priority == MLX5_FLOW_PRIO_RSVD)
5690                 priority = priv->config.flow_prio - 1;
5691         for (; !actions_end ; actions++) {
5692                 const struct rte_flow_action_queue *queue;
5693                 const struct rte_flow_action_rss *rss;
5694                 const struct rte_flow_action *action = actions;
5695                 const struct rte_flow_action_count *count = action->conf;
5696                 const uint8_t *rss_key;
5697                 const struct rte_flow_action_jump *jump_data;
5698                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5699                 struct mlx5_flow_tbl_resource *tbl;
5700                 uint32_t port_id = 0;
5701                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5702                 int action_type = actions->type;
5703                 const struct rte_flow_action *found_action = NULL;
5704
5705                 switch (action_type) {
5706                 case RTE_FLOW_ACTION_TYPE_VOID:
5707                         break;
5708                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5709                         if (flow_dv_translate_action_port_id(dev, action,
5710                                                              &port_id, error))
5711                                 return -rte_errno;
5712                         port_id_resource.port_id = port_id;
5713                         if (flow_dv_port_id_action_resource_register
5714                             (dev, &port_id_resource, dev_flow, error))
5715                                 return -rte_errno;
5716                         dev_flow->dv.actions[actions_n++] =
5717                                 dev_flow->dv.port_id_action->action;
5718                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5719                         break;
5720                 case RTE_FLOW_ACTION_TYPE_FLAG:
5721                         tag_resource.tag =
5722                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5723                         if (!dev_flow->dv.tag_resource)
5724                                 if (flow_dv_tag_resource_register
5725                                     (dev, &tag_resource, dev_flow, error))
                                        return -rte_errno;
5727                         dev_flow->dv.actions[actions_n++] =
5728                                 dev_flow->dv.tag_resource->action;
5729                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5730                         break;
5731                 case RTE_FLOW_ACTION_TYPE_MARK:
5732                         tag_resource.tag = mlx5_flow_mark_set
5733                               (((const struct rte_flow_action_mark *)
5734                                (actions->conf))->id);
5735                         if (!dev_flow->dv.tag_resource)
5736                                 if (flow_dv_tag_resource_register
5737                                     (dev, &tag_resource, dev_flow, error))
                                        return -rte_errno;
5739                         dev_flow->dv.actions[actions_n++] =
5740                                 dev_flow->dv.tag_resource->action;
5741                         action_flags |= MLX5_FLOW_ACTION_MARK;
5742                         break;
5743                 case RTE_FLOW_ACTION_TYPE_DROP:
5744                         action_flags |= MLX5_FLOW_ACTION_DROP;
5745                         break;
5746                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5747                         assert(flow->rss.queue);
5748                         queue = actions->conf;
5749                         flow->rss.queue_num = 1;
5750                         (*flow->rss.queue)[0] = queue->index;
5751                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5752                         break;
5753                 case RTE_FLOW_ACTION_TYPE_RSS:
5754                         assert(flow->rss.queue);
5755                         rss = actions->conf;
5756                         if (flow->rss.queue)
5757                                 memcpy((*flow->rss.queue), rss->queue,
5758                                        rss->queue_num * sizeof(uint16_t));
5759                         flow->rss.queue_num = rss->queue_num;
5760                         /* NULL RSS key indicates default RSS key. */
5761                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5762                         memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5763                         /*
                         * rss->level and rss->types should be set in advance
5765                          * when expanding items for RSS.
5766                          */
5767                         action_flags |= MLX5_FLOW_ACTION_RSS;
5768                         break;
5769                 case RTE_FLOW_ACTION_TYPE_COUNT:
5770                         if (!priv->config.devx) {
5771                                 rte_errno = ENOTSUP;
5772                                 goto cnt_err;
5773                         }
5774                         flow->counter = flow_dv_counter_alloc(dev,
5775                                                               count->shared,
5776                                                               count->id,
5777                                                               dev_flow->group);
5778                         if (flow->counter == NULL)
5779                                 goto cnt_err;
5780                         dev_flow->dv.actions[actions_n++] =
5781                                 flow->counter->action;
5782                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5783                         break;
5784 cnt_err:
5785                         if (rte_errno == ENOTSUP)
5786                                 return rte_flow_error_set
5787                                               (error, ENOTSUP,
5788                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5789                                                NULL,
5790                                                "count action not supported");
5791                         else
5792                                 return rte_flow_error_set
5793                                                 (error, rte_errno,
5794                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5795                                                  action,
5796                                                  "cannot create counter"
5797                                                   " object.");
5798                         break;
5799                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5800                         dev_flow->dv.actions[actions_n++] =
5801                                                 priv->sh->pop_vlan_action;
5802                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5803                         break;
5804                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5805                         flow_dev_get_vlan_info_from_items(items, &vlan);
5806                         vlan.eth_proto = rte_be_to_cpu_16
5807                              ((((const struct rte_flow_action_of_push_vlan *)
5808                                                    actions->conf)->ethertype));
5809                         found_action = mlx5_flow_find_action
5810                                         (actions + 1,
5811                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
5812                         if (found_action)
5813                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5814                         found_action = mlx5_flow_find_action
5815                                         (actions + 1,
5816                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
5817                         if (found_action)
5818                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5819                         if (flow_dv_create_action_push_vlan
5820                                             (dev, attr, &vlan, dev_flow, error))
5821                                 return -rte_errno;
5822                         dev_flow->dv.actions[actions_n++] =
5823                                            dev_flow->dv.push_vlan_res->action;
5824                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5825                         break;
5826                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
                        /* Already handled by the OF_PUSH_VLAN action. */
5828                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
5829                         break;
5830                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5831                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5832                                 break;
5833                         flow_dev_get_vlan_info_from_items(items, &vlan);
5834                         mlx5_update_vlan_vid_pcp(actions, &vlan);
5835                         /* If no VLAN push - this is a modify header action */
5836                         if (flow_dv_convert_action_modify_vlan_vid
5837                                                         (&res, actions, error))
5838                                 return -rte_errno;
5839                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5840                         break;
5841                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5842                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5843                         if (flow_dv_create_action_l2_encap(dev, actions,
5844                                                            dev_flow,
5845                                                            attr->transfer,
5846                                                            error))
5847                                 return -rte_errno;
5848                         dev_flow->dv.actions[actions_n++] =
5849                                 dev_flow->dv.encap_decap->verbs_action;
5850                         action_flags |= actions->type ==
5851                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5852                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5853                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5854                         break;
5855                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5856                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5857                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5858                                                            attr->transfer,
5859                                                            error))
5860                                 return -rte_errno;
5861                         dev_flow->dv.actions[actions_n++] =
5862                                 dev_flow->dv.encap_decap->verbs_action;
5863                         action_flags |= actions->type ==
5864                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5865                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5866                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5867                         break;
5868                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5869                         /* Handle encap with preceding decap. */
5870                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5871                                 if (flow_dv_create_action_raw_encap
5872                                         (dev, actions, dev_flow, attr, error))
5873                                         return -rte_errno;
5874                                 dev_flow->dv.actions[actions_n++] =
5875                                         dev_flow->dv.encap_decap->verbs_action;
5876                         } else {
5877                                 /* Handle encap without preceding decap. */
5878                                 if (flow_dv_create_action_l2_encap
5879                                     (dev, actions, dev_flow, attr->transfer,
5880                                      error))
5881                                         return -rte_errno;
5882                                 dev_flow->dv.actions[actions_n++] =
5883                                         dev_flow->dv.encap_decap->verbs_action;
5884                         }
5885                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5886                         break;
5887                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5888                         /* Check if this decap is followed by encap. */
5889                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5890                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5891                                action++) {
5892                         }
5893                         /* Handle decap only if it isn't followed by encap. */
5894                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5895                                 if (flow_dv_create_action_l2_decap
5896                                     (dev, dev_flow, attr->transfer, error))
5897                                         return -rte_errno;
5898                                 dev_flow->dv.actions[actions_n++] =
5899                                         dev_flow->dv.encap_decap->verbs_action;
5900                         }
5901                         /* If decap is followed by encap, handle it at encap. */
5902                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5903                         break;
5904                 case RTE_FLOW_ACTION_TYPE_JUMP:
5905                         jump_data = action->conf;
5906                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5907                                                        jump_data->group, &table,
5908                                                        error);
5909                         if (ret)
5910                                 return ret;
5911                         tbl = flow_dv_tbl_resource_get(dev, table,
5912                                                        attr->egress,
5913                                                        attr->transfer, error);
5914                         if (!tbl)
5915                                 return rte_flow_error_set
5916                                                 (error, errno,
5917                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5918                                                  NULL,
5919                                                  "cannot create jump action.");
5920                         jump_tbl_resource.tbl = tbl;
5921                         if (flow_dv_jump_tbl_resource_register
5922                             (dev, &jump_tbl_resource, dev_flow, error)) {
5923                                 flow_dv_tbl_resource_release(tbl);
5924                                 return rte_flow_error_set
5925                                                 (error, errno,
5926                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5927                                                  NULL,
5928                                                  "cannot create jump action.");
5929                         }
5930                         dev_flow->dv.actions[actions_n++] =
5931                                 dev_flow->dv.jump->action;
5932                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5933                         break;
5934                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5935                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5936                         if (flow_dv_convert_action_modify_mac(&res, actions,
5937                                                               error))
5938                                 return -rte_errno;
5939                         action_flags |= actions->type ==
5940                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5941                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5942                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5943                         break;
5944                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5945                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5946                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5947                                                                error))
5948                                 return -rte_errno;
5949                         action_flags |= actions->type ==
5950                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5951                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5952                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5953                         break;
5954                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5955                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5956                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5957                                                                error))
5958                                 return -rte_errno;
5959                         action_flags |= actions->type ==
5960                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5961                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5962                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5963                         break;
5964                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5965                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5966                         if (flow_dv_convert_action_modify_tp(&res, actions,
5967                                                              items, &flow_attr,
5968                                                              error))
5969                                 return -rte_errno;
5970                         action_flags |= actions->type ==
5971                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5972                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5973                                         MLX5_FLOW_ACTION_SET_TP_DST;
5974                         break;
5975                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5976                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5977                                                                   &flow_attr,
5978                                                                   error))
5979                                 return -rte_errno;
5980                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5981                         break;
5982                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5983                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5984                                                              items, &flow_attr,
5985                                                              error))
5986                                 return -rte_errno;
5987                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5988                         break;
5989                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5990                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5991                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5992                                                                   error))
5993                                 return -rte_errno;
5994                         action_flags |= actions->type ==
5995                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5996                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
5997                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5998                         break;
                case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
6001                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
6002                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
6003                                                                   error))
6004                                 return -rte_errno;
6005                         action_flags |= actions->type ==
6006                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
6007                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
6008                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
6009                         break;
6010                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
6011                         if (flow_dv_convert_action_set_reg(&res, actions,
6012                                                            error))
6013                                 return -rte_errno;
6014                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6015                         break;
6016                 case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
6017                         if (flow_dv_convert_action_copy_mreg(dev, &res,
6018                                                              actions, error))
6019                                 return -rte_errno;
6020                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
6021                         break;
6022                 case RTE_FLOW_ACTION_TYPE_END:
6023                         actions_end = true;
6024                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
6025                                 /* create modify action if needed. */
6026                                 if (flow_dv_modify_hdr_resource_register
6027                                                                 (dev, &res,
6028                                                                  dev_flow,
6029                                                                  error))
6030                                         return -rte_errno;
6031                                 dev_flow->dv.actions[modify_action_position] =
6032                                         dev_flow->dv.modify_hdr->verbs_action;
6033                         }
6034                         break;
6035                 default:
6036                         break;
6037                 }
6038                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
6039                     modify_action_position == UINT32_MAX)
6040                         modify_action_position = actions_n++;
6041         }
6042         dev_flow->dv.actions_n = actions_n;
6043         dev_flow->actions = action_flags;
6044         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
6045                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
6046                 int item_type = items->type;
6047
6048                 switch (item_type) {
6049                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6050                         flow_dv_translate_item_port_id(dev, match_mask,
6051                                                        match_value, items);
6052                         last_item = MLX5_FLOW_ITEM_PORT_ID;
6053                         break;
6054                 case RTE_FLOW_ITEM_TYPE_ETH:
6055                         flow_dv_translate_item_eth(match_mask, match_value,
6056                                                    items, tunnel);
6057                         matcher.priority = MLX5_PRIORITY_MAP_L2;
6058                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
6059                                              MLX5_FLOW_LAYER_OUTER_L2;
6060                         break;
6061                 case RTE_FLOW_ITEM_TYPE_VLAN:
6062                         flow_dv_translate_item_vlan(dev_flow,
6063                                                     match_mask, match_value,
6064                                                     items, tunnel);
6065                         matcher.priority = MLX5_PRIORITY_MAP_L2;
6066                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
6067                                               MLX5_FLOW_LAYER_INNER_VLAN) :
6068                                              (MLX5_FLOW_LAYER_OUTER_L2 |
6069                                               MLX5_FLOW_LAYER_OUTER_VLAN);
6070                         break;
6071                 case RTE_FLOW_ITEM_TYPE_IPV4:
6072                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6073                                                   &item_flags, &tunnel);
6074                         flow_dv_translate_item_ipv4(match_mask, match_value,
6075                                                     items, tunnel,
6076                                                     dev_flow->group);
6077                         matcher.priority = MLX5_PRIORITY_MAP_L3;
6078                         dev_flow->hash_fields |=
6079                                 mlx5_flow_hashfields_adjust
6080                                         (dev_flow, tunnel,
6081                                          MLX5_IPV4_LAYER_TYPES,
6082                                          MLX5_IPV4_IBV_RX_HASH);
6083                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
6084                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
6085                         if (items->mask != NULL &&
6086                             ((const struct rte_flow_item_ipv4 *)
6087                              items->mask)->hdr.next_proto_id) {
6088                                 next_protocol =
6089                                         ((const struct rte_flow_item_ipv4 *)
6090                                          (items->spec))->hdr.next_proto_id;
6091                                 next_protocol &=
6092                                         ((const struct rte_flow_item_ipv4 *)
6093                                          (items->mask))->hdr.next_proto_id;
6094                         } else {
6095                                 /* Reset for inner layer. */
6096                                 next_protocol = 0xff;
6097                         }
6098                         break;
6099                 case RTE_FLOW_ITEM_TYPE_IPV6:
6100                         mlx5_flow_tunnel_ip_check(items, next_protocol,
6101                                                   &item_flags, &tunnel);
6102                         flow_dv_translate_item_ipv6(match_mask, match_value,
6103                                                     items, tunnel,
6104                                                     dev_flow->group);
6105                         matcher.priority = MLX5_PRIORITY_MAP_L3;
6106                         dev_flow->hash_fields |=
6107                                 mlx5_flow_hashfields_adjust
6108                                         (dev_flow, tunnel,
6109                                          MLX5_IPV6_LAYER_TYPES,
6110                                          MLX5_IPV6_IBV_RX_HASH);
6111                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
6112                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
6113                         if (items->mask != NULL &&
6114                             ((const struct rte_flow_item_ipv6 *)
6115                              items->mask)->hdr.proto) {
6116                                 next_protocol =
6117                                         ((const struct rte_flow_item_ipv6 *)
6118                                          items->spec)->hdr.proto;
6119                                 next_protocol &=
6120                                         ((const struct rte_flow_item_ipv6 *)
6121                                          items->mask)->hdr.proto;
6122                         } else {
6123                                 /* Reset for inner layer. */
6124                                 next_protocol = 0xff;
6125                         }
6126                         break;
6127                 case RTE_FLOW_ITEM_TYPE_TCP:
6128                         flow_dv_translate_item_tcp(match_mask, match_value,
6129                                                    items, tunnel);
6130                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6131                         dev_flow->hash_fields |=
6132                                 mlx5_flow_hashfields_adjust
6133                                         (dev_flow, tunnel, ETH_RSS_TCP,
6134                                          IBV_RX_HASH_SRC_PORT_TCP |
6135                                          IBV_RX_HASH_DST_PORT_TCP);
6136                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6137                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6138                         break;
6139                 case RTE_FLOW_ITEM_TYPE_UDP:
6140                         flow_dv_translate_item_udp(match_mask, match_value,
6141                                                    items, tunnel);
6142                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6143                         dev_flow->hash_fields |=
6144                                 mlx5_flow_hashfields_adjust
6145                                         (dev_flow, tunnel, ETH_RSS_UDP,
6146                                          IBV_RX_HASH_SRC_PORT_UDP |
6147                                          IBV_RX_HASH_DST_PORT_UDP);
6148                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6149                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6150                         break;
6151                 case RTE_FLOW_ITEM_TYPE_GRE:
6152                         flow_dv_translate_item_gre(match_mask, match_value,
6153                                                    items, tunnel);
6154                         last_item = MLX5_FLOW_LAYER_GRE;
6155                         break;
6156                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6157                         flow_dv_translate_item_gre_key(match_mask,
6158                                                        match_value, items);
6159                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6160                         break;
6161                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6162                         flow_dv_translate_item_nvgre(match_mask, match_value,
6163                                                      items, tunnel);
6164                         last_item = MLX5_FLOW_LAYER_GRE;
6165                         break;
6166                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6167                         flow_dv_translate_item_vxlan(match_mask, match_value,
6168                                                      items, tunnel);
6169                         last_item = MLX5_FLOW_LAYER_VXLAN;
6170                         break;
6171                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
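			/*
			 * The plain VXLAN translator is reused here: the
			 * 24-bit VNI sits at the same header offset in
			 * VXLAN and VXLAN-GPE, so VNI matching works for
			 * both encapsulations.
			 */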
6172                         flow_dv_translate_item_vxlan(match_mask, match_value,
6173                                                      items, tunnel);
6174                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6175                         break;
6176                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6177                         flow_dv_translate_item_geneve(match_mask, match_value,
6178                                                       items, tunnel);
6179                         last_item = MLX5_FLOW_LAYER_GENEVE;
6180                         break;
6181                 case RTE_FLOW_ITEM_TYPE_MPLS:
6182                         flow_dv_translate_item_mpls(match_mask, match_value,
6183                                                     items, last_item, tunnel);
6184                         last_item = MLX5_FLOW_LAYER_MPLS;
6185                         break;
6186                 case RTE_FLOW_ITEM_TYPE_META:
6187                         flow_dv_translate_item_meta(match_mask, match_value,
6188                                                     items);
6189                         last_item = MLX5_FLOW_ITEM_METADATA;
6190                         break;
6191                 case RTE_FLOW_ITEM_TYPE_ICMP:
6192                         flow_dv_translate_item_icmp(match_mask, match_value,
6193                                                     items, tunnel);
6194                         last_item = MLX5_FLOW_LAYER_ICMP;
6195                         break;
6196                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6197                         flow_dv_translate_item_icmp6(match_mask, match_value,
6198                                                       items, tunnel);
6199                         last_item = MLX5_FLOW_LAYER_ICMP6;
6200                         break;
6201                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6202                         flow_dv_translate_mlx5_item_tag(match_mask,
6203                                                         match_value, items);
6204                         last_item = MLX5_FLOW_ITEM_TAG;
6205                         break;
6206                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6207                         flow_dv_translate_item_tx_queue(dev, match_mask,
6208                                                         match_value,
6209                                                         items);
6210                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
6211                         break;
6212                 default:
6213                         break;
6214                 }
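		/*
		 * Update item_flags only after the item was translated,
		 * so the tunnel check at the top of the loop reflects
		 * the preceding items only.
		 */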
6215                 item_flags |= last_item;
6216         }
6217         /*
6218          * In case of ingress traffic, when E-Switch mode is enabled,
6219          * there are two cases where the source port must be set manually:
6220          * a NIC steering rule, and an E-Switch rule where no port_id
6221          * item was found. In both cases the source port is set according
6222          * to the current port in use.
6223          */
6224         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
6225             (priv->representor || priv->master)) {
6226                 if (flow_dv_translate_item_port_id(dev, match_mask,
6227                                                    match_value, NULL))
6228                         return -rte_errno;
6229         }
6230         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
6231                                          dev_flow->dv.value.buf));
6232         dev_flow->layers = item_flags;
6233         /* Register matcher. */
6234         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
6235                                     matcher.mask.size);
6236         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
6237                                                      matcher.priority);
6238         matcher.egress = attr->egress;
6239         matcher.group = dev_flow->group;
6240         matcher.transfer = attr->transfer;
6241         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
6242                 return -rte_errno;
6243         return 0;
6244 }
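/*
 * Illustrative note (not part of the driver): two rules that differ only
 * in their match values share one matcher object, because
 * flow_dv_matcher_register() first looks up an existing matcher with the
 * same mask CRC, priority, direction and group. For example, these
 * testpmd rules produce identical masks, so the second one only bumps
 * the matcher reference counter:
 *
 *   flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.1 / end
 *        actions queue index 0 / end
 *   flow create 0 ingress pattern eth / ipv4 dst is 10.0.0.2 / end
 *        actions queue index 1 / end
 */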
6245
6246 /**
6247  * Apply the flow to the NIC.
6248  *
6249  * @param[in] dev
6250  *   Pointer to the Ethernet device structure.
6251  * @param[in, out] flow
6252  *   Pointer to flow structure.
6253  * @param[out] error
6254  *   Pointer to error structure.
6255  *
6256  * @return
6257  *   0 on success, a negative errno value otherwise and rte_errno is set.
6258  */
6259 static int
6260 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
6261               struct rte_flow_error *error)
6262 {
6263         struct mlx5_flow_dv *dv;
6264         struct mlx5_flow *dev_flow;
6265         struct mlx5_priv *priv = dev->data->dev_private;
6266         int n;
6267         int err;
6268
6269         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6270                 dv = &dev_flow->dv;
6271                 n = dv->actions_n;
6272                 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
6273                         if (dev_flow->transfer) {
6274                                 dv->actions[n++] = priv->sh->esw_drop_action;
6275                         } else {
6276                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
6277                                 if (!dv->hrxq) {
6278                                         rte_flow_error_set
6279                                                 (error, errno,
6280                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6281                                                  NULL,
6282                                                  "cannot get drop hash queue");
6283                                         goto error;
6284                                 }
6285                                 dv->actions[n++] = dv->hrxq->action;
6286                         }
6287                 } else if (dev_flow->actions &
6288                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
6289                         struct mlx5_hrxq *hrxq;
6290
6291                         assert(flow->rss.queue);
6292                         hrxq = mlx5_hrxq_get(dev, flow->rss.key,
6293                                              MLX5_RSS_HASH_KEY_LEN,
6294                                              dev_flow->hash_fields,
6295                                              (*flow->rss.queue),
6296                                              flow->rss.queue_num);
6297                         if (!hrxq) {
6298                                 hrxq = mlx5_hrxq_new
6299                                         (dev, flow->rss.key,
6300                                          MLX5_RSS_HASH_KEY_LEN,
6301                                          dev_flow->hash_fields,
6302                                          (*flow->rss.queue),
6303                                          flow->rss.queue_num,
6304                                          !!(dev_flow->layers &
6305                                             MLX5_FLOW_LAYER_TUNNEL));
6306                         }
6307                         if (!hrxq) {
6308                                 rte_flow_error_set
6309                                         (error, rte_errno,
6310                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6311                                          "cannot get hash queue");
6312                                 goto error;
6313                         }
6314                         dv->hrxq = hrxq;
6315                         dv->actions[n++] = dv->hrxq->action;
6316                 }
6317                 dv->flow =
6318                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
6319                                                   (void *)&dv->value, n,
6320                                                   dv->actions);
6321                 if (!dv->flow) {
6322                         rte_flow_error_set(error, errno,
6323                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6324                                            NULL,
6325                                            "hardware refuses to create flow");
6326                         goto error;
6327                 }
6328                 if (priv->vmwa_context &&
6329                     dev_flow->dv.vf_vlan.tag &&
6330                     !dev_flow->dv.vf_vlan.created) {
6331                         /*
6332                          * The rule contains the VLAN pattern.
6333                          * For a VF we create a VLAN interface to
6334                          * make the hypervisor set the correct
6335                          * e-Switch vport context.
6336                          */
6337                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
6338                 }
6339         }
6340         return 0;
6341 error:
6342         err = rte_errno; /* Save rte_errno before cleanup. */
6343         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6344                 struct mlx5_flow_dv *dv = &dev_flow->dv;
6345                 if (dv->hrxq) {
6346                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6347                                 mlx5_hrxq_drop_release(dev);
6348                         else
6349                                 mlx5_hrxq_release(dev, dv->hrxq);
6350                         dv->hrxq = NULL;
6351                 }
6352                 if (dev_flow->dv.vf_vlan.tag &&
6353                     dev_flow->dv.vf_vlan.created)
6354                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6355         }
6356         rte_errno = err; /* Restore rte_errno. */
6357         return -rte_errno;
6358 }
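/*
 * Assumed typical call sequence (for orientation, not a new API): the
 * generic flow layer invokes validate -> prepare -> translate -> apply
 * when a rule is created, so flow_dv_apply() is the point where the
 * translated matcher, actions and hash Rx queues are committed to the
 * hardware; remove/destroy below undo this in reverse.
 */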
6359
6360 /**
6361  * Release the flow matcher.
6362  *
6363  * @param dev
6364  *   Pointer to Ethernet device.
6365  * @param flow
6366  *   Pointer to mlx5_flow.
6367  *
6368  * @return
6369  *   1 while a reference on it exists, 0 when freed.
6370  */
6371 static int
6372 flow_dv_matcher_release(struct rte_eth_dev *dev,
6373                         struct mlx5_flow *flow)
6374 {
6375         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
6376         struct mlx5_priv *priv = dev->data->dev_private;
6377         struct mlx5_ibv_shared *sh = priv->sh;
6378         struct mlx5_flow_tbl_resource *tbl;
6379
6380         assert(matcher->matcher_object);
6381         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
6382                 dev->data->port_id, (void *)matcher,
6383                 rte_atomic32_read(&matcher->refcnt));
6384         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
6385                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
6386                            (matcher->matcher_object));
6387                 LIST_REMOVE(matcher, next);
6388                 if (matcher->egress)
6389                         tbl = &sh->tx_tbl[matcher->group];
6390                 else
6391                         tbl = &sh->rx_tbl[matcher->group];
6392                 flow_dv_tbl_resource_release(tbl);
6393                 rte_free(matcher);
6394                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
6395                         dev->data->port_id, (void *)matcher);
6396                 return 0;
6397         }
6398         return 1;
6399 }
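/*
 * The release helpers below all follow the same reference-counting
 * idiom, sketched here for clarity ("res" stands for any of the cached
 * resources):
 *
 *   if (rte_atomic32_dec_and_test(&res->refcnt)) {
 *           claim_zero(mlx5_glue->destroy_flow_action(res->action));
 *           LIST_REMOVE(res, next);
 *           rte_free(res);
 *           return 0;  -- freed
 *   }
 *   return 1;  -- a reference still exists
 */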
6400
6401 /**
6402  * Release an encap/decap resource.
6403  *
6404  * @param flow
6405  *   Pointer to mlx5_flow.
6406  *
6407  * @return
6408  *   1 while a reference on it exists, 0 when freed.
6409  */
6410 static int
6411 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
6412 {
6413         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
6414                                                 flow->dv.encap_decap;
6415
6416         assert(cache_resource->verbs_action);
6417         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
6418                 (void *)cache_resource,
6419                 rte_atomic32_read(&cache_resource->refcnt));
6420         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6421                 claim_zero(mlx5_glue->destroy_flow_action
6422                                 (cache_resource->verbs_action));
6423                 LIST_REMOVE(cache_resource, next);
6424                 rte_free(cache_resource);
6425                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
6426                         (void *)cache_resource);
6427                 return 0;
6428         }
6429         return 1;
6430 }
6431
6432 /**
6433  * Release a jump to table action resource.
6434  *
6435  * @param flow
6436  *   Pointer to mlx5_flow.
6437  *
6438  * @return
6439  *   1 while a reference on it exists, 0 when freed.
6440  */
6441 static int
6442 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
6443 {
6444         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
6445                                                 flow->dv.jump;
6446
6447         assert(cache_resource->action);
6448         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
6449                 (void *)cache_resource,
6450                 rte_atomic32_read(&cache_resource->refcnt));
6451         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6452                 claim_zero(mlx5_glue->destroy_flow_action
6453                                 (cache_resource->action));
6454                 LIST_REMOVE(cache_resource, next);
6455                 flow_dv_tbl_resource_release(cache_resource->tbl);
6456                 rte_free(cache_resource);
6457                 DRV_LOG(DEBUG, "jump table resource %p: removed",
6458                         (void *)cache_resource);
6459                 return 0;
6460         }
6461         return 1;
6462 }
6463
6464 /**
6465  * Release a modify-header resource.
6466  *
6467  * @param flow
6468  *   Pointer to mlx5_flow.
6469  *
6470  * @return
6471  *   1 while a reference on it exists, 0 when freed.
6472  */
6473 static int
6474 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
6475 {
6476         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
6477                                                 flow->dv.modify_hdr;
6478
6479         assert(cache_resource->verbs_action);
6480         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
6481                 (void *)cache_resource,
6482                 rte_atomic32_read(&cache_resource->refcnt));
6483         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6484                 claim_zero(mlx5_glue->destroy_flow_action
6485                                 (cache_resource->verbs_action));
6486                 LIST_REMOVE(cache_resource, next);
6487                 rte_free(cache_resource);
6488                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
6489                         (void *)cache_resource);
6490                 return 0;
6491         }
6492         return 1;
6493 }
6494
6495 /**
6496  * Release a port ID action resource.
6497  *
6498  * @param flow
6499  *   Pointer to mlx5_flow.
6500  *
6501  * @return
6502  *   1 while a reference on it exists, 0 when freed.
6503  */
6504 static int
6505 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
6506 {
6507         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
6508                 flow->dv.port_id_action;
6509
6510         assert(cache_resource->action);
6511         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
6512                 (void *)cache_resource,
6513                 rte_atomic32_read(&cache_resource->refcnt));
6514         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6515                 claim_zero(mlx5_glue->destroy_flow_action
6516                                 (cache_resource->action));
6517                 LIST_REMOVE(cache_resource, next);
6518                 rte_free(cache_resource);
6519                 DRV_LOG(DEBUG, "port id action resource %p: removed",
6520                         (void *)cache_resource);
6521                 return 0;
6522         }
6523         return 1;
6524 }
6525
6526 /**
6527  * Release a push VLAN action resource.
6528  *
6529  * @param flow
6530  *   Pointer to mlx5_flow.
6531  *
6532  * @return
6533  *   1 while a reference on it exists, 0 when freed.
6534  */
6535 static int
6536 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6537 {
6538         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6539                 flow->dv.push_vlan_res;
6540
6541         assert(cache_resource->action);
6542         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6543                 (void *)cache_resource,
6544                 rte_atomic32_read(&cache_resource->refcnt));
6545         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6546                 claim_zero(mlx5_glue->destroy_flow_action
6547                                 (cache_resource->action));
6548                 LIST_REMOVE(cache_resource, next);
6549                 rte_free(cache_resource);
6550                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
6551                         (void *)cache_resource);
6552                 return 0;
6553         }
6554         return 1;
6555 }
6556
6557 /**
6558  * Remove the flow from the NIC but keep it in memory.
6559  *
6560  * @param[in] dev
6561  *   Pointer to Ethernet device.
6562  * @param[in, out] flow
6563  *   Pointer to flow structure.
6564  */
6565 static void
6566 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6567 {
6568         struct mlx5_flow_dv *dv;
6569         struct mlx5_flow *dev_flow;
6570
6571         if (!flow)
6572                 return;
6573         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6574                 dv = &dev_flow->dv;
6575                 if (dv->flow) {
6576                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6577                         dv->flow = NULL;
6578                 }
6579                 if (dv->hrxq) {
6580                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6581                                 mlx5_hrxq_drop_release(dev);
6582                         else
6583                                 mlx5_hrxq_release(dev, dv->hrxq);
6584                         dv->hrxq = NULL;
6585                 }
6586                 if (dev_flow->dv.vf_vlan.tag &&
6587                     dev_flow->dv.vf_vlan.created)
6588                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6589         }
6590 }
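/*
 * Note: flow_dv_remove() destroys only the hardware objects (DV flow
 * handle, hash Rx queue references, VF VLAN workaround interface); the
 * cached matcher and action resources stay allocated, so the rule can
 * be re-applied later, e.g. across a device stop/start cycle, without
 * being translated again.
 */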
6591
6592 /**
6593  * Remove the flow from the NIC and the memory.
6594  *
6595  * @param[in] dev
6596  *   Pointer to the Ethernet device structure.
6597  * @param[in, out] flow
6598  *   Pointer to flow structure.
6599  */
6600 static void
6601 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6602 {
6603         struct mlx5_flow *dev_flow;
6604
6605         if (!flow)
6606                 return;
6607         flow_dv_remove(dev, flow);
6608         if (flow->counter) {
6609                 flow_dv_counter_release(dev, flow->counter);
6610                 flow->counter = NULL;
6611         }
6612         while (!LIST_EMPTY(&flow->dev_flows)) {
6613                 dev_flow = LIST_FIRST(&flow->dev_flows);
6614                 LIST_REMOVE(dev_flow, next);
6615                 if (dev_flow->dv.matcher)
6616                         flow_dv_matcher_release(dev, dev_flow);
6617                 if (dev_flow->dv.encap_decap)
6618                         flow_dv_encap_decap_resource_release(dev_flow);
6619                 if (dev_flow->dv.modify_hdr)
6620                         flow_dv_modify_hdr_resource_release(dev_flow);
6621                 if (dev_flow->dv.jump)
6622                         flow_dv_jump_tbl_resource_release(dev_flow);
6623                 if (dev_flow->dv.port_id_action)
6624                         flow_dv_port_id_action_resource_release(dev_flow);
6625                 if (dev_flow->dv.push_vlan_res)
6626                         flow_dv_push_vlan_action_resource_release(dev_flow);
6627                 if (dev_flow->dv.tag_resource)
6628                         flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
6629                 rte_free(dev_flow);
6630         }
6631 }
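/*
 * Each cached resource referenced by a device flow is dropped through
 * its dedicated release helper above, keeping the shared caches'
 * reference counts accurate even when several rte_flow rules point at
 * the same matcher or action object.
 */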
6632
6633 /**
6634  * Query a DV flow rule for its statistics via DevX.
6635  *
6636  * @param[in] dev
6637  *   Pointer to Ethernet device.
6638  * @param[in] flow
6639  *   Pointer to the sub flow.
6640  * @param[out] data
6641  *   Data retrieved by the query.
6642  * @param[out] error
6643  *   Perform verbose error reporting if not NULL.
6644  *
6645  * @return
6646  *   0 on success, a negative errno value otherwise and rte_errno is set.
6647  */
6648 static int
6649 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
6650                     void *data, struct rte_flow_error *error)
6651 {
6652         struct mlx5_priv *priv = dev->data->dev_private;
6653         struct rte_flow_query_count *qc = data;
6654
6655         if (!priv->config.devx)
6656                 return rte_flow_error_set(error, ENOTSUP,
6657                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6658                                           NULL,
6659                                           "counters are not supported");
6660         if (flow->counter) {
6661                 uint64_t pkts, bytes;
6662                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
6663                                                &bytes);
6664
6665                 if (err)
6666                         return rte_flow_error_set(error, -err,
6667                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6668                                         NULL, "cannot read counters");
6669                 qc->hits_set = 1;
6670                 qc->bytes_set = 1;
6671                 qc->hits = pkts - flow->counter->hits;
6672                 qc->bytes = bytes - flow->counter->bytes;
6673                 if (qc->reset) {
6674                         flow->counter->hits = pkts;
6675                         flow->counter->bytes = bytes;
6676                 }
6677                 return 0;
6678         }
6679         return rte_flow_error_set(error, EINVAL,
6680                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6681                                   NULL,
6682                                   "counters are not available");
6683 }
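/*
 * Illustrative application-side usage (assumed example, not part of
 * this file): a COUNT action is queried through the generic rte_flow
 * API, which ends up in flow_dv_query_count() above:
 *
 *   struct rte_flow_query_count qc = { .reset = 1 };
 *   struct rte_flow_action count_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *   };
 *   struct rte_flow_error err;
 *
 *   if (rte_flow_query(port_id, flow, &count_action, &qc, &err) == 0)
 *           printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *                  qc.hits, qc.bytes);
 *
 * The returned hits/bytes are deltas since the last reset, because the
 * driver subtracts the values saved in flow->counter.
 */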
6684
6685 /**
6686  * Query a flow.
6687  *
6688  * @see rte_flow_query()
6689  * @see rte_flow_ops
6690  */
6691 static int
6692 flow_dv_query(struct rte_eth_dev *dev,
6693               struct rte_flow *flow,
6694               const struct rte_flow_action *actions,
6695               void *data,
6696               struct rte_flow_error *error)
6697 {
6698         int ret = -EINVAL;
6699
6700         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6701                 switch (actions->type) {
6702                 case RTE_FLOW_ACTION_TYPE_VOID:
6703                         break;
6704                 case RTE_FLOW_ACTION_TYPE_COUNT:
6705                         ret = flow_dv_query_count(dev, flow, data, error);
6706                         break;
6707                 default:
6708                         return rte_flow_error_set(error, ENOTSUP,
6709                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6710                                                   actions,
6711                                                   "action not supported");
6712                 }
6713         }
6714         return ret;
6715 }
6716
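/*
 * The thunks below serialize the entry points that modify caches kept
 * in the shared IB context (matchers, tables, encap/decap and
 * modify-header actions), which may be touched concurrently by the
 * master and representor ports sharing that context.
 */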
6717 /*
6718  * Mutex-protected thunk to flow_dv_translate().
6719  */
6720 static int
6721 flow_d_translate(struct rte_eth_dev *dev,
6722                  struct mlx5_flow *dev_flow,
6723                  const struct rte_flow_attr *attr,
6724                  const struct rte_flow_item items[],
6725                  const struct rte_flow_action actions[],
6726                  struct rte_flow_error *error)
6727 {
6728         int ret;
6729
6730         flow_d_shared_lock(dev);
6731         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
6732         flow_d_shared_unlock(dev);
6733         return ret;
6734 }
6735
6736 /*
6737  * Mutex-protected thunk to flow_dv_apply().
6738  */
6739 static int
6740 flow_d_apply(struct rte_eth_dev *dev,
6741              struct rte_flow *flow,
6742              struct rte_flow_error *error)
6743 {
6744         int ret;
6745
6746         flow_d_shared_lock(dev);
6747         ret = flow_dv_apply(dev, flow, error);
6748         flow_d_shared_unlock(dev);
6749         return ret;
6750 }
6751
6752 /*
6753  * Mutex-protected thunk to flow_dv_remove().
6754  */
6755 static void
6756 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6757 {
6758         flow_d_shared_lock(dev);
6759         flow_dv_remove(dev, flow);
6760         flow_d_shared_unlock(dev);
6761 }
6762
6763 /*
6764  * Mutex-protected thunk to flow_dv_destroy().
6765  */
6766 static void
6767 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6768 {
6769         flow_d_shared_lock(dev);
6770         flow_dv_destroy(dev, flow);
6771         flow_d_shared_unlock(dev);
6772 }
6773
6774 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
6775         .validate = flow_dv_validate,
6776         .prepare = flow_dv_prepare,
6777         .translate = flow_d_translate,
6778         .apply = flow_d_apply,
6779         .remove = flow_d_remove,
6780         .destroy = flow_d_destroy,
6781         .query = flow_dv_query,
6782 };
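/*
 * The generic flow layer in mlx5_flow.c dispatches to these callbacks
 * when the DV (Direct Verbs) driver type is selected for a rule.
 */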
6783
6784 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */