net/mlx5: improve flow item IP validation
drivers/net/mlx5/mlx5_flow_dv.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>
#include <rte_vxlan.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
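
/*
 * The 802.1Q tag control information (TCI) is laid out as PCP (bits 15-13),
 * DEI (bit 12) and VID (bits 11-0); the masks above select the PCP and VID
 * sub-fields of the TCI.
 */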

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

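/* A field_modify_info array is terminated by an all-zero entry (size == 0). */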
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
        /* Size in bits !!! */
        {12, 0, MLX5_MODI_OUT_FIRST_VID},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

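/**
 * Update the item flags when an IPv4/IPv6 item matches an inner protocol
 * of IPIP or IPv6 encapsulation, i.e. when the IP item starts a tunnel.
 *
 * @param[in] item
 *   Pointer to the IPv4/IPv6 item (referenced in debug builds only).
 * @param[in] next_protocol
 *   IP protocol value matched by the item.
 * @param[in,out] item_flags
 *   Bit-field of detected items, updated with the tunnel layer flag.
 * @param[out] tunnel
 *   Set to 1 when a tunnel encapsulation is detected.
 */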
static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
                          uint8_t next_protocol, uint64_t *item_flags,
                          int *tunnel)
{
        assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
               item->type == RTE_FLOW_ITEM_TYPE_IPV6);
        if (next_protocol == IPPROTO_IPIP) {
                *item_flags |= MLX5_FLOW_LAYER_IPIP;
                *tunnel = 1;
        }
        if (next_protocol == IPPROTO_IPV6) {
                *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
                *tunnel = 1;
        }
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

/**
 * Update VLAN's VID/PCP based on input rte_flow_action.
 *
 * @param[in] action
 *   Pointer to struct rte_flow_action.
 * @param[out] vlan
 *   Pointer to struct rte_vlan_hdr.
 */
static void
mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
                         struct rte_vlan_hdr *vlan)
{
        uint16_t vlan_tci;

        if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
                vlan_tci =
                    ((const struct rte_flow_action_of_set_vlan_pcp *)
                                               action->conf)->vlan_pcp;
                vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                vlan->vlan_tci |= vlan_tci;
        } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
                vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                vlan->vlan_tci |= rte_be_to_cpu_16
                    (((const struct rte_flow_action_of_set_vlan_vid *)
                                             action->conf)->vlan_vid);
        }
}

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type,
                              struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        const uint8_t *spec = item->spec;
        const uint8_t *mask = item->mask;
        uint32_t set;

        while (field->size) {
                set = 0;
                /* Generate modify command for each mask segment. */
                memcpy(&set, &mask[field->offset], field->size);
                if (set) {
                        if (i >= MLX5_MODIFY_NUM)
                                return rte_flow_error_set(error, EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "too many items to modify");
                        actions[i].action_type = type;
                        actions[i].field = field->id;
                        actions[i].length = field->size == 4 ? 0 :
                                            field->size * 8;
                        rte_memcpy(&actions[i].data[4 - field->size],
                                   &spec[field->offset], field->size);
                        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                        ++i;
                }
                if (resource->actions_num != i)
                        resource->actions_num = i;
                field++;
        }
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_of_set_vlan_vid *conf =
                (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
        int i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        struct field_modify_info *field = modify_vlan_out_first_vid;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                         "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = field->id;
        actions[i].length = field->size;
        actions[i].offset = field->offset;
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->vlan_vid;
        actions[i].data1 = actions[i].data1 << 16;
        resource->actions_num = ++i;
        return 0;
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
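                 * E.g. Y + 2 * UINT32_MAX is congruent to Y - 2 modulo 2^32,
                 * since UINT32_MAX equals -1 in 32-bit modular arithmetic.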
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

static enum mlx5_modification_field reg_to_field[] = {
        [REG_A] = MLX5_MODI_META_DATA_REG_A,
        [REG_B] = MLX5_MODI_META_DATA_REG_B,
        [REG_C_0] = MLX5_MODI_META_REG_C_0,
        [REG_C_1] = MLX5_MODI_META_REG_C_1,
        [REG_C_2] = MLX5_MODI_META_REG_C_2,
        [REG_C_3] = MLX5_MODI_META_REG_C_3,
        [REG_C_4] = MLX5_MODI_META_REG_C_4,
        [REG_C_5] = MLX5_MODI_META_REG_C_5,
        [REG_C_6] = MLX5_MODI_META_REG_C_6,
        [REG_C_7] = MLX5_MODI_META_REG_C_7,
};

/**
 * Convert register set to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_set_reg
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct mlx5_rte_flow_action_set_tag *conf = (action->conf);
        struct mlx5_modification_cmd *actions = resource->actions;
        uint32_t i = resource->actions_num;

        if (i >= MLX5_MODIFY_NUM)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "too many items to modify");
        actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
        actions[i].field = reg_to_field[conf->id];
        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
        actions[i].data1 = conf->data;
        ++i;
        resource->actions_num = i;
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = UINT32_MAX
        };
        int ret;

        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-field that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                              const struct rte_flow_item *item,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_port_id *spec = item->spec;
        const struct rte_flow_item_port_id *mask = item->mask;
        const struct rte_flow_item_port_id switch_mask = {
                        .id = 0xffffffff,
        };
        struct mlx5_priv *esw_priv;
        struct mlx5_priv *dev_priv;
        int ret;

        if (!attr->transfer)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on port id is valid only"
                                          " when transfer flag is enabled");
        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple source ports are not"
                                          " supported");
        if (!mask)
                mask = &switch_mask;
        if (mask->id != 0xffffffff)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                          mask,
                                          "no support for partial mask on"
                                          " \"id\" field");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 error);
        if (ret)
                return ret;
        if (!spec)
                return 0;
        esw_priv = mlx5_port_to_eswitch_info(spec->id);
        if (!esw_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "failed to obtain E-Switch info for"
                                          " port");
        dev_priv = mlx5_dev_to_eswitch_info(dev);
        if (!dev_priv)
                return rte_flow_error_set(error, rte_errno,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to obtain E-Switch info");
        if (esw_priv->domain_id != dev_priv->domain_id)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "cannot match on a port from a"
                                          " different E-Switch");
        return 0;
}

/**
 * Validate the pop VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the pop vlan action.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
                                 uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 uint64_t item_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        (void)action;
        (void)attr;
        if (!priv->sh->pop_vlan_action)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "pop vlan action is not supported");
        /*
         * Check for inconsistencies:
         *  - fail strip_vlan in a flow that matches packets without VLAN tags;
         *  - should strip_vlan also fail in a flow that does not explicitly
         *    match on a VLAN tag?
         */
        if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "no support for multiple vlan pop "
                                          "actions");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "cannot pop vlan without a "
                                          "match on (outer) vlan in the flow");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after pop VLAN action");
        return 0;
}

/**
 * Get VLAN default info from vlan match info.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to VLAN info to fill.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
                                  struct rte_vlan_hdr *vlan)
{
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
                                MLX5DV_FLOW_VLAN_VID_MASK),
                .inner_type = RTE_BE16(0xffff),
        };

        if (items == NULL)
                return;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
               items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
                ;
        if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                const struct rte_flow_item_vlan *vlan_m = items->mask;
                const struct rte_flow_item_vlan *vlan_v = items->spec;

                if (!vlan_m)
                        vlan_m = &nic_mask;
                /* Only full match values are accepted. */
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
                }
                if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
                     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
                        vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
                        vlan->vlan_tci |=
                                rte_be_to_cpu_16(vlan_v->tci &
                                                 MLX5DV_FLOW_VLAN_VID_MASK_BE);
                }
                if (vlan_m->inner_type == nic_mask.inner_type)
                        vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
                                                           vlan_m->inner_type);
        }
}

/**
 * Validate the push VLAN action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] action
 *   Pointer to the push vlan action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
                                  uint64_t item_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;

        if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
            push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "invalid vlan ethertype");
        if (action_flags &
                (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "no support for multiple VLAN "
                                          "actions");
        if (!mlx5_flow_find_action
                        (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
            !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ACTION, action,
                                "push VLAN needs to match on VLAN in order to "
                                "get VLAN VID information because there is "
                                "no subsequent set VLAN VID action");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after push VLAN");
        (void)attr;
        return 0;
}

/**
 * Validate the set VLAN PCP.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;

        if (conf->vlan_pcp > 7)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN PCP value is too big");
        if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "set VLAN PCP action must follow "
                                          "the push VLAN action");
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN PCP modifications are "
                                          "not supported");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN PCP");
        return 0;
}

/**
 * Validate the set VLAN VID.
 *
 * @param[in] item_flags
 *   Holds the items detected in this rule.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
                                     uint64_t action_flags,
                                     const struct rte_flow_action actions[],
                                     struct rte_flow_error *error)
{
        const struct rte_flow_action *action = actions;
        const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

        if (conf->vlan_vid > RTE_BE16(0xFFE))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "VLAN VID value is too big");
        /* There is an of_push_vlan action before us. */
        if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
                if (mlx5_flow_find_action(actions + 1,
                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
                        return rte_flow_error_set(error, ENOTSUP,
                                        RTE_FLOW_ERROR_TYPE_ACTION, action,
                                        "Multiple VLAN VID modifications are "
                                        "not supported");
                else
                        return 0;
        }

        /*
         * Action is on an existing VLAN header:
         *    Need to verify this is a single modify VID action.
         *    The rule must include a match on the outer VLAN.
         */
        if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "Multiple VLAN VID modifications are "
                                          "not supported");
        if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "match on VLAN is required in order "
                                          "to set VLAN VID");
        if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "wrong action order, port_id should "
                                          "be after set VLAN VID");
        return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->config.devx)
                goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (!attr->transfer && attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        return 0;
}

1335 /**
1336  * Validate the raw encap action.
1337  *
1338  * @param[in] action_flags
1339  *   Holds the actions detected until now.
1340  * @param[in] action
1341  *   Pointer to the encap action.
1342  * @param[in] attr
1343  *   Pointer to flow attributes.
1344  * @param[out] error
1345  *   Pointer to error structure.
1346  *
1347  * @return
1348  *   0 on success, a negative errno value otherwise and rte_errno is set.
1349  */
1350 static int
1351 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1352                                   const struct rte_flow_action *action,
1353                                   const struct rte_flow_attr *attr,
1354                                   struct rte_flow_error *error)
1355 {
1356         const struct rte_flow_action_raw_encap *raw_encap =
1357                 (const struct rte_flow_action_raw_encap *)action->conf;
1358         if (!(action->conf))
1359                 return rte_flow_error_set(error, EINVAL,
1360                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1361                                           "configuration cannot be null");
1362         if (action_flags & MLX5_FLOW_ACTION_DROP)
1363                 return rte_flow_error_set(error, EINVAL,
1364                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1365                                           "can't drop and encap in same flow");
1366         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1367                 return rte_flow_error_set(error, EINVAL,
1368                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1369                                           "can only have a single encap"
1370                                           " action in a flow");
1371         /* Encap without preceding decap is not supported for ingress. */
1372         if (!attr->transfer && attr->ingress &&
1373             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1374                 return rte_flow_error_set(error, ENOTSUP,
1375                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1376                                           NULL,
1377                                           "encap action not supported for "
1378                                           "ingress");
1379         if (!raw_encap->size || !raw_encap->data)
1380                 return rte_flow_error_set(error, EINVAL,
1381                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1382                                           "raw encap data cannot be empty");
1383         return 0;
1384 }
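
/*
 * Illustrative example (not part of the driver): on a non-transfer ingress
 * flow, flow_dv_validate_action_raw_encap() accepts the encap only when a
 * raw decap precedes it, e.g. to rewrite an L3 tunnel header:
 *
 *        const struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *                { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 *
 * "decap" and "encap" stand for caller-provided configurations; the encap
 * one must carry non-NULL data and a non-zero size.
 */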
1385
1386 /**
1387  * Validate the raw decap action.
1388  *
1389  * @param[in] action_flags
1390  *   Holds the actions detected until now.
1391  * @param[in] action
1392  *   Pointer to the encap action.
1393  * @param[in] attr
1394  *   Pointer to flow attributes.
1395  * @param[out] error
1396  *   Pointer to error structure.
1397  *
1398  * @return
1399  *   0 on success, a negative errno value otherwise and rte_errno is set.
1400  */
1401 static int
1402 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1403                                   const struct rte_flow_action *action,
1404                                   const struct rte_flow_attr *attr,
1405                                   struct rte_flow_error *error)
1406 {
1407         if (action_flags & MLX5_FLOW_ACTION_DROP)
1408                 return rte_flow_error_set(error, EINVAL,
1409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1410                                           "can't drop and decap in same flow");
1411         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1412                 return rte_flow_error_set(error, EINVAL,
1413                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1414                                           "can't have encap action before"
1415                                           " decap action");
1416         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1417                 return rte_flow_error_set(error, EINVAL,
1418                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1419                                           "can only have a single decap"
1420                                           " action in a flow");
1421         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1422                 return rte_flow_error_set(error, EINVAL,
1423                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1424                                           "can't have decap action after"
1425                                           " modify action");
1426         /* Decap action is valid on egress only if it is followed by encap. */
1427         if (attr->egress) {
1428                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1429                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1430                        action++) {
1431                 }
1432                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1433                         return rte_flow_error_set
1434                                         (error, ENOTSUP,
1435                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1436                                          NULL, "decap action not supported"
1437                                          " for egress");
1438         }
1439         return 0;
1440 }
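
/*
 * Illustrative counter-example (not part of the driver): the forward scan
 * above rejects a lone raw decap on an egress flow because no RAW_ENCAP
 * follows before the END action:
 *
 *        const struct rte_flow_action bad_actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */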
1441
1442 /**
1443  * Find existing encap/decap resource or create and register a new one.
1444  *
1445  * @param[in, out] dev
1446  *   Pointer to rte_eth_dev structure.
1447  * @param[in, out] resource
1448  *   Pointer to encap/decap resource.
1449  * @param[in, out] dev_flow
1450  *   Pointer to the dev_flow.
1451  * @param[out] error
1452  *   Pointer to error structure.
1453  *
1454  * @return
1455  *   0 on success, otherwise -errno and rte_errno is set.
1456  */
1457 static int
1458 flow_dv_encap_decap_resource_register
1459                         (struct rte_eth_dev *dev,
1460                          struct mlx5_flow_dv_encap_decap_resource *resource,
1461                          struct mlx5_flow *dev_flow,
1462                          struct rte_flow_error *error)
1463 {
1464         struct mlx5_priv *priv = dev->data->dev_private;
1465         struct mlx5_ibv_shared *sh = priv->sh;
1466         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1467         struct rte_flow *flow = dev_flow->flow;
1468         struct mlx5dv_dr_domain *domain;
1469
1470         resource->flags = flow->group ? 0 : 1;
1471         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1472                 domain = sh->fdb_domain;
1473         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1474                 domain = sh->rx_domain;
1475         else
1476                 domain = sh->tx_domain;
1477
1478         /* Lookup a matching resource from cache. */
1479         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1480                 if (resource->reformat_type == cache_resource->reformat_type &&
1481                     resource->ft_type == cache_resource->ft_type &&
1482                     resource->flags == cache_resource->flags &&
1483                     resource->size == cache_resource->size &&
1484                     !memcmp((const void *)resource->buf,
1485                             (const void *)cache_resource->buf,
1486                             resource->size)) {
1487                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1488                                 (void *)cache_resource,
1489                                 rte_atomic32_read(&cache_resource->refcnt));
1490                         rte_atomic32_inc(&cache_resource->refcnt);
1491                         dev_flow->dv.encap_decap = cache_resource;
1492                         return 0;
1493                 }
1494         }
1495         /* Register new encap/decap resource. */
1496         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1497         if (!cache_resource)
1498                 return rte_flow_error_set(error, ENOMEM,
1499                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1500                                           "cannot allocate resource memory");
1501         *cache_resource = *resource;
1502         cache_resource->verbs_action =
1503                 mlx5_glue->dv_create_flow_action_packet_reformat
1504                         (sh->ctx, cache_resource->reformat_type,
1505                          cache_resource->ft_type, domain, cache_resource->flags,
1506                          cache_resource->size,
1507                          (cache_resource->size ? cache_resource->buf : NULL));
1508         if (!cache_resource->verbs_action) {
1509                 rte_free(cache_resource);
1510                 return rte_flow_error_set(error, ENOMEM,
1511                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1512                                           NULL, "cannot create action");
1513         }
1514         rte_atomic32_init(&cache_resource->refcnt);
1515         rte_atomic32_inc(&cache_resource->refcnt);
1516         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1517         dev_flow->dv.encap_decap = cache_resource;
1518         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1519                 (void *)cache_resource,
1520                 rte_atomic32_read(&cache_resource->refcnt));
1521         return 0;
1522 }
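
/*
 * Illustrative caller sketch (mirrors flow_dv_create_action_l2_encap()
 * below): fill a resource on the stack and let the registry either bump
 * the refcount of a cached entry with an identical (reformat_type,
 * ft_type, flags, size, buf) key or create a new packet reformat action:
 *
 *        struct mlx5_flow_dv_encap_decap_resource res = {
 *                .reformat_type =
 *                    MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
 *                .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *        };
 *
 *        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow,
 *                                                  error))
 *                return -rte_errno;
 */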
1523
1524 /**
1525  * Find existing jump table resource or create and register a new one.
1526  *
1527  * @param[in, out] dev
1528  *   Pointer to rte_eth_dev structure.
1529  * @param[in, out] resource
1530  *   Pointer to jump table resource.
1531  * @param[in, out] dev_flow
1532  *   Pointer to the dev_flow.
1533  * @param[out] error
1534  *   Pointer to error structure.
1535  *
1536  * @return
1537  *   0 on success, otherwise -errno and rte_errno is set.
1538  */
1539 static int
1540 flow_dv_jump_tbl_resource_register
1541                         (struct rte_eth_dev *dev,
1542                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1543                          struct mlx5_flow *dev_flow,
1544                          struct rte_flow_error *error)
1545 {
1546         struct mlx5_priv *priv = dev->data->dev_private;
1547         struct mlx5_ibv_shared *sh = priv->sh;
1548         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1549
1550         /* Lookup a matching resource from cache. */
1551         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1552                 if (resource->tbl == cache_resource->tbl) {
1553                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1554                                 (void *)cache_resource,
1555                                 rte_atomic32_read(&cache_resource->refcnt));
1556                         rte_atomic32_inc(&cache_resource->refcnt);
1557                         dev_flow->dv.jump = cache_resource;
1558                         return 0;
1559                 }
1560         }
1561         /* Register new jump table resource. */
1562         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1563         if (!cache_resource)
1564                 return rte_flow_error_set(error, ENOMEM,
1565                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1566                                           "cannot allocate resource memory");
1567         *cache_resource = *resource;
1568         cache_resource->action =
1569                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1570                 (resource->tbl->obj);
1571         if (!cache_resource->action) {
1572                 rte_free(cache_resource);
1573                 return rte_flow_error_set(error, ENOMEM,
1574                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1575                                           NULL, "cannot create action");
1576         }
1577         rte_atomic32_init(&cache_resource->refcnt);
1578         rte_atomic32_inc(&cache_resource->refcnt);
1579         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1580         dev_flow->dv.jump = cache_resource;
1581         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1582                 (void *)cache_resource,
1583                 rte_atomic32_read(&cache_resource->refcnt));
1584         return 0;
1585 }
1586
1587 /**
1588  * Find existing port ID action resource or create and register a new one.
1589  *
1590  * @param[in, out] dev
1591  *   Pointer to rte_eth_dev structure.
1592  * @param[in, out] resource
1593  *   Pointer to port ID action resource.
1594  * @param[in, out] dev_flow
1595  *   Pointer to the dev_flow.
1596  * @param[out] error
1597  *   Pointer to error structure.
1598  *
1599  * @return
1600  *   0 on success, otherwise -errno and rte_errno is set.
1601  */
1602 static int
1603 flow_dv_port_id_action_resource_register
1604                         (struct rte_eth_dev *dev,
1605                          struct mlx5_flow_dv_port_id_action_resource *resource,
1606                          struct mlx5_flow *dev_flow,
1607                          struct rte_flow_error *error)
1608 {
1609         struct mlx5_priv *priv = dev->data->dev_private;
1610         struct mlx5_ibv_shared *sh = priv->sh;
1611         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1612
1613         /* Lookup a matching resource from cache. */
1614         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1615                 if (resource->port_id == cache_resource->port_id) {
1616                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1617                                 "refcnt %d++",
1618                                 (void *)cache_resource,
1619                                 rte_atomic32_read(&cache_resource->refcnt));
1620                         rte_atomic32_inc(&cache_resource->refcnt);
1621                         dev_flow->dv.port_id_action = cache_resource;
1622                         return 0;
1623                 }
1624         }
1625         /* Register new port id action resource. */
1626         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1627         if (!cache_resource)
1628                 return rte_flow_error_set(error, ENOMEM,
1629                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1630                                           "cannot allocate resource memory");
1631         *cache_resource = *resource;
1632         cache_resource->action =
1633                 mlx5_glue->dr_create_flow_action_dest_vport
1634                         (priv->sh->fdb_domain, resource->port_id);
1635         if (!cache_resource->action) {
1636                 rte_free(cache_resource);
1637                 return rte_flow_error_set(error, ENOMEM,
1638                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1639                                           NULL, "cannot create action");
1640         }
1641         rte_atomic32_init(&cache_resource->refcnt);
1642         rte_atomic32_inc(&cache_resource->refcnt);
1643         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1644         dev_flow->dv.port_id_action = cache_resource;
1645         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1646                 (void *)cache_resource,
1647                 rte_atomic32_read(&cache_resource->refcnt));
1648         return 0;
1649 }
1650
1651 /**
1652  * Find existing push vlan resource or create and register a new one.
1653  *
1654  * @param[in, out] dev
1655  *   Pointer to rte_eth_dev structure.
1656  * @param[in, out] resource
1657  *   Pointer to push VLAN action resource.
1658  * @param[in, out] dev_flow
1659  *   Pointer to the dev_flow.
1660  * @param[out] error
1661  *   Pointer to error structure.
1662  *
1663  * @return
1664  *   0 on success, otherwise -errno and rte_errno is set.
1665  */
1666 static int
1667 flow_dv_push_vlan_action_resource_register
1668                        (struct rte_eth_dev *dev,
1669                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1670                         struct mlx5_flow *dev_flow,
1671                         struct rte_flow_error *error)
1672 {
1673         struct mlx5_priv *priv = dev->data->dev_private;
1674         struct mlx5_ibv_shared *sh = priv->sh;
1675         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1676         struct mlx5dv_dr_domain *domain;
1677
1678         /* Lookup a matching resource from cache. */
1679         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1680                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1681                     resource->ft_type == cache_resource->ft_type) {
1682                         DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
1683                                 "refcnt %d++",
1684                                 (void *)cache_resource,
1685                                 rte_atomic32_read(&cache_resource->refcnt));
1686                         rte_atomic32_inc(&cache_resource->refcnt);
1687                         dev_flow->dv.push_vlan_res = cache_resource;
1688                         return 0;
1689                 }
1690         }
1691         /* Register new push_vlan action resource. */
1692         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1693         if (!cache_resource)
1694                 return rte_flow_error_set(error, ENOMEM,
1695                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1696                                           "cannot allocate resource memory");
1697         *cache_resource = *resource;
1698         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1699                 domain = sh->fdb_domain;
1700         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1701                 domain = sh->rx_domain;
1702         else
1703                 domain = sh->tx_domain;
1704         cache_resource->action =
1705                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1706                                                            resource->vlan_tag);
1707         if (!cache_resource->action) {
1708                 rte_free(cache_resource);
1709                 return rte_flow_error_set(error, ENOMEM,
1710                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1711                                           NULL, "cannot create action");
1712         }
1713         rte_atomic32_init(&cache_resource->refcnt);
1714         rte_atomic32_inc(&cache_resource->refcnt);
1715         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1716         dev_flow->dv.push_vlan_res = cache_resource;
1717         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1718                 (void *)cache_resource,
1719                 rte_atomic32_read(&cache_resource->refcnt));
1720         return 0;
1721 }
1722 /**
1723  * Get the size of a specific rte_flow_item_type.
1724  *
1725  * @param[in] item_type
1726  *   Tested rte_flow_item_type.
1727  *
1728  * @return
1729  *   Size of the item type structure, 0 if void or irrelevant.
1730  */
1731 static size_t
1732 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1733 {
1734         size_t retval;
1735
1736         switch (item_type) {
1737         case RTE_FLOW_ITEM_TYPE_ETH:
1738                 retval = sizeof(struct rte_flow_item_eth);
1739                 break;
1740         case RTE_FLOW_ITEM_TYPE_VLAN:
1741                 retval = sizeof(struct rte_flow_item_vlan);
1742                 break;
1743         case RTE_FLOW_ITEM_TYPE_IPV4:
1744                 retval = sizeof(struct rte_flow_item_ipv4);
1745                 break;
1746         case RTE_FLOW_ITEM_TYPE_IPV6:
1747                 retval = sizeof(struct rte_flow_item_ipv6);
1748                 break;
1749         case RTE_FLOW_ITEM_TYPE_UDP:
1750                 retval = sizeof(struct rte_flow_item_udp);
1751                 break;
1752         case RTE_FLOW_ITEM_TYPE_TCP:
1753                 retval = sizeof(struct rte_flow_item_tcp);
1754                 break;
1755         case RTE_FLOW_ITEM_TYPE_VXLAN:
1756                 retval = sizeof(struct rte_flow_item_vxlan);
1757                 break;
1758         case RTE_FLOW_ITEM_TYPE_GRE:
1759                 retval = sizeof(struct rte_flow_item_gre);
1760                 break;
1761         case RTE_FLOW_ITEM_TYPE_NVGRE:
1762                 retval = sizeof(struct rte_flow_item_nvgre);
1763                 break;
1764         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1765                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1766                 break;
1767         case RTE_FLOW_ITEM_TYPE_MPLS:
1768                 retval = sizeof(struct rte_flow_item_mpls);
1769                 break;
1770         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1771         default:
1772                 retval = 0;
1773                 break;
1774         }
1775         return retval;
1776 }
1777
1778 #define MLX5_ENCAP_IPV4_VERSION         0x40
1779 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1780 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1781 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1782 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1783 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1784 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1785
1786 /**
1787  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
1788  *
1789  * @param[in] items
1790  *   Pointer to rte_flow_item objects list.
1791  * @param[out] buf
1792  *   Pointer to the output buffer.
1793  * @param[out] size
1794  *   Pointer to the output buffer size.
1795  * @param[out] error
1796  *   Pointer to the error structure.
1797  *
1798  * @return
1799  *   0 on success, a negative errno value otherwise and rte_errno is set.
1800  */
1801 static int
1802 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1803                            size_t *size, struct rte_flow_error *error)
1804 {
1805         struct rte_ether_hdr *eth = NULL;
1806         struct rte_vlan_hdr *vlan = NULL;
1807         struct rte_ipv4_hdr *ipv4 = NULL;
1808         struct rte_ipv6_hdr *ipv6 = NULL;
1809         struct rte_udp_hdr *udp = NULL;
1810         struct rte_vxlan_hdr *vxlan = NULL;
1811         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1812         struct rte_gre_hdr *gre = NULL;
1813         size_t len;
1814         size_t temp_size = 0;
1815
1816         if (!items)
1817                 return rte_flow_error_set(error, EINVAL,
1818                                           RTE_FLOW_ERROR_TYPE_ACTION,
1819                                           NULL, "invalid empty data");
1820         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1821                 len = flow_dv_get_item_len(items->type);
1822                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1823                         return rte_flow_error_set(error, EINVAL,
1824                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1825                                                   (void *)items->type,
1826                                                   "items total size is too big"
1827                                                   " for encap action");
1828                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1829                 switch (items->type) {
1830                 case RTE_FLOW_ITEM_TYPE_ETH:
1831                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1832                         break;
1833                 case RTE_FLOW_ITEM_TYPE_VLAN:
1834                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1835                         if (!eth)
1836                                 return rte_flow_error_set(error, EINVAL,
1837                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1838                                                 (void *)items->type,
1839                                                 "eth header not found");
1840                         if (!eth->ether_type)
1841                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1842                         break;
1843                 case RTE_FLOW_ITEM_TYPE_IPV4:
1844                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1845                         if (!vlan && !eth)
1846                                 return rte_flow_error_set(error, EINVAL,
1847                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1848                                                 (void *)items->type,
1849                                                 "neither eth nor vlan"
1850                                                 " header found");
1851                         if (vlan && !vlan->eth_proto)
1852                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1853                         else if (eth && !eth->ether_type)
1854                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1855                         if (!ipv4->version_ihl)
1856                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1857                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1858                         if (!ipv4->time_to_live)
1859                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1860                         break;
1861                 case RTE_FLOW_ITEM_TYPE_IPV6:
1862                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1863                         if (!vlan && !eth)
1864                                 return rte_flow_error_set(error, EINVAL,
1865                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1866                                                 (void *)items->type,
1867                                                 "neither eth nor vlan"
1868                                                 " header found");
1869                         if (vlan && !vlan->eth_proto)
1870                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1871                         else if (eth && !eth->ether_type)
1872                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1873                         if (!ipv6->vtc_flow)
1874                                 ipv6->vtc_flow =
1875                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1876                         if (!ipv6->hop_limits)
1877                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1878                         break;
1879                 case RTE_FLOW_ITEM_TYPE_UDP:
1880                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1881                         if (!ipv4 && !ipv6)
1882                                 return rte_flow_error_set(error, EINVAL,
1883                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1884                                                 (void *)items->type,
1885                                                 "ip header not found");
1886                         if (ipv4 && !ipv4->next_proto_id)
1887                                 ipv4->next_proto_id = IPPROTO_UDP;
1888                         else if (ipv6 && !ipv6->proto)
1889                                 ipv6->proto = IPPROTO_UDP;
1890                         break;
1891                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1892                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1893                         if (!udp)
1894                                 return rte_flow_error_set(error, EINVAL,
1895                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1896                                                 (void *)items->type,
1897                                                 "udp header not found");
1898                         if (!udp->dst_port)
1899                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1900                         if (!vxlan->vx_flags)
1901                                 vxlan->vx_flags =
1902                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1903                         break;
1904                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1905                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1906                         if (!udp)
1907                                 return rte_flow_error_set(error, EINVAL,
1908                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1909                                                 (void *)items->type,
1910                                                 "udp header not found");
1911                         if (!vxlan_gpe->proto)
1912                                 return rte_flow_error_set(error, EINVAL,
1913                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1914                                                 (void *)items->type,
1915                                                 "next protocol not found");
1916                         if (!udp->dst_port)
1917                                 udp->dst_port =
1918                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1919                         if (!vxlan_gpe->vx_flags)
1920                                 vxlan_gpe->vx_flags =
1921                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1922                         break;
1923                 case RTE_FLOW_ITEM_TYPE_GRE:
1924                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1925                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1926                         if (!gre->proto)
1927                                 return rte_flow_error_set(error, EINVAL,
1928                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1929                                                 (void *)items->type,
1930                                                 "next protocol not found");
1931                         if (!ipv4 && !ipv6)
1932                                 return rte_flow_error_set(error, EINVAL,
1933                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1934                                                 (void *)items->type,
1935                                                 "ip header not found");
1936                         if (ipv4 && !ipv4->next_proto_id)
1937                                 ipv4->next_proto_id = IPPROTO_GRE;
1938                         else if (ipv6 && !ipv6->proto)
1939                                 ipv6->proto = IPPROTO_GRE;
1940                         break;
1941                 case RTE_FLOW_ITEM_TYPE_VOID:
1942                         break;
1943                 default:
1944                         return rte_flow_error_set(error, EINVAL,
1945                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1946                                                   (void *)items->type,
1947                                                   "unsupported item type");
1948                         break;
1949                 }
1950                 temp_size += len;
1951         }
1952         *size = temp_size;
1953         return 0;
1954 }
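
/*
 * Illustrative example (not part of the driver): a minimal VXLAN encap
 * definition accepted by flow_dv_convert_encap_data(). Unset spec fields
 * are filled with the defaults above: ether_type, IPv4 version/IHL/TTL,
 * next_proto_id, UDP destination port (MLX5_UDP_PORT_VXLAN) and the VXLAN
 * flags:
 *
 *        const struct rte_flow_item items[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *                { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *
 * "eth", "ipv4", "udp" and "vxlan" stand for caller-provided item specs.
 */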
1955
1956 static int
1957 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1958 {
1959         struct rte_ether_hdr *eth = NULL;
1960         struct rte_vlan_hdr *vlan = NULL;
1961         struct rte_ipv6_hdr *ipv6 = NULL;
1962         struct rte_udp_hdr *udp = NULL;
1963         char *next_hdr;
1964         uint16_t proto;
1965
1966         eth = (struct rte_ether_hdr *)data;
1967         next_hdr = (char *)(eth + 1);
1968         proto = RTE_BE16(eth->ether_type);
1969
1970         /* VLAN skipping */
1971         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1972                 vlan = (struct rte_vlan_hdr *)next_hdr;
1973                 proto = RTE_BE16(vlan->eth_proto);
1974                 next_hdr += sizeof(struct rte_vlan_hdr);
1975         }
1976
1977         /* HW calculates the IPv4 checksum, no need to proceed. */
1978         if (proto == RTE_ETHER_TYPE_IPV4)
1979                 return 0;
1980
1981         /* Non-IPv4/IPv6 header, not supported. */
1982         if (proto != RTE_ETHER_TYPE_IPV6) {
1983                 return rte_flow_error_set(error, ENOTSUP,
1984                                           RTE_FLOW_ERROR_TYPE_ACTION,
1985                                           NULL, "Cannot offload non IPv4/IPv6");
1986         }
1987
1988         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1989
1990         /* Ignore non-UDP. */
1991         if (ipv6->proto != IPPROTO_UDP)
1992                 return 0;
1993
1994         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1995         udp->dgram_cksum = 0;
1996
1997         return 0;
1998 }
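
/*
 * Illustrative walk (not part of the driver) through the parsing above
 * for an IPv6 VXLAN encap buffer; unlike IPv4, where HW recomputes the
 * header checksum, the UDP checksum over IPv6 is simply zeroed:
 *
 *        ETH (ether_type = IPv6) -> [VLAN]* (skipped)
 *            -> IPv6 (proto = UDP) -> UDP (dgram_cksum := 0)
 */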
1999
2000 /**
2001  * Convert L2 encap action to DV specification.
2002  *
2003  * @param[in] dev
2004  *   Pointer to rte_eth_dev structure.
2005  * @param[in] action
2006  *   Pointer to action structure.
2007  * @param[in, out] dev_flow
2008  *   Pointer to the mlx5_flow.
2009  * @param[in] transfer
2010  *   Mark if the flow is an E-Switch flow.
2011  * @param[out] error
2012  *   Pointer to the error structure.
2013  *
2014  * @return
2015  *   0 on success, a negative errno value otherwise and rte_errno is set.
2016  */
2017 static int
2018 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2019                                const struct rte_flow_action *action,
2020                                struct mlx5_flow *dev_flow,
2021                                uint8_t transfer,
2022                                struct rte_flow_error *error)
2023 {
2024         const struct rte_flow_item *encap_data;
2025         const struct rte_flow_action_raw_encap *raw_encap_data;
2026         struct mlx5_flow_dv_encap_decap_resource res = {
2027                 .reformat_type =
2028                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2029                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2030                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2031         };
2032
2033         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2034                 raw_encap_data =
2035                         (const struct rte_flow_action_raw_encap *)action->conf;
2036                 res.size = raw_encap_data->size;
2037                 memcpy(res.buf, raw_encap_data->data, res.size);
2038                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2039                         return -rte_errno;
2040         } else {
2041                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2042                         encap_data =
2043                                 ((const struct rte_flow_action_vxlan_encap *)
2044                                                 action->conf)->definition;
2045                 else
2046                         encap_data =
2047                                 ((const struct rte_flow_action_nvgre_encap *)
2048                                                 action->conf)->definition;
2049                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2050                                                &res.size, error))
2051                         return -rte_errno;
2052         }
2053         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2054                 return rte_flow_error_set(error, EINVAL,
2055                                           RTE_FLOW_ERROR_TYPE_ACTION,
2056                                           NULL, "can't create L2 encap action");
2057         return 0;
2058 }
2059
2060 /**
2061  * Convert L2 decap action to DV specification.
2062  *
2063  * @param[in] dev
2064  *   Pointer to rte_eth_dev structure.
2065  * @param[in, out] dev_flow
2066  *   Pointer to the mlx5_flow.
2067  * @param[in] transfer
2068  *   Mark if the flow is an E-Switch flow.
2069  * @param[out] error
2070  *   Pointer to the error structure.
2071  *
2072  * @return
2073  *   0 on success, a negative errno value otherwise and rte_errno is set.
2074  */
2075 static int
2076 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2077                                struct mlx5_flow *dev_flow,
2078                                uint8_t transfer,
2079                                struct rte_flow_error *error)
2080 {
2081         struct mlx5_flow_dv_encap_decap_resource res = {
2082                 .size = 0,
2083                 .reformat_type =
2084                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2085                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2086                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2087         };
2088
2089         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2090                 return rte_flow_error_set(error, EINVAL,
2091                                           RTE_FLOW_ERROR_TYPE_ACTION,
2092                                           NULL, "can't create L2 decap action");
2093         return 0;
2094 }
2095
2096 /**
2097  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2098  *
2099  * @param[in] dev
2100  *   Pointer to rte_eth_dev structure.
2101  * @param[in] action
2102  *   Pointer to action structure.
2103  * @param[in, out] dev_flow
2104  *   Pointer to the mlx5_flow.
2105  * @param[in] attr
2106  *   Pointer to the flow attributes.
2107  * @param[out] error
2108  *   Pointer to the error structure.
2109  *
2110  * @return
2111  *   0 on success, a negative errno value otherwise and rte_errno is set.
2112  */
2113 static int
2114 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2115                                 const struct rte_flow_action *action,
2116                                 struct mlx5_flow *dev_flow,
2117                                 const struct rte_flow_attr *attr,
2118                                 struct rte_flow_error *error)
2119 {
2120         const struct rte_flow_action_raw_encap *encap_data;
2121         struct mlx5_flow_dv_encap_decap_resource res;
2122
2123         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2124         res.size = encap_data->size;
2125         memcpy(res.buf, encap_data->data, res.size);
2126         res.reformat_type = attr->egress ?
2127                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2128                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2129         if (attr->transfer)
2130                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2131         else
2132                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2133                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2134         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2135                 return rte_flow_error_set(error, EINVAL,
2136                                           RTE_FLOW_ERROR_TYPE_ACTION,
2137                                           NULL, "can't create encap action");
2138         return 0;
2139 }
2140
2141 /**
2142  * Create action push VLAN.
2143  *
2144  * @param[in] dev
2145  *   Pointer to rte_eth_dev structure.
2146  * @param[in] attr
2147  *   Pointer to the flow attributes.
2148  * @param[in] vlan
2149  *   Pointer to the VLAN header to push to the Ethernet header.
2150  * @param[in, out] dev_flow
2151  *   Pointer to the mlx5_flow.
2152  * @param[out] error
2153  *   Pointer to the error structure.
2154  *
2155  * @return
2156  *   0 on success, a negative errno value otherwise and rte_errno is set.
2157  */
2158 static int
2159 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2160                                 const struct rte_flow_attr *attr,
2161                                 const struct rte_vlan_hdr *vlan,
2162                                 struct mlx5_flow *dev_flow,
2163                                 struct rte_flow_error *error)
2164 {
2165         struct mlx5_flow_dv_push_vlan_action_resource res;
2166
2167         res.vlan_tag =
2168                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2169                                  vlan->vlan_tci);
2170         if (attr->transfer)
2171                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2172         else
2173                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2174                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2175         return flow_dv_push_vlan_action_resource_register
2176                                             (dev, &res, dev_flow, error);
2177 }
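
/*
 * Illustrative arithmetic (not part of the driver): vlan_tag above packs
 * the TPID into the upper 16 bits and the TCI into the lower 16 bits
 * before the big-endian conversion. E.g. TPID 0x8100, PCP 0, VID 100:
 *
 *        (0x8100 << 16) | 0x0064 = 0x81000064
 */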
2178
2179 /**
2180  * Validate the modify-header actions.
2181  *
2182  * @param[in] action_flags
2183  *   Holds the actions detected until now.
2184  * @param[in] action
2185  *   Pointer to the modify action.
2186  * @param[out] error
2187  *   Pointer to error structure.
2188  *
2189  * @return
2190  *   0 on success, a negative errno value otherwise and rte_errno is set.
2191  */
2192 static int
2193 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2194                                    const struct rte_flow_action *action,
2195                                    struct rte_flow_error *error)
2196 {
2197         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2198                 return rte_flow_error_set(error, EINVAL,
2199                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2200                                           NULL, "action configuration not set");
2201         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2202                 return rte_flow_error_set(error, EINVAL,
2203                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2204                                           "can't have encap action before"
2205                                           " modify action");
2206         return 0;
2207 }
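
/*
 * Illustrative counter-example (not part of the driver): the check above
 * rejects a modify-header action placed after an encap, presumably since
 * the rewrite would otherwise target the newly added outer header:
 *
 *        { VXLAN_ENCAP, SET_IPV4_SRC, END }   rejected ordering
 *        { SET_IPV4_SRC, VXLAN_ENCAP, END }   accepted ordering
 */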
2208
2209 /**
2210  * Validate the modify-header MAC address actions.
2211  *
2212  * @param[in] action_flags
2213  *   Holds the actions detected until now.
2214  * @param[in] action
2215  *   Pointer to the modify action.
2216  * @param[in] item_flags
2217  *   Holds the items detected.
2218  * @param[out] error
2219  *   Pointer to error structure.
2220  *
2221  * @return
2222  *   0 on success, a negative errno value otherwise and rte_errno is set.
2223  */
2224 static int
2225 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2226                                    const struct rte_flow_action *action,
2227                                    const uint64_t item_flags,
2228                                    struct rte_flow_error *error)
2229 {
2230         int ret = 0;
2231
2232         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2233         if (!ret) {
2234                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2235                         return rte_flow_error_set(error, EINVAL,
2236                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2237                                                   NULL,
2238                                                   "no L2 item in pattern");
2239         }
2240         return ret;
2241 }
2242
2243 /**
2244  * Validate the modify-header IPv4 address actions.
2245  *
2246  * @param[in] action_flags
2247  *   Holds the actions detected until now.
2248  * @param[in] action
2249  *   Pointer to the modify action.
2250  * @param[in] item_flags
2251  *   Holds the items detected.
2252  * @param[out] error
2253  *   Pointer to error structure.
2254  *
2255  * @return
2256  *   0 on success, a negative errno value otherwise and rte_errno is set.
2257  */
2258 static int
2259 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2260                                     const struct rte_flow_action *action,
2261                                     const uint64_t item_flags,
2262                                     struct rte_flow_error *error)
2263 {
2264         int ret = 0;
2265
2266         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2267         if (!ret) {
2268                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2269                         return rte_flow_error_set(error, EINVAL,
2270                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2271                                                   NULL,
2272                                                   "no ipv4 item in pattern");
2273         }
2274         return ret;
2275 }
2276
2277 /**
2278  * Validate the modify-header IPv6 address actions.
2279  *
2280  * @param[in] action_flags
2281  *   Holds the actions detected until now.
2282  * @param[in] action
2283  *   Pointer to the modify action.
2284  * @param[in] item_flags
2285  *   Holds the items detected.
2286  * @param[out] error
2287  *   Pointer to error structure.
2288  *
2289  * @return
2290  *   0 on success, a negative errno value otherwise and rte_errno is set.
2291  */
2292 static int
2293 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2294                                     const struct rte_flow_action *action,
2295                                     const uint64_t item_flags,
2296                                     struct rte_flow_error *error)
2297 {
2298         int ret = 0;
2299
2300         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2301         if (!ret) {
2302                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2303                         return rte_flow_error_set(error, EINVAL,
2304                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2305                                                   NULL,
2306                                                   "no ipv6 item in pattern");
2307         }
2308         return ret;
2309 }
2310
2311 /**
2312  * Validate the modify-header TP actions.
2313  *
2314  * @param[in] action_flags
2315  *   Holds the actions detected until now.
2316  * @param[in] action
2317  *   Pointer to the modify action.
2318  * @param[in] item_flags
2319  *   Holds the items detected.
2320  * @param[out] error
2321  *   Pointer to error structure.
2322  *
2323  * @return
2324  *   0 on success, a negative errno value otherwise and rte_errno is set.
2325  */
2326 static int
2327 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2328                                   const struct rte_flow_action *action,
2329                                   const uint64_t item_flags,
2330                                   struct rte_flow_error *error)
2331 {
2332         int ret = 0;
2333
2334         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2335         if (!ret) {
2336                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2337                         return rte_flow_error_set(error, EINVAL,
2338                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2339                                                   NULL, "no transport layer "
2340                                                   "in pattern");
2341         }
2342         return ret;
2343 }
2344
2345 /**
2346  * Validate the modify-header actions of increment/decrement
2347  * TCP sequence number.
2348  *
2349  * @param[in] action_flags
2350  *   Holds the actions detected until now.
2351  * @param[in] action
2352  *   Pointer to the modify action.
2353  * @param[in] item_flags
2354  *   Holds the items detected.
2355  * @param[out] error
2356  *   Pointer to error structure.
2357  *
2358  * @return
2359  *   0 on success, a negative errno value otherwise and rte_errno is set.
2360  */
2361 static int
2362 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2363                                        const struct rte_flow_action *action,
2364                                        const uint64_t item_flags,
2365                                        struct rte_flow_error *error)
2366 {
2367         int ret = 0;
2368
2369         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2370         if (!ret) {
2371                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2372                         return rte_flow_error_set(error, EINVAL,
2373                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2374                                                   NULL, "no TCP item in"
2375                                                   " pattern");
2376                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2377                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2378                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2379                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2380                         return rte_flow_error_set(error, EINVAL,
2381                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2382                                                   NULL,
2383                                                   "cannot decrease and increase"
2384                                                   " TCP sequence number"
2385                                                   " at the same time");
2386         }
2387         return ret;
2388 }
2389
2390 /**
2391  * Validate the modify-header actions of increment/decrement
2392  * TCP acknowledgment number.
2393  *
2394  * @param[in] action_flags
2395  *   Holds the actions detected until now.
2396  * @param[in] action
2397  *   Pointer to the modify action.
2398  * @param[in] item_flags
2399  *   Holds the items detected.
2400  * @param[out] error
2401  *   Pointer to error structure.
2402  *
2403  * @return
2404  *   0 on success, a negative errno value otherwise and rte_errno is set.
2405  */
2406 static int
2407 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2408                                        const struct rte_flow_action *action,
2409                                        const uint64_t item_flags,
2410                                        struct rte_flow_error *error)
2411 {
2412         int ret = 0;
2413
2414         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2415         if (!ret) {
2416                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2417                         return rte_flow_error_set(error, EINVAL,
2418                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2419                                                   NULL, "no TCP item in"
2420                                                   " pattern");
2421                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2422                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2423                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2424                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2425                         return rte_flow_error_set(error, EINVAL,
2426                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2427                                                   NULL,
2428                                                   "cannot decrease and increase"
2429                                                   " TCP acknowledgment number"
2430                                                   " at the same time");
2431         }
2432         return ret;
2433 }
2434
2435 /**
2436  * Validate the modify-header TTL actions.
2437  *
2438  * @param[in] action_flags
2439  *   Holds the actions detected until now.
2440  * @param[in] action
2441  *   Pointer to the modify action.
2442  * @param[in] item_flags
2443  *   Holds the items detected.
2444  * @param[out] error
2445  *   Pointer to error structure.
2446  *
2447  * @return
2448  *   0 on success, a negative errno value otherwise and rte_errno is set.
2449  */
2450 static int
2451 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2452                                    const struct rte_flow_action *action,
2453                                    const uint64_t item_flags,
2454                                    struct rte_flow_error *error)
2455 {
2456         int ret = 0;
2457
2458         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2459         if (!ret) {
2460                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2461                         return rte_flow_error_set(error, EINVAL,
2462                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2463                                                   NULL,
2464                                                   "no IP protocol in pattern");
2465         }
2466         return ret;
2467 }
2468
2469 /**
2470  * Validate jump action.
2471  *
2472  * @param[in] action
2473  *   Pointer to the jump action.
2474  * @param[in] action_flags
2475  *   Holds the actions detected until now.
2476  * @param[in] attributes
2477  *   Pointer to flow attributes.
2478  * @param[in] external
2479  *   Action belongs to a flow rule created by a request external to the PMD.
2480  * @param[out] error
2481  *   Pointer to error structure.
2482  *
2483  * @return
2484  *   0 on success, a negative errno value otherwise and rte_errno is set.
2485  */
2486 static int
2487 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2488                              uint64_t action_flags,
2489                              const struct rte_flow_attr *attributes,
2490                              bool external, struct rte_flow_error *error)
2491 {
2492         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2493                                                     MLX5_MAX_TABLES;
2494         uint32_t target_group, table;
2495         int ret = 0;
2496
2497         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2498                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2499                 return rte_flow_error_set(error, EINVAL,
2500                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2501                                           "can't have 2 fate actions in"
2502                                           " same flow");
2503         if (!action->conf)
2504                 return rte_flow_error_set(error, EINVAL,
2505                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2506                                           NULL, "action configuration not set");
2507         target_group =
2508                 ((const struct rte_flow_action_jump *)action->conf)->group;
2509         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2510                                        &table, error);
2511         if (ret)
2512                 return ret;
2513         if (table >= max_group)
2514                 return rte_flow_error_set(error, EINVAL,
2515                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2516                                           "target group index out of range");
2517         if (attributes->group >= target_group)
2518                 return rte_flow_error_set(error, EINVAL,
2519                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2520                                           "target group must be higher than"
2521                                           " the current flow group");
2522         return 0;
2523 }
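
/*
 * Illustrative example (not part of the driver): per the checks above a
 * jump must target a strictly higher group than the current flow, so with
 * attr->group == 0 this configuration is accepted while .group = 0 would
 * be rejected:
 *
 *        const struct rte_flow_action_jump jump = { .group = 1 };
 */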
2524
2525 /**
2526  * Validate the port_id action.
2527  *
2528  * @param[in] dev
2529  *   Pointer to rte_eth_dev structure.
2530  * @param[in] action_flags
2531  *   Bit-fields that holds the actions detected until now.
2532  * @param[in] action
2533  *   Port_id RTE action structure.
2534  * @param[in] attr
2535  *   Attributes of flow that includes this action.
2536  * @param[out] error
2537  *   Pointer to error structure.
2538  *
2539  * @return
2540  *   0 on success, a negative errno value otherwise and rte_errno is set.
2541  */
2542 static int
2543 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2544                                 uint64_t action_flags,
2545                                 const struct rte_flow_action *action,
2546                                 const struct rte_flow_attr *attr,
2547                                 struct rte_flow_error *error)
2548 {
2549         const struct rte_flow_action_port_id *port_id;
2550         struct mlx5_priv *act_priv;
2551         struct mlx5_priv *dev_priv;
2552         uint16_t port;
2553
2554         if (!attr->transfer)
2555                 return rte_flow_error_set(error, ENOTSUP,
2556                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2557                                           NULL,
2558                                           "port id action is valid in transfer"
2559                                           " mode only");
2560         if (!action || !action->conf)
2561                 return rte_flow_error_set(error, ENOTSUP,
2562                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2563                                           NULL,
2564                                           "port id action parameters must be"
2565                                           " specified");
2566         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2567                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2568                 return rte_flow_error_set(error, EINVAL,
2569                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2570                                           "can have only one fate action in"
2571                                           " a flow");
2572         dev_priv = mlx5_dev_to_eswitch_info(dev);
2573         if (!dev_priv)
2574                 return rte_flow_error_set(error, rte_errno,
2575                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2576                                           NULL,
2577                                           "failed to obtain E-Switch info");
2578         port_id = action->conf;
2579         port = port_id->original ? dev->data->port_id : port_id->id;
2580         act_priv = mlx5_port_to_eswitch_info(port);
2581         if (!act_priv)
2582                 return rte_flow_error_set
2583                                 (error, rte_errno,
2584                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2585                                  "failed to obtain E-Switch port id for port");
2586         if (act_priv->domain_id != dev_priv->domain_id)
2587                 return rte_flow_error_set
2588                                 (error, EINVAL,
2589                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2590                                  "port does not belong to"
2591                                  " E-Switch being configured");
2592         return 0;
2593 }
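
/*
 * Usage sketch (illustrative only): a port_id action that satisfies the
 * checks above. The flow attributes must have the transfer bit set, and the
 * target port must share the E-Switch domain with the device the rule is
 * created on; "peer_port" is a hypothetical port number:
 *
 *     struct rte_flow_action_port_id pid_conf = {
 *             .original = 0,
 *             .id = peer_port,
 *     };
 *     struct rte_flow_attr attr = { .ingress = 1, .transfer = 1 };
 */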
2594
2595 /**
2596  * Find existing modify-header resource or create and register a new one.
2597  *
2598  * @param[in, out] dev
2599  *   Pointer to rte_eth_dev structure.
2600  * @param[in, out] resource
2601  *   Pointer to modify-header resource.
2602  * @param[in, out] dev_flow
2603  *   Pointer to the dev_flow.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_modify_hdr_resource_register
2612                         (struct rte_eth_dev *dev,
2613                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2614                          struct mlx5_flow *dev_flow,
2615                          struct rte_flow_error *error)
2616 {
2617         struct mlx5_priv *priv = dev->data->dev_private;
2618         struct mlx5_ibv_shared *sh = priv->sh;
2619         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2620         struct mlx5dv_dr_domain *ns;
2621
2622         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2623                 ns = sh->fdb_domain;
2624         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2625                 ns = sh->tx_domain;
2626         else
2627                 ns = sh->rx_domain;
2628         resource->flags =
2629                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2630         /* Lookup a matching resource from cache. */
2631         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2632                 if (resource->ft_type == cache_resource->ft_type &&
2633                     resource->actions_num == cache_resource->actions_num &&
2634                     resource->flags == cache_resource->flags &&
2635                     !memcmp((const void *)resource->actions,
2636                             (const void *)cache_resource->actions,
2637                             (resource->actions_num *
2638                                             sizeof(resource->actions[0])))) {
2639                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2640                                 (void *)cache_resource,
2641                                 rte_atomic32_read(&cache_resource->refcnt));
2642                         rte_atomic32_inc(&cache_resource->refcnt);
2643                         dev_flow->dv.modify_hdr = cache_resource;
2644                         return 0;
2645                 }
2646         }
2647         /* Register new modify-header resource. */
2648         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2649         if (!cache_resource)
2650                 return rte_flow_error_set(error, ENOMEM,
2651                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2652                                           "cannot allocate resource memory");
2653         *cache_resource = *resource;
2654         cache_resource->verbs_action =
2655                 mlx5_glue->dv_create_flow_action_modify_header
2656                                         (sh->ctx, cache_resource->ft_type,
2657                                          ns, cache_resource->flags,
2658                                          cache_resource->actions_num *
2659                                          sizeof(cache_resource->actions[0]),
2660                                          (uint64_t *)cache_resource->actions);
2661         if (!cache_resource->verbs_action) {
2662                 rte_free(cache_resource);
2663                 return rte_flow_error_set(error, ENOMEM,
2664                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2665                                           NULL, "cannot create action");
2666         }
2667         rte_atomic32_init(&cache_resource->refcnt);
2668         rte_atomic32_inc(&cache_resource->refcnt);
2669         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2670         dev_flow->dv.modify_hdr = cache_resource;
2671         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2672                 (void *)cache_resource,
2673                 rte_atomic32_read(&cache_resource->refcnt));
2674         return 0;
2675 }
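
/*
 * Cache behavior sketch (illustrative, with flow_a and flow_b standing for
 * two hypothetical struct mlx5_flow pointers): the lookup above keys on the
 * tuple (ft_type, actions_num, flags, actions[0..actions_num-1]), so two
 * device flows requesting an identical header modification share a single
 * verbs action and only bump its reference count:
 *
 *     flow_dv_modify_hdr_resource_register(dev, &res, flow_a, &err);
 *     flow_dv_modify_hdr_resource_register(dev, &res, flow_b, &err);
 *     assert(flow_a->dv.modify_hdr == flow_b->dv.modify_hdr);
 */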
2676
2677 #define MLX5_CNT_CONTAINER_RESIZE 64
2678
2679 /**
2680  * Get or create a flow counter.
2681  *
2682  * @param[in] dev
2683  *   Pointer to the Ethernet device structure.
2684  * @param[in] shared
2685  *   Indicate if this counter is shared with other flows.
2686  * @param[in] id
2687  *   Counter identifier.
2688  *
2689  * @return
2690  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
2691  */
2692 static struct mlx5_flow_counter *
2693 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2694                                uint32_t id)
2695 {
2696         struct mlx5_priv *priv = dev->data->dev_private;
2697         struct mlx5_flow_counter *cnt = NULL;
2698         struct mlx5_devx_obj *dcs = NULL;
2699
2700         if (!priv->config.devx) {
2701                 rte_errno = ENOTSUP;
2702                 return NULL;
2703         }
2704         if (shared) {
2705                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2706                         if (cnt->shared && cnt->id == id) {
2707                                 cnt->ref_cnt++;
2708                                 return cnt;
2709                         }
2710                 }
2711         }
2712         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2713         if (!dcs)
2714                 return NULL;
2715         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2716         if (!cnt) {
2717                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2718                 rte_errno = ENOMEM;
2719                 return NULL;
2720         }
2721         struct mlx5_flow_counter tmpl = {
2722                 .shared = shared,
2723                 .ref_cnt = 1,
2724                 .id = id,
2725                 .dcs = dcs,
2726         };
2727         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2728         if (!tmpl.action) {
2729                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2730                 rte_errno = errno;
2731                 rte_free(cnt);
2732                 return NULL;
2733         }
2734         *cnt = tmpl;
2735         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2736         return cnt;
2737 }
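
/*
 * Usage sketch (illustrative): in fallback mode every counter owns its own
 * devX object, and shared counters are deduplicated by ID. With a
 * hypothetical shared counter ID of 42:
 *
 *     struct mlx5_flow_counter *c1, *c2;
 *     c1 = flow_dv_counter_alloc_fallback(dev, 1, 42);
 *     c2 = flow_dv_counter_alloc_fallback(dev, 1, 42);
 *
 * Here c1 == c2 and c1->ref_cnt == 2.
 */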
2738
2739 /**
2740  * Release a flow counter.
2741  *
2742  * @param[in] dev
2743  *   Pointer to the Ethernet device structure.
2744  * @param[in] counter
2745  *   Pointer to the counter handle.
2746  */
2747 static void
2748 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2749                                  struct mlx5_flow_counter *counter)
2750 {
2751         struct mlx5_priv *priv = dev->data->dev_private;
2752
2753         if (!counter)
2754                 return;
2755         if (--counter->ref_cnt == 0) {
2756                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2757                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2758                 rte_free(counter);
2759         }
2760 }
2761
2762 /**
2763  * Query a devx flow counter.
2764  *
2765  * @param[in] dev
2766  *   Pointer to the Ethernet device structure.
2767  * @param[in] cnt
2768  *   Pointer to the flow counter.
2769  * @param[out] pkts
2770  *   The statistics value of packets.
2771  * @param[out] bytes
2772  *   The statistics value of bytes.
2773  *
2774  * @return
2775  *   0 on success, otherwise a negative errno value and rte_errno is set.
2776  */
2777 static inline int
2778 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2779                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2780                      uint64_t *bytes)
2781 {
2782         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2783                                                 0, NULL, NULL, 0);
2784 }
2785
2786 /**
2787  * Get a pool by a counter.
2788  *
2789  * @param[in] cnt
2790  *   Pointer to the counter.
2791  *
2792  * @return
2793  *   The counter pool.
2794  */
2795 static struct mlx5_flow_counter_pool *
2796 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2797 {
2798         if (!cnt->batch) {
2799                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2800                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2801         }
2802         return cnt->pool;
2803 }
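
/*
 * Worked example (illustrative, assuming MLX5_COUNTERS_PER_POOL == 512):
 * single-allocation pools place the pool header immediately before the
 * counters_raw[] array, and a counter's index equals dcs->id modulo the
 * pool size, so for a counter with dcs->id == 1027 (index 1027 % 512 == 3):
 *
 *     cnt -= 3;                                 step back to counters_raw[0]
 *     pool = (struct mlx5_flow_counter_pool *)cnt - 1;      the pool header
 */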
2804
2805 /**
2806  * Get a pool by devx counter ID.
2807  *
2808  * @param[in] cont
2809  *   Pointer to the counter container.
2810  * @param[in] id
2811  *   The counter devx ID.
2812  *
2813  * @return
2814  *   The counter pool pointer if it exists, NULL otherwise.
2815  */
2816 static struct mlx5_flow_counter_pool *
2817 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2818 {
2819         struct mlx5_flow_counter_pool *pool;
2820
2821         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2822                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2823                                 MLX5_COUNTERS_PER_POOL;
2824
2825                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2826                         return pool;
2827         }
2828         return NULL;
2829 }
2830
2831 /**
2832  * Allocate memory for the counter values, wrapped by all the needed
2833  * management structures.
2834  *
2835  * @param[in] dev
2836  *   Pointer to the Ethernet device structure.
2837  * @param[in] raws_n
2838  *   Number of raw memory areas, each holding MLX5_COUNTERS_PER_POOL counters.
2839  *
2840  * @return
2841  *   The new memory management pointer on success, otherwise NULL and rte_errno
2842  *   is set.
2843  */
2844 static struct mlx5_counter_stats_mem_mng *
2845 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2846 {
2847         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2848                                         (dev->data->dev_private))->sh;
2849         struct mlx5_devx_mkey_attr mkey_attr;
2850         struct mlx5_counter_stats_mem_mng *mem_mng;
2851         volatile struct flow_counter_stats *raw_data;
2852         int size = (sizeof(struct flow_counter_stats) *
2853                         MLX5_COUNTERS_PER_POOL +
2854                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2855                         sizeof(struct mlx5_counter_stats_mem_mng);
2856         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2857         int i;
2858
2859         if (!mem) {
2860                 rte_errno = ENOMEM;
2861                 return NULL;
2862         }
2863         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2864         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2865         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2866                                                  IBV_ACCESS_LOCAL_WRITE);
2867         if (!mem_mng->umem) {
2868                 rte_errno = errno;
2869                 rte_free(mem);
2870                 return NULL;
2871         }
2872         mkey_attr.addr = (uintptr_t)mem;
2873         mkey_attr.size = size;
2874         mkey_attr.umem_id = mem_mng->umem->umem_id;
2875         mkey_attr.pd = sh->pdn;
2876         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2877         if (!mem_mng->dm) {
2878                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2879                 rte_errno = errno;
2880                 rte_free(mem);
2881                 return NULL;
2882         }
2883         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2884         raw_data = (volatile struct flow_counter_stats *)mem;
2885         for (i = 0; i < raws_n; ++i) {
2886                 mem_mng->raws[i].mem_mng = mem_mng;
2887                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2888         }
2889         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2890         return mem_mng;
2891 }
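
/*
 * Layout sketch of the single allocation above (illustrative):
 *
 *     mem ..............  raw counter values, raws_n areas of
 *                         MLX5_COUNTERS_PER_POOL entries each
 *     mem + size .......  raws_n struct mlx5_counter_stats_raw slots
 *     end of buffer ....  the struct mlx5_counter_stats_mem_mng header
 *
 * Only the first region is registered with the device through the umem and
 * mkey pair ("size" is shrunk to it before registration); the bookkeeping
 * structures stay host-only.
 */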
2892
2893 /**
2894  * Resize a counter container.
2895  *
2896  * @param[in] dev
2897  *   Pointer to the Ethernet device structure.
2898  * @param[in] batch
2899  *   Whether the pool is for counters allocated by a batch command.
2900  *
2901  * @return
2902  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2903  */
2904 static struct mlx5_pools_container *
2905 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2906 {
2907         struct mlx5_priv *priv = dev->data->dev_private;
2908         struct mlx5_pools_container *cont =
2909                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2910         struct mlx5_pools_container *new_cont =
2911                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2912         struct mlx5_counter_stats_mem_mng *mem_mng;
2913         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2914         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2915         int i;
2916
2917         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2918                 /* The last resize hasn't been detected by the host thread yet. */
2919                 rte_errno = EAGAIN;
2920                 return NULL;
2921         }
2922         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2923         if (!new_cont->pools) {
2924                 rte_errno = ENOMEM;
2925                 return NULL;
2926         }
2927         if (cont->n)
2928                 memcpy(new_cont->pools, cont->pools, cont->n *
2929                        sizeof(struct mlx5_flow_counter_pool *));
2930         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2931                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2932         if (!mem_mng) {
2933                 rte_free(new_cont->pools);
2934                 return NULL;
2935         }
2936         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2937                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2938                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2939                                  i, next);
2940         new_cont->n = resize;
2941         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2942         TAILQ_INIT(&new_cont->pool_list);
2943         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2944         new_cont->init_mem_mng = mem_mng;
2945         rte_cio_wmb();
2946         /* Flip the master container. */
2947         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
2948         return new_cont;
2949 }
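
/*
 * Double-buffering sketch (illustrative; the third MLX5_CNT_CONTAINER()
 * argument is assumed to select a per-thread view bit of mhi[batch]): each
 * batch type keeps two containers, and the mhi bit picks the master, so the
 * resize prepares the unused copy and publishes it with a single bit flip:
 *
 *     cont = MLX5_CNT_CONTAINER(priv->sh, batch, 0);        current master
 *     new = MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);  spare copy
 *     fill "new", rte_cio_wmb(), then flip bit 0 of mhi[batch]
 *
 * The host query thread keeps its own view (third argument 1) until it
 * catches up, which is why a second resize bails out with EAGAIN above.
 */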
2950
2951 /**
2952  * Query a devx flow counter.
2953  *
2954  * @param[in] dev
2955  *   Pointer to the Ethernet device structure.
2956  * @param[in] cnt
2957  *   Pointer to the flow counter.
2958  * @param[out] pkts
2959  *   The statistics value of packets.
2960  * @param[out] bytes
2961  *   The statistics value of bytes.
2962  *
2963  * @return
2964  *   0 on success, otherwise a negative errno value and rte_errno is set.
2965  */
2966 static inline int
2967 _flow_dv_query_count(struct rte_eth_dev *dev,
2968                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2969                      uint64_t *bytes)
2970 {
2971         struct mlx5_priv *priv = dev->data->dev_private;
2972         struct mlx5_flow_counter_pool *pool =
2973                         flow_dv_counter_pool_get(cnt);
2974         int offset = cnt - &pool->counters_raw[0];
2975
2976         if (priv->counter_fallback)
2977                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2978
2979         rte_spinlock_lock(&pool->sl);
2980         /*
2981          * A single-counter allocation may return an ID smaller than the
2982          * IDs currently being read by the host thread in parallel.
2983          * In this case the new counter values must be reported as 0.
2984          */
2985         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2986                 *pkts = 0;
2987                 *bytes = 0;
2988         } else {
2989                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2990                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2991         }
2992         rte_spinlock_unlock(&pool->sl);
2993         return 0;
2994 }
2995
2996 /**
2997  * Create and initialize a new counter pool.
2998  *
2999  * @param[in] dev
3000  *   Pointer to the Ethernet device structure.
3001  * @param[out] dcs
3002  *   The devX counter handle.
3003  * @param[in] batch
3004  *   Whether the pool is for counters allocated by a batch command.
3005  *
3006  * @return
3007  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
3008  */
3009 static struct mlx5_flow_counter_pool *
3010 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
3011                     uint32_t batch)
3012 {
3013         struct mlx5_priv *priv = dev->data->dev_private;
3014         struct mlx5_flow_counter_pool *pool;
3015         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3016                                                                0);
3017         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
3018         uint32_t size;
3019
3020         if (cont->n == n_valid) {
3021                 cont = flow_dv_container_resize(dev, batch);
3022                 if (!cont)
3023                         return NULL;
3024         }
3025         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
3026                         sizeof(struct mlx5_flow_counter);
3027         pool = rte_calloc(__func__, 1, size, 0);
3028         if (!pool) {
3029                 rte_errno = ENOMEM;
3030                 return NULL;
3031         }
3032         pool->min_dcs = dcs;
3033         pool->raw = cont->init_mem_mng->raws + n_valid %
3034                                                      MLX5_CNT_CONTAINER_RESIZE;
3035         pool->raw_hw = NULL;
3036         rte_spinlock_init(&pool->sl);
3037         /*
3038          * The newly allocated counters in this pool have generation 0;
3039          * pool generation 2 makes all of them valid for allocation.
3040          */
3041         rte_atomic64_set(&pool->query_gen, 0x2);
3042         TAILQ_INIT(&pool->counters);
3043         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3044         cont->pools[n_valid] = pool;
3045         /* Pool initialization must be visible before host thread access. */
3046         rte_cio_wmb();
3047         rte_atomic16_add(&cont->n_valid, 1);
3048         return pool;
3049 }
3050
3051 /**
3052  * Prepare a new counter and/or a new counter pool.
3053  *
3054  * @param[in] dev
3055  *   Pointer to the Ethernet device structure.
3056  * @param[out] cnt_free
3057  *   Where to put the pointer of a new counter.
3058  * @param[in] batch
3059  *   Whether the pool is for counters allocated by a batch command.
3060  *
3061  * @return
3062  *   The free counter pool pointer and @p cnt_free is set on success,
3063  *   NULL otherwise and rte_errno is set.
3064  */
3065 static struct mlx5_flow_counter_pool *
3066 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
3067                              struct mlx5_flow_counter **cnt_free,
3068                              uint32_t batch)
3069 {
3070         struct mlx5_priv *priv = dev->data->dev_private;
3071         struct mlx5_flow_counter_pool *pool;
3072         struct mlx5_devx_obj *dcs = NULL;
3073         struct mlx5_flow_counter *cnt;
3074         uint32_t i;
3075
3076         if (!batch) {
3077                 /* bulk_bitmap must be 0 for single counter allocation. */
3078                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3079                 if (!dcs)
3080                         return NULL;
3081                 pool = flow_dv_find_pool_by_id
3082                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
3083                 if (!pool) {
3084                         pool = flow_dv_pool_create(dev, dcs, batch);
3085                         if (!pool) {
3086                                 mlx5_devx_cmd_destroy(dcs);
3087                                 return NULL;
3088                         }
3089                 } else if (dcs->id < pool->min_dcs->id) {
3090                         rte_atomic64_set(&pool->a64_dcs,
3091                                          (int64_t)(uintptr_t)dcs);
3092                 }
3093                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
3094                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3095                 cnt->dcs = dcs;
3096                 *cnt_free = cnt;
3097                 return pool;
3098         }
3099         /* bulk_bitmap is in units of 128 counters. */
3100         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
3101                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
3102         if (!dcs) {
3103                 rte_errno = ENODATA;
3104                 return NULL;
3105         }
3106         pool = flow_dv_pool_create(dev, dcs, batch);
3107         if (!pool) {
3108                 mlx5_devx_cmd_destroy(dcs);
3109                 return NULL;
3110         }
3111         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3112                 cnt = &pool->counters_raw[i];
3113                 cnt->pool = pool;
3114                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3115         }
3116         *cnt_free = &pool->counters_raw[0];
3117         return pool;
3118 }
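
/*
 * Sizing note (illustrative, assuming MLX5_COUNTERS_PER_POOL == 512): the
 * bulk bitmap is in units of 128 counters, so the 0x4 value above asks the
 * device for 4 * 128 == 512 counters in one command, and a single batch
 * allocation fills exactly one pool:
 *
 *     dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
 *
 * Counter i of the pool is then assumed to map to devX counter ID
 * dcs->id + i, which is what the offset passed to the counter action
 * creation in flow_dv_counter_alloc() relies on.
 */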
3119
3120 /**
3121  * Search for an existing shared counter.
3122  *
3123  * @param[in] cont
3124  *   Pointer to the relevant counter pool container.
3125  * @param[in] id
3126  *   The shared counter ID to search.
3127  *
3128  * @return
3129  *   NULL if it does not exist, otherwise a pointer to the shared counter.
3130  */
3131 static struct mlx5_flow_counter *
3132 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3133                               uint32_t id)
3134 {
3135         struct mlx5_flow_counter *cnt;
3136         struct mlx5_flow_counter_pool *pool;
3137         int i;
3138
3139         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3140                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3141                         cnt = &pool->counters_raw[i];
3142                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3143                                 return cnt;
3144                 }
3145         }
3146         return NULL;
3147 }
3148
3149 /**
3150  * Allocate a flow counter.
3151  *
3152  * @param[in] dev
3153  *   Pointer to the Ethernet device structure.
3154  * @param[in] shared
3155  *   Indicate if this counter is shared with other flows.
3156  * @param[in] id
3157  *   Counter identifier.
3158  * @param[in] group
3159  *   Counter flow group.
3160  *
3161  * @return
3162  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
3163  */
3164 static struct mlx5_flow_counter *
3165 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3166                       uint16_t group)
3167 {
3168         struct mlx5_priv *priv = dev->data->dev_private;
3169         struct mlx5_flow_counter_pool *pool = NULL;
3170         struct mlx5_flow_counter *cnt_free = NULL;
3171         /*
3172          * Currently a group 0 flow counter cannot be assigned to a flow if
3173          * it is not the first one in the batch counter allocation, so it is
3174          * better to allocate counters one by one for these flows in a
3175          * separate container.
3176          * A counter can be shared between different groups, so we need to
3177          * take shared counters from the single-counter container.
3178          */
3179         uint32_t batch = (group && !shared) ? 1 : 0;
3180         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3181                                                                0);
3182
3183         if (priv->counter_fallback)
3184                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3185         if (!priv->config.devx) {
3186                 rte_errno = ENOTSUP;
3187                 return NULL;
3188         }
3189         if (shared) {
3190                 cnt_free = flow_dv_counter_shared_search(cont, id);
3191                 if (cnt_free) {
3192                         if (cnt_free->ref_cnt + 1 == 0) {
3193                                 rte_errno = E2BIG;
3194                                 return NULL;
3195                         }
3196                         cnt_free->ref_cnt++;
3197                         return cnt_free;
3198                 }
3199         }
3200         /* Pools that have free counters are at the start of the list. */
3201         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3202                 /*
3203                  * The free counter reset values must be updated between the
3204                  * counter release and the counter allocation, so at least
3205                  * one query must be done in this period. Ensure this by
3206                  * saving the query generation at release time.
3207                  * The free list is sorted according to the generation, so
3208                  * if the first counter is not updated, all the others are
3209                  * not updated either.
3210                  */
3211                 cnt_free = TAILQ_FIRST(&pool->counters);
3212                 if (cnt_free && cnt_free->query_gen + 1 <
3213                     rte_atomic64_read(&pool->query_gen))
3214                         break;
3215                 cnt_free = NULL;
3216         }
3217         if (!cnt_free) {
3218                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3219                 if (!pool)
3220                         return NULL;
3221         }
3222         cnt_free->batch = batch;
3223         /* Create a DV counter action only in the first time usage. */
3224         if (!cnt_free->action) {
3225                 uint16_t offset;
3226                 struct mlx5_devx_obj *dcs;
3227
3228                 if (batch) {
3229                         offset = cnt_free - &pool->counters_raw[0];
3230                         dcs = pool->min_dcs;
3231                 } else {
3232                         offset = 0;
3233                         dcs = cnt_free->dcs;
3234                 }
3235                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3236                                         (dcs->obj, offset);
3237                 if (!cnt_free->action) {
3238                         rte_errno = errno;
3239                         return NULL;
3240                 }
3241         }
3242         /* Update the counter reset values. */
3243         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3244                                  &cnt_free->bytes))
3245                 return NULL;
3246         cnt_free->shared = shared;
3247         cnt_free->ref_cnt = 1;
3248         cnt_free->id = id;
3249         if (!priv->sh->cmng.query_thread_on)
3250                 /* Start the asynchronous batch query by the host thread. */
3251                 mlx5_set_query_alarm(priv->sh);
3252         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3253         if (TAILQ_EMPTY(&pool->counters)) {
3254                 /* Move the pool to the end of the container pool list. */
3255                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3256                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3257         }
3258         return cnt_free;
3259 }
3260
3261 /**
3262  * Release a flow counter.
3263  *
3264  * @param[in] dev
3265  *   Pointer to the Ethernet device structure.
3266  * @param[in] counter
3267  *   Pointer to the counter handle.
3268  */
3269 static void
3270 flow_dv_counter_release(struct rte_eth_dev *dev,
3271                         struct mlx5_flow_counter *counter)
3272 {
3273         struct mlx5_priv *priv = dev->data->dev_private;
3274
3275         if (!counter)
3276                 return;
3277         if (priv->counter_fallback) {
3278                 flow_dv_counter_release_fallback(dev, counter);
3279                 return;
3280         }
3281         if (--counter->ref_cnt == 0) {
3282                 struct mlx5_flow_counter_pool *pool =
3283                                 flow_dv_counter_pool_get(counter);
3284
3285                 /* Put the counter at the end: the most recently updated one. */
3286                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3287                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3288         }
3289 }
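
/*
 * Generation bookkeeping, worked example (illustrative): a released counter
 * records the pool generation at release time, and the allocation path in
 * flow_dv_counter_alloc() reuses it only once
 *
 *     cnt->query_gen + 1 < rte_atomic64_read(&pool->query_gen)
 *
 * holds. A counter released while query_gen == 2 therefore stays on the
 * free list until the host thread advances the pool past generation 3,
 * guaranteeing at least one full query pass refreshed its raw values
 * between release and reuse.
 */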
3290
3291 /**
3292  * Verify that the @p attributes will be correctly understood by the NIC
3293  * and are valid in the current device configuration.
3294  *
3295  * @param[in] dev
3296  *   Pointer to dev struct.
3297  * @param[in] attributes
3298  *   Pointer to flow attributes
3299  * @param[in] external
3300  *   This flow rule is created by a request external to the PMD.
3301  * @param[out] error
3302  *   Pointer to error structure.
3303  *
3304  * @return
3305  *   0 on success, a negative errno value otherwise and rte_errno is set.
3306  */
3307 static int
3308 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3309                             const struct rte_flow_attr *attributes,
3310                             bool external __rte_unused,
3311                             struct rte_flow_error *error)
3312 {
3313         struct mlx5_priv *priv = dev->data->dev_private;
3314         uint32_t priority_max = priv->config.flow_prio - 1;
3315
3316 #ifndef HAVE_MLX5DV_DR
3317         if (attributes->group)
3318                 return rte_flow_error_set(error, ENOTSUP,
3319                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3320                                           NULL,
3321                                           "groups are not supported");
3322 #else
3323         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3324                                                     MLX5_MAX_TABLES;
3325         uint32_t table;
3326         int ret;
3327
3328         ret = mlx5_flow_group_to_table(attributes, external,
3329                                        attributes->group,
3330                                        &table, error);
3331         if (ret)
3332                 return ret;
3333         if (table >= max_group)
3334                 return rte_flow_error_set(error, EINVAL,
3335                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3336                                           "group index out of range");
3337 #endif
3338         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3339             attributes->priority >= priority_max)
3340                 return rte_flow_error_set(error, ENOTSUP,
3341                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3342                                           NULL,
3343                                           "priority out of range");
3344         if (attributes->transfer) {
3345                 if (!priv->config.dv_esw_en)
3346                         return rte_flow_error_set
3347                                 (error, ENOTSUP,
3348                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3349                                  "E-Switch dr is not supported");
3350                 if (!(priv->representor || priv->master))
3351                         return rte_flow_error_set
3352                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3353                                  NULL, "E-Switch configuration can only be"
3354                                  " done by a master or a representor device");
3355                 if (attributes->egress)
3356                         return rte_flow_error_set
3357                                 (error, ENOTSUP,
3358                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3359                                  "egress is not supported");
3360         }
3361         if (!(attributes->egress ^ attributes->ingress))
3362                 return rte_flow_error_set(error, ENOTSUP,
3363                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3364                                           "must specify exactly one of "
3365                                           "ingress or egress");
3366         return 0;
3367 }
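
/*
 * Example (illustrative only): attributes accepted by the checks above for
 * a plain NIC rule - exactly one direction bit, a priority within the
 * configured range, and no transfer bit:
 *
 *     struct rte_flow_attr attr = {
 *             .group = 0,
 *             .priority = 0,
 *             .ingress = 1,
 *     };
 */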
3368
3369 /**
3370  * Internal validation function. For validating both actions and items.
3371  *
3372  * @param[in] dev
3373  *   Pointer to the rte_eth_dev structure.
3374  * @param[in] attr
3375  *   Pointer to the flow attributes.
3376  * @param[in] items
3377  *   Pointer to the list of items.
3378  * @param[in] actions
3379  *   Pointer to the list of actions.
3380  * @param[in] external
3381  *   This flow rule is created by a request external to the PMD.
3382  * @param[out] error
3383  *   Pointer to the error structure.
3384  *
3385  * @return
3386  *   0 on success, a negative errno value otherwise and rte_errno is set.
3387  */
3388 static int
3389 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3390                  const struct rte_flow_item items[],
3391                  const struct rte_flow_action actions[],
3392                  bool external, struct rte_flow_error *error)
3393 {
3394         int ret;
3395         uint64_t action_flags = 0;
3396         uint64_t item_flags = 0;
3397         uint64_t last_item = 0;
3398         uint8_t next_protocol = 0xff;
3399         uint16_t ether_type = 0;
3400         int actions_n = 0;
3401         const struct rte_flow_item *gre_item = NULL;
3402         struct rte_flow_item_tcp nic_tcp_mask = {
3403                 .hdr = {
3404                         .tcp_flags = 0xFF,
3405                         .src_port = RTE_BE16(UINT16_MAX),
3406                         .dst_port = RTE_BE16(UINT16_MAX),
3407                 }
3408         };
3409
3410         if (items == NULL)
3411                 return -1;
3412         ret = flow_dv_validate_attributes(dev, attr, external, error);
3413         if (ret < 0)
3414                 return ret;
3415         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3416                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3417                 int type = items->type;
3418
3419                 switch (type) {
3420                 case RTE_FLOW_ITEM_TYPE_VOID:
3421                         break;
3422                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3423                         ret = flow_dv_validate_item_port_id
3424                                         (dev, items, attr, item_flags, error);
3425                         if (ret < 0)
3426                                 return ret;
3427                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3428                         break;
3429                 case RTE_FLOW_ITEM_TYPE_ETH:
3430                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3431                                                           error);
3432                         if (ret < 0)
3433                                 return ret;
3434                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3435                                              MLX5_FLOW_LAYER_OUTER_L2;
3436                         if (items->mask != NULL && items->spec != NULL) {
3437                                 ether_type =
3438                                         ((const struct rte_flow_item_eth *)
3439                                          items->spec)->type;
3440                                 ether_type &=
3441                                         ((const struct rte_flow_item_eth *)
3442                                          items->mask)->type;
3443                                 ether_type = rte_be_to_cpu_16(ether_type);
3444                         } else {
3445                                 ether_type = 0;
3446                         }
3447                         break;
3448                 case RTE_FLOW_ITEM_TYPE_VLAN:
3449                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3450                                                            dev, error);
3451                         if (ret < 0)
3452                                 return ret;
3453                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3454                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3455                         if (items->mask != NULL && items->spec != NULL) {
3456                                 ether_type =
3457                                         ((const struct rte_flow_item_vlan *)
3458                                          items->spec)->inner_type;
3459                                 ether_type &=
3460                                         ((const struct rte_flow_item_vlan *)
3461                                          items->mask)->inner_type;
3462                                 ether_type = rte_be_to_cpu_16(ether_type);
3463                         } else {
3464                                 ether_type = 0;
3465                         }
3466                         break;
3467                 case RTE_FLOW_ITEM_TYPE_IPV4:
3468                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3469                                                   &item_flags, &tunnel);
3470                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3471                                                            last_item,
3472                                                            ether_type, NULL,
3473                                                            error);
3474                         if (ret < 0)
3475                                 return ret;
3476                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3477                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3478                         if (items->mask != NULL &&
3479                             ((const struct rte_flow_item_ipv4 *)
3480                              items->mask)->hdr.next_proto_id) {
3481                                 next_protocol =
3482                                         ((const struct rte_flow_item_ipv4 *)
3483                                          (items->spec))->hdr.next_proto_id;
3484                                 next_protocol &=
3485                                         ((const struct rte_flow_item_ipv4 *)
3486                                          (items->mask))->hdr.next_proto_id;
3487                         } else {
3488                                 /* Reset for inner layer. */
3489                                 next_protocol = 0xff;
3490                         }
3491                         break;
3492                 case RTE_FLOW_ITEM_TYPE_IPV6:
3493                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3494                                                   &item_flags, &tunnel);
3495                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3496                                                            last_item,
3497                                                            ether_type, NULL,
3498                                                            error);
3499                         if (ret < 0)
3500                                 return ret;
3501                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3502                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3503                         if (items->mask != NULL &&
3504                             ((const struct rte_flow_item_ipv6 *)
3505                              items->mask)->hdr.proto) {
3506                                 next_protocol =
3507                                         ((const struct rte_flow_item_ipv6 *)
3508                                          items->spec)->hdr.proto;
3509                                 next_protocol &=
3510                                         ((const struct rte_flow_item_ipv6 *)
3511                                          items->mask)->hdr.proto;
3512                         } else {
3513                                 /* Reset for inner layer. */
3514                                 next_protocol = 0xff;
3515                         }
3516                         break;
3517                 case RTE_FLOW_ITEM_TYPE_TCP:
3518                         ret = mlx5_flow_validate_item_tcp
3519                                                 (items, item_flags,
3520                                                  next_protocol,
3521                                                  &nic_tcp_mask,
3522                                                  error);
3523                         if (ret < 0)
3524                                 return ret;
3525                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3526                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3527                         break;
3528                 case RTE_FLOW_ITEM_TYPE_UDP:
3529                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3530                                                           next_protocol,
3531                                                           error);
3532                         if (ret < 0)
3533                                 return ret;
3534                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3535                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3536                         break;
3537                 case RTE_FLOW_ITEM_TYPE_GRE:
3538                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3539                                                           next_protocol, error);
3540                         if (ret < 0)
3541                                 return ret;
3542                         gre_item = items;
3543                         last_item = MLX5_FLOW_LAYER_GRE;
3544                         break;
3545                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3546                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3547                                                             next_protocol,
3548                                                             error);
3549                         if (ret < 0)
3550                                 return ret;
3551                         last_item = MLX5_FLOW_LAYER_NVGRE;
3552                         break;
3553                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3554                         ret = mlx5_flow_validate_item_gre_key
3555                                 (items, item_flags, gre_item, error);
3556                         if (ret < 0)
3557                                 return ret;
3558                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3559                         break;
3560                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3561                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3562                                                             error);
3563                         if (ret < 0)
3564                                 return ret;
3565                         last_item = MLX5_FLOW_LAYER_VXLAN;
3566                         break;
3567                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3568                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3569                                                                 item_flags, dev,
3570                                                                 error);
3571                         if (ret < 0)
3572                                 return ret;
3573                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3574                         break;
3575                 case RTE_FLOW_ITEM_TYPE_GENEVE:
3576                         ret = mlx5_flow_validate_item_geneve(items,
3577                                                              item_flags, dev,
3578                                                              error);
3579                         if (ret < 0)
3580                                 return ret;
3581                         last_item = MLX5_FLOW_LAYER_GENEVE;
3582                         break;
3583                 case RTE_FLOW_ITEM_TYPE_MPLS:
3584                         ret = mlx5_flow_validate_item_mpls(dev, items,
3585                                                            item_flags,
3586                                                            last_item, error);
3587                         if (ret < 0)
3588                                 return ret;
3589                         last_item = MLX5_FLOW_LAYER_MPLS;
3590                         break;
3591                 case RTE_FLOW_ITEM_TYPE_META:
3592                         ret = flow_dv_validate_item_meta(dev, items, attr,
3593                                                          error);
3594                         if (ret < 0)
3595                                 return ret;
3596                         last_item = MLX5_FLOW_ITEM_METADATA;
3597                         break;
3598                 case RTE_FLOW_ITEM_TYPE_ICMP:
3599                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3600                                                            next_protocol,
3601                                                            error);
3602                         if (ret < 0)
3603                                 return ret;
3604                         last_item = MLX5_FLOW_LAYER_ICMP;
3605                         break;
3606                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3607                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3608                                                             next_protocol,
3609                                                             error);
3610                         if (ret < 0)
3611                                 return ret;
3612                         last_item = MLX5_FLOW_LAYER_ICMP6;
3613                         break;
3614                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3615                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3616                         break;
3617                 default:
3618                         return rte_flow_error_set(error, ENOTSUP,
3619                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3620                                                   NULL, "item not supported");
3621                 }
3622                 item_flags |= last_item;
3623         }
3624         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3625                 int type = actions->type;
3626                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3627                         return rte_flow_error_set(error, ENOTSUP,
3628                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3629                                                   actions, "too many actions");
3630                 switch (type) {
3631                 case RTE_FLOW_ACTION_TYPE_VOID:
3632                         break;
3633                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3634                         ret = flow_dv_validate_action_port_id(dev,
3635                                                               action_flags,
3636                                                               actions,
3637                                                               attr,
3638                                                               error);
3639                         if (ret)
3640                                 return ret;
3641                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3642                         ++actions_n;
3643                         break;
3644                 case RTE_FLOW_ACTION_TYPE_FLAG:
3645                         ret = mlx5_flow_validate_action_flag(action_flags,
3646                                                              attr, error);
3647                         if (ret < 0)
3648                                 return ret;
3649                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3650                         ++actions_n;
3651                         break;
3652                 case RTE_FLOW_ACTION_TYPE_MARK:
3653                         ret = mlx5_flow_validate_action_mark(actions,
3654                                                              action_flags,
3655                                                              attr, error);
3656                         if (ret < 0)
3657                                 return ret;
3658                         action_flags |= MLX5_FLOW_ACTION_MARK;
3659                         ++actions_n;
3660                         break;
3661                 case RTE_FLOW_ACTION_TYPE_DROP:
3662                         ret = mlx5_flow_validate_action_drop(action_flags,
3663                                                              attr, error);
3664                         if (ret < 0)
3665                                 return ret;
3666                         action_flags |= MLX5_FLOW_ACTION_DROP;
3667                         ++actions_n;
3668                         break;
3669                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3670                         ret = mlx5_flow_validate_action_queue(actions,
3671                                                               action_flags, dev,
3672                                                               attr, error);
3673                         if (ret < 0)
3674                                 return ret;
3675                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3676                         ++actions_n;
3677                         break;
3678                 case RTE_FLOW_ACTION_TYPE_RSS:
3679                         ret = mlx5_flow_validate_action_rss(actions,
3680                                                             action_flags, dev,
3681                                                             attr, item_flags,
3682                                                             error);
3683                         if (ret < 0)
3684                                 return ret;
3685                         action_flags |= MLX5_FLOW_ACTION_RSS;
3686                         ++actions_n;
3687                         break;
3688                 case RTE_FLOW_ACTION_TYPE_COUNT:
3689                         ret = flow_dv_validate_action_count(dev, error);
3690                         if (ret < 0)
3691                                 return ret;
3692                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3693                         ++actions_n;
3694                         break;
3695                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3696                         if (flow_dv_validate_action_pop_vlan(dev,
3697                                                              action_flags,
3698                                                              actions,
3699                                                              item_flags, attr,
3700                                                              error))
3701                                 return -rte_errno;
3702                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3703                         ++actions_n;
3704                         break;
3705                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3706                         ret = flow_dv_validate_action_push_vlan(action_flags,
3707                                                                 item_flags,
3708                                                                 actions, attr,
3709                                                                 error);
3710                         if (ret < 0)
3711                                 return ret;
3712                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3713                         ++actions_n;
3714                         break;
3715                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3716                         ret = flow_dv_validate_action_set_vlan_pcp
3717                                                 (action_flags, actions, error);
3718                         if (ret < 0)
3719                                 return ret;
3720                         /* Count PCP with push_vlan command. */
3721                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
3722                         break;
3723                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3724                         ret = flow_dv_validate_action_set_vlan_vid
3725                                                 (item_flags, action_flags,
3726                                                  actions, error);
3727                         if (ret < 0)
3728                                 return ret;
3729                         /* Count VID with push_vlan command. */
3730                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
3731                         break;
3732                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3733                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3734                         ret = flow_dv_validate_action_l2_encap(action_flags,
3735                                                                actions, attr,
3736                                                                error);
3737                         if (ret < 0)
3738                                 return ret;
3739                         action_flags |= actions->type ==
3740                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3741                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3742                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3743                         ++actions_n;
3744                         break;
3745                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3746                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3747                         ret = flow_dv_validate_action_l2_decap(action_flags,
3748                                                                attr, error);
3749                         if (ret < 0)
3750                                 return ret;
3751                         action_flags |= actions->type ==
3752                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3753                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3754                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3755                         ++actions_n;
3756                         break;
3757                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3758                         ret = flow_dv_validate_action_raw_encap(action_flags,
3759                                                                 actions, attr,
3760                                                                 error);
3761                         if (ret < 0)
3762                                 return ret;
3763                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3764                         ++actions_n;
3765                         break;
3766                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3767                         ret = flow_dv_validate_action_raw_decap(action_flags,
3768                                                                 actions, attr,
3769                                                                 error);
3770                         if (ret < 0)
3771                                 return ret;
3772                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3773                         ++actions_n;
3774                         break;
3775                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3776                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3777                         ret = flow_dv_validate_action_modify_mac(action_flags,
3778                                                                  actions,
3779                                                                  item_flags,
3780                                                                  error);
3781                         if (ret < 0)
3782                                 return ret;
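                             /*
                              * All header rewrites are merged into a single
                              * device modify-header action, so only the first
                              * one consumes a slot in actions_n.
                              */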
3783                         /* Count all modify-header actions as one action. */
3784                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3785                                 ++actions_n;
3786                         action_flags |= actions->type ==
3787                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3788                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3789                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3790                         break;
3792                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3793                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3794                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3795                                                                   actions,
3796                                                                   item_flags,
3797                                                                   error);
3798                         if (ret < 0)
3799                                 return ret;
3800                         /* Count all modify-header actions as one action. */
3801                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3802                                 ++actions_n;
3803                         action_flags |= actions->type ==
3804                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3805                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3806                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3807                         break;
3808                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3809                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3810                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3811                                                                   actions,
3812                                                                   item_flags,
3813                                                                   error);
3814                         if (ret < 0)
3815                                 return ret;
3816                         /* Count all modify-header actions as one action. */
3817                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3818                                 ++actions_n;
3819                         action_flags |= actions->type ==
3820                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3821                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3822                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3823                         break;
3824                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3825                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3826                         ret = flow_dv_validate_action_modify_tp(action_flags,
3827                                                                 actions,
3828                                                                 item_flags,
3829                                                                 error);
3830                         if (ret < 0)
3831                                 return ret;
3832                         /* Count all modify-header actions as one action. */
3833                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3834                                 ++actions_n;
3835                         action_flags |= actions->type ==
3836                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3837                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3838                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3839                         break;
3840                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3841                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3842                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3843                                                                  actions,
3844                                                                  item_flags,
3845                                                                  error);
3846                         if (ret < 0)
3847                                 return ret;
3848                         /* Count all modify-header actions as one action. */
3849                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3850                                 ++actions_n;
3851                         action_flags |= actions->type ==
3852                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3853                                                 MLX5_FLOW_ACTION_SET_TTL :
3854                                                 MLX5_FLOW_ACTION_DEC_TTL;
3855                         break;
3856                 case RTE_FLOW_ACTION_TYPE_JUMP:
3857                         ret = flow_dv_validate_action_jump(actions,
3858                                                            action_flags,
3859                                                            attr, external,
3860                                                            error);
3861                         if (ret)
3862                                 return ret;
3863                         ++actions_n;
3864                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3865                         break;
3866                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3867                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3868                         ret = flow_dv_validate_action_modify_tcp_seq
3869                                                                 (action_flags,
3870                                                                  actions,
3871                                                                  item_flags,
3872                                                                  error);
3873                         if (ret < 0)
3874                                 return ret;
3875                         /* Count all modify-header actions as one action. */
3876                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3877                                 ++actions_n;
3878                         action_flags |= actions->type ==
3879                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3880                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3881                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3882                         break;
3883                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3884                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3885                         ret = flow_dv_validate_action_modify_tcp_ack
3886                                                                 (action_flags,
3887                                                                  actions,
3888                                                                  item_flags,
3889                                                                  error);
3890                         if (ret < 0)
3891                                 return ret;
3892                         /* Count all modify-header actions as one action. */
3893                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3894                                 ++actions_n;
3895                         action_flags |= actions->type ==
3896                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3897                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3898                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3899                         break;
3900                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
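                             /* Internal PMD action, no user-level validation needed. */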
3901                         break;
3902                 default:
3903                         return rte_flow_error_set(error, ENOTSUP,
3904                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3905                                                   actions,
3906                                                   "action not supported");
3907                 }
3908         }
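             /*
              * Per-action checks passed, now enforce rules that depend on
              * the complete action set and on the flow attributes.
              */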
3909         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3910             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
3911                 return rte_flow_error_set(error, ENOTSUP,
3912                                           RTE_FLOW_ERROR_TYPE_ACTION,
3913                                           actions,
3914                                           "cannot combine VLAN actions with"
3915                                           " tunnel items in the same rule");
3916         /* Eswitch has a few restrictions on using items and actions. */
3917         if (attr->transfer) {
3918                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3919                         return rte_flow_error_set(error, ENOTSUP,
3920                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3921                                                   NULL,
3922                                                   "unsupported action FLAG");
3923                 if (action_flags & MLX5_FLOW_ACTION_MARK)
3924                         return rte_flow_error_set(error, ENOTSUP,
3925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3926                                                   NULL,
3927                                                   "unsupported action MARK");
3928                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3929                         return rte_flow_error_set(error, ENOTSUP,
3930                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3931                                                   NULL,
3932                                                   "unsupported action QUEUE");
3933                 if (action_flags & MLX5_FLOW_ACTION_RSS)
3934                         return rte_flow_error_set(error, ENOTSUP,
3935                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3936                                                   NULL,
3937                                                   "unsupported action RSS");
3938                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3939                         return rte_flow_error_set(error, EINVAL,
3940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3941                                                   actions,
3942                                                   "no fate action is found");
3943         } else {
3944                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3945                         return rte_flow_error_set(error, EINVAL,
3946                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3947                                                   actions,
3948                                                   "no fate action is found");
3949         }
3950         return 0;
3951 }
3952
3953 /**
3954  * Internal preparation function. Allocates the DV flow structure,
3955  * whose size is constant.
3956  *
3957  * @param[in] attr
3958  *   Pointer to the flow attributes.
3959  * @param[in] items
3960  *   Pointer to the list of items.
3961  * @param[in] actions
3962  *   Pointer to the list of actions.
3963  * @param[out] error
3964  *   Pointer to the error structure.
3965  *
3966  * @return
3967  *   Pointer to mlx5_flow object on success,
3968  *   otherwise NULL and rte_errno is set.
3969  */
3970 static struct mlx5_flow *
3971 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3972                 const struct rte_flow_item items[] __rte_unused,
3973                 const struct rte_flow_action actions[] __rte_unused,
3974                 struct rte_flow_error *error)
3975 {
3976         uint32_t size = sizeof(struct mlx5_flow);
3977         struct mlx5_flow *flow;
3978
3979         flow = rte_calloc(__func__, 1, size, 0);
3980         if (!flow) {
3981                 rte_flow_error_set(error, ENOMEM,
3982                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3983                                    "not enough memory to create flow");
3984                 return NULL;
3985         }
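             /* The matcher value always spans a full fte_match_param. */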
3986         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3987         return flow;
3988 }
3989
3990 #ifndef NDEBUG
3991 /**
3992  * Sanity check for match mask and value. Similar to check_valid_spec() in
3993  * the kernel driver. If a bit is set in the value but not in the mask, it fails.
3994  *
3995  * @param match_mask
3996  *   Pointer to match mask buffer.
3997  * @param match_value
3998  *   Pointer to match value buffer.
3999  *
4000  * @return
4001  *   0 if valid, -EINVAL otherwise.
4002  */
4003 static int
4004 flow_dv_check_valid_spec(void *match_mask, void *match_value)
4005 {
4006         uint8_t *m = match_mask;
4007         uint8_t *v = match_value;
4008         unsigned int i;
4009
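             /* A value bit set outside its mask byte can never match. */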
4010         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
4011                 if (v[i] & ~m[i]) {
4012                         DRV_LOG(ERR,
4013                                 "match_value differs from match_criteria"
4014                                 " %p[%u] != %p[%u]",
4015                                 match_value, i, match_mask, i);
4016                         return -EINVAL;
4017                 }
4018         }
4019         return 0;
4020 }
4021 #endif
4022
4023 /**
4024  * Add Ethernet item to matcher and to the value.
4025  *
4026  * @param[in, out] matcher
4027  *   Flow matcher.
4028  * @param[in, out] key
4029  *   Flow matcher value.
4030  * @param[in] item
4031  *   Flow pattern to translate.
4032  * @param[in] inner
4033  *   Item is inner pattern.
4034  */
4035 static void
4036 flow_dv_translate_item_eth(void *matcher, void *key,
4037                            const struct rte_flow_item *item, int inner)
4038 {
4039         const struct rte_flow_item_eth *eth_m = item->mask;
4040         const struct rte_flow_item_eth *eth_v = item->spec;
4041         const struct rte_flow_item_eth nic_mask = {
4042                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4043                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4044                 .type = RTE_BE16(0xffff),
4045         };
4046         void *headers_m;
4047         void *headers_v;
4048         char *l24_v;
4049         unsigned int i;
4050
4051         if (!eth_v)
4052                 return;
4053         if (!eth_m)
4054                 eth_m = &nic_mask;
4055         if (inner) {
4056                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4057                                          inner_headers);
4058                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4059         } else {
4060                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4061                                          outer_headers);
4062                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4063         }
4064         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
4065                &eth_m->dst, sizeof(eth_m->dst));
4066         /* The value must be in the range of the mask. */
4067         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
4068         for (i = 0; i < sizeof(eth_m->dst); ++i)
4069                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
4070         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
4071                &eth_m->src, sizeof(eth_m->src));
4072         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
4073         /* The value must be in the range of the mask. */
4074         for (i = 0; i < sizeof(eth_m->src); ++i)
4075                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
4076         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4077                  rte_be_to_cpu_16(eth_m->type));
4078         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
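             /* Fields are big-endian, so the masked spec can be stored as raw bytes. */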
4079         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
4080 }
4081
4082 /**
4083  * Add VLAN item to matcher and to the value.
4084  *
4085  * @param[in, out] dev_flow
4086  *   Flow descriptor.
4087  * @param[in, out] matcher
4088  *   Flow matcher.
4089  * @param[in, out] key
4090  *   Flow matcher value.
4091  * @param[in] item
4092  *   Flow pattern to translate.
4093  * @param[in] inner
4094  *   Item is inner pattern.
4095  */
4096 static void
4097 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
4098                             void *matcher, void *key,
4099                             const struct rte_flow_item *item,
4100                             int inner)
4101 {
4102         const struct rte_flow_item_vlan *vlan_m = item->mask;
4103         const struct rte_flow_item_vlan *vlan_v = item->spec;
4104         void *headers_m;
4105         void *headers_v;
4106         uint16_t tci_m;
4107         uint16_t tci_v;
4108
4109         if (!vlan_v)
4110                 return;
4111         if (!vlan_m)
4112                 vlan_m = &rte_flow_item_vlan_mask;
4113         if (inner) {
4114                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4115                                          inner_headers);
4116                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4117         } else {
4118                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4119                                          outer_headers);
4120                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4121                 /*
4122                  * This is a workaround: TCI masks are not supported
4123                  * here and have been validated beforehand.
4124                  */
4125                 dev_flow->dv.vf_vlan.tag =
4126                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
4127         }
4128         tci_m = rte_be_to_cpu_16(vlan_m->tci);
4129         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
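             /* TCI layout: PCP in bits 15-13, CFI in bit 12, VID in bits 11-0. */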
4130         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
4131         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
4132         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
4133         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
4134         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
4135         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
4136         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
4137         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
4138         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4139                  rte_be_to_cpu_16(vlan_m->inner_type));
4140         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
4141                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
4142 }
4143
4144 /**
4145  * Add IPV4 item to matcher and to the value.
4146  *
4147  * @param[in, out] matcher
4148  *   Flow matcher.
4149  * @param[in, out] key
4150  *   Flow matcher value.
4151  * @param[in] item
4152  *   Flow pattern to translate.
4153  * @param[in] inner
4154  *   Item is inner pattern.
4155  * @param[in] group
4156  *   The group to insert the rule.
4157  */
4158 static void
4159 flow_dv_translate_item_ipv4(void *matcher, void *key,
4160                             const struct rte_flow_item *item,
4161                             int inner, uint32_t group)
4162 {
4163         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4164         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4165         const struct rte_flow_item_ipv4 nic_mask = {
4166                 .hdr = {
4167                         .src_addr = RTE_BE32(0xffffffff),
4168                         .dst_addr = RTE_BE32(0xffffffff),
4169                         .type_of_service = 0xff,
4170                         .next_proto_id = 0xff,
4171                 },
4172         };
4173         void *headers_m;
4174         void *headers_v;
4175         char *l24_m;
4176         char *l24_v;
4177         uint8_t tos;
4178
4179         if (inner) {
4180                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4181                                          inner_headers);
4182                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4183         } else {
4184                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4185                                          outer_headers);
4186                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4187         }
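             /*
              * Group 0 (the root table) uses the full ip_version mask;
              * other groups match the exact version nibble.
              */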
4188         if (group == 0)
4189                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4190         else
4191                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4192         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4193         if (!ipv4_v)
4194                 return;
4195         if (!ipv4_m)
4196                 ipv4_m = &nic_mask;
4197         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4198                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4199         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4200                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4201         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4202         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4203         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4204                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4205         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4206                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4207         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4208         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4209         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4210         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4211                  ipv4_m->hdr.type_of_service);
4212         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4213         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4214                  ipv4_m->hdr.type_of_service >> 2);
4215         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4216         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4217                  ipv4_m->hdr.next_proto_id);
4218         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4219                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4220 }
4221
4222 /**
4223  * Add IPV6 item to matcher and to the value.
4224  *
4225  * @param[in, out] matcher
4226  *   Flow matcher.
4227  * @param[in, out] key
4228  *   Flow matcher value.
4229  * @param[in] item
4230  *   Flow pattern to translate.
4231  * @param[in] inner
4232  *   Item is inner pattern.
4233  * @param[in] group
4234  *   The group to insert the rule.
4235  */
4236 static void
4237 flow_dv_translate_item_ipv6(void *matcher, void *key,
4238                             const struct rte_flow_item *item,
4239                             int inner, uint32_t group)
4240 {
4241         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4242         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4243         const struct rte_flow_item_ipv6 nic_mask = {
4244                 .hdr = {
4245                         .src_addr =
4246                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4247                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4248                         .dst_addr =
4249                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4250                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4251                         .vtc_flow = RTE_BE32(0xffffffff),
4252                         .proto = 0xff,
4253                         .hop_limits = 0xff,
4254                 },
4255         };
4256         void *headers_m;
4257         void *headers_v;
4258         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4259         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4260         char *l24_m;
4261         char *l24_v;
4262         uint32_t vtc_m;
4263         uint32_t vtc_v;
4264         int i;
4265         int size;
4266
4267         if (inner) {
4268                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4269                                          inner_headers);
4270                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4271         } else {
4272                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4273                                          outer_headers);
4274                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4275         }
4276         if (group == 0)
4277                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4278         else
4279                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4280         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4281         if (!ipv6_v)
4282                 return;
4283         if (!ipv6_m)
4284                 ipv6_m = &nic_mask;
4285         size = sizeof(ipv6_m->hdr.dst_addr);
4286         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4287                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4288         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4289                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4290         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4291         for (i = 0; i < size; ++i)
4292                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4293         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4294                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4295         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4296                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4297         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4298         for (i = 0; i < size; ++i)
4299                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4300         /* TOS. */
4301         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4302         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
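             /* vtc_flow: version 31-28, TC 27-20 (DSCP 27-22, ECN 21-20), label 19-0. */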
4303         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4304         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4305         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4306         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4307         /* Label. */
4308         if (inner) {
4309                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4310                          vtc_m);
4311                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4312                          vtc_v);
4313         } else {
4314                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4315                          vtc_m);
4316                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4317                          vtc_v);
4318         }
4319         /* Protocol. */
4320         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4321                  ipv6_m->hdr.proto);
4322         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4323                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4324 }
4325
4326 /**
4327  * Add TCP item to matcher and to the value.
4328  *
4329  * @param[in, out] matcher
4330  *   Flow matcher.
4331  * @param[in, out] key
4332  *   Flow matcher value.
4333  * @param[in] item
4334  *   Flow pattern to translate.
4335  * @param[in] inner
4336  *   Item is inner pattern.
4337  */
4338 static void
4339 flow_dv_translate_item_tcp(void *matcher, void *key,
4340                            const struct rte_flow_item *item,
4341                            int inner)
4342 {
4343         const struct rte_flow_item_tcp *tcp_m = item->mask;
4344         const struct rte_flow_item_tcp *tcp_v = item->spec;
4345         void *headers_m;
4346         void *headers_v;
4347
4348         if (inner) {
4349                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4350                                          inner_headers);
4351                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4352         } else {
4353                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4354                                          outer_headers);
4355                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4356         }
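             /* Always pin the IP protocol to TCP, even without a spec. */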
4357         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4358         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4359         if (!tcp_v)
4360                 return;
4361         if (!tcp_m)
4362                 tcp_m = &rte_flow_item_tcp_mask;
4363         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4364                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4365         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4366                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4367         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4368                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4369         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4370                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4371         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4372                  tcp_m->hdr.tcp_flags);
4373         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4374                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4375 }
4376
4377 /**
4378  * Add UDP item to matcher and to the value.
4379  *
4380  * @param[in, out] matcher
4381  *   Flow matcher.
4382  * @param[in, out] key
4383  *   Flow matcher value.
4384  * @param[in] item
4385  *   Flow pattern to translate.
4386  * @param[in] inner
4387  *   Item is inner pattern.
4388  */
4389 static void
4390 flow_dv_translate_item_udp(void *matcher, void *key,
4391                            const struct rte_flow_item *item,
4392                            int inner)
4393 {
4394         const struct rte_flow_item_udp *udp_m = item->mask;
4395         const struct rte_flow_item_udp *udp_v = item->spec;
4396         void *headers_m;
4397         void *headers_v;
4398
4399         if (inner) {
4400                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4401                                          inner_headers);
4402                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4403         } else {
4404                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4405                                          outer_headers);
4406                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4407         }
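             /* Always pin the IP protocol to UDP, even without a spec. */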
4408         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4409         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4410         if (!udp_v)
4411                 return;
4412         if (!udp_m)
4413                 udp_m = &rte_flow_item_udp_mask;
4414         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4415                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4416         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4417                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4418         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4419                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4420         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4421                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4422 }
4423
4424 /**
4425  * Add GRE optional Key item to matcher and to the value.
4426  *
4427  * @param[in, out] matcher
4428  *   Flow matcher.
4429  * @param[in, out] key
4430  *   Flow matcher value.
4431  * @param[in] item
4432  *   Flow pattern to translate.
4435  */
4436 static void
4437 flow_dv_translate_item_gre_key(void *matcher, void *key,
4438                                const struct rte_flow_item *item)
4439 {
4440         const rte_be32_t *key_m = item->mask;
4441         const rte_be32_t *key_v = item->spec;
4442         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4443         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4444         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4445
4446         if (!key_v)
4447                 return;
4448         if (!key_m)
4449                 key_m = &gre_key_default_mask;
4450         /* The GRE K bit must be set; it was validated beforehand. */
4451         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4452         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
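             /* The 32-bit key splits into gre_key_h (bits 31-8) and gre_key_l (bits 7-0). */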
4453         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4454                  rte_be_to_cpu_32(*key_m) >> 8);
4455         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4456                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4457         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4458                  rte_be_to_cpu_32(*key_m) & 0xFF);
4459         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4460                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4461 }
4462
4463 /**
4464  * Add GRE item to matcher and to the value.
4465  *
4466  * @param[in, out] matcher
4467  *   Flow matcher.
4468  * @param[in, out] key
4469  *   Flow matcher value.
4470  * @param[in] item
4471  *   Flow pattern to translate.
4472  * @param[in] inner
4473  *   Item is inner pattern.
4474  */
4475 static void
4476 flow_dv_translate_item_gre(void *matcher, void *key,
4477                            const struct rte_flow_item *item,
4478                            int inner)
4479 {
4480         const struct rte_flow_item_gre *gre_m = item->mask;
4481         const struct rte_flow_item_gre *gre_v = item->spec;
4482         void *headers_m;
4483         void *headers_v;
4484         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4485         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4486         struct {
4487                 union {
4488                         __extension__
4489                         struct {
4490                                 uint16_t version:3;
4491                                 uint16_t rsvd0:9;
4492                                 uint16_t s_present:1;
4493                                 uint16_t k_present:1;
4494                                 uint16_t rsvd_bit1:1;
4495                                 uint16_t c_present:1;
4496                         };
4497                         uint16_t value;
4498                 };
4499         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4500
4501         if (inner) {
4502                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4503                                          inner_headers);
4504                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4505         } else {
4506                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4507                                          outer_headers);
4508                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4509         }
4510         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4511         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4512         if (!gre_v)
4513                 return;
4514         if (!gre_m)
4515                 gre_m = &rte_flow_item_gre_mask;
4516         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4517                  rte_be_to_cpu_16(gre_m->protocol));
4518         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4519                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4520         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4521         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4522         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4523                  gre_crks_rsvd0_ver_m.c_present);
4524         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4525                  gre_crks_rsvd0_ver_v.c_present &
4526                  gre_crks_rsvd0_ver_m.c_present);
4527         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4528                  gre_crks_rsvd0_ver_m.k_present);
4529         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4530                  gre_crks_rsvd0_ver_v.k_present &
4531                  gre_crks_rsvd0_ver_m.k_present);
4532         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4533                  gre_crks_rsvd0_ver_m.s_present);
4534         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4535                  gre_crks_rsvd0_ver_v.s_present &
4536                  gre_crks_rsvd0_ver_m.s_present);
4537 }
4538
4539 /**
4540  * Add NVGRE item to matcher and to the value.
4541  *
4542  * @param[in, out] matcher
4543  *   Flow matcher.
4544  * @param[in, out] key
4545  *   Flow matcher value.
4546  * @param[in] item
4547  *   Flow pattern to translate.
4548  * @param[in] inner
4549  *   Item is inner pattern.
4550  */
4551 static void
4552 flow_dv_translate_item_nvgre(void *matcher, void *key,
4553                              const struct rte_flow_item *item,
4554                              int inner)
4555 {
4556         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4557         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4558         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4559         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4560         const char *tni_flow_id_m;
4561         const char *tni_flow_id_v;
4562         char *gre_key_m;
4563         char *gre_key_v;
4564         int size;
4565         int i;
4566
4567         /* For NVGRE, GRE header fields must be set with defined values. */
4568         const struct rte_flow_item_gre gre_spec = {
4569                 .c_rsvd0_ver = RTE_BE16(0x2000),
4570                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4571         };
4572         const struct rte_flow_item_gre gre_mask = {
4573                 .c_rsvd0_ver = RTE_BE16(0xB000),
4574                 .protocol = RTE_BE16(UINT16_MAX),
4575         };
4576         const struct rte_flow_item gre_item = {
4577                 .spec = &gre_spec,
4578                 .mask = &gre_mask,
4579                 .last = NULL,
4580         };
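             /* Match NVGRE as GRE carrying TEB; TNI and flow_id occupy the key field. */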
4581         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4582         if (!nvgre_v)
4583                 return;
4584         if (!nvgre_m)
4585                 nvgre_m = &rte_flow_item_nvgre_mask;
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
4586         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4587         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4588         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4589         memcpy(gre_key_m, tni_flow_id_m, size);
4590         for (i = 0; i < size; ++i)
4591                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4592 }
4593
4594 /**
4595  * Add VXLAN item to matcher and to the value.
4596  *
4597  * @param[in, out] matcher
4598  *   Flow matcher.
4599  * @param[in, out] key
4600  *   Flow matcher value.
4601  * @param[in] item
4602  *   Flow pattern to translate.
4603  * @param[in] inner
4604  *   Item is inner pattern.
4605  */
4606 static void
4607 flow_dv_translate_item_vxlan(void *matcher, void *key,
4608                              const struct rte_flow_item *item,
4609                              int inner)
4610 {
4611         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4612         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4613         void *headers_m;
4614         void *headers_v;
4615         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4616         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4617         char *vni_m;
4618         char *vni_v;
4619         uint16_t dport;
4620         int size;
4621         int i;
4622
4623         if (inner) {
4624                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4625                                          inner_headers);
4626                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4627         } else {
4628                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4629                                          outer_headers);
4630                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4631         }
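             /* Default the UDP destination port if the pattern left it open. */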
4632         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4633                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4634         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4635                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4636                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4637         }
4638         if (!vxlan_v)
4639                 return;
4640         if (!vxlan_m)
4641                 vxlan_m = &rte_flow_item_vxlan_mask;
4642         size = sizeof(vxlan_m->vni);
4643         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4644         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4645         memcpy(vni_m, vxlan_m->vni, size);
4646         for (i = 0; i < size; ++i)
4647                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4648 }
4649
4650 /**
4651  * Add Geneve item to matcher and to the value.
4652  *
4653  * @param[in, out] matcher
4654  *   Flow matcher.
4655  * @param[in, out] key
4656  *   Flow matcher value.
4657  * @param[in] item
4658  *   Flow pattern to translate.
4659  * @param[in] inner
4660  *   Item is inner pattern.
4661  */
4663 static void
4664 flow_dv_translate_item_geneve(void *matcher, void *key,
4665                               const struct rte_flow_item *item, int inner)
4666 {
4667         const struct rte_flow_item_geneve *geneve_m = item->mask;
4668         const struct rte_flow_item_geneve *geneve_v = item->spec;
4669         void *headers_m;
4670         void *headers_v;
4671         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4672         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4673         uint16_t dport;
4674         uint16_t gbhdr_m;
4675         uint16_t gbhdr_v;
4676         char *vni_m;
4677         char *vni_v;
4678         size_t size, i;
4679
4680         if (inner) {
4681                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4682                                          inner_headers);
4683                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4684         } else {
4685                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4686                                          outer_headers);
4687                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4688         }
4689         dport = MLX5_UDP_PORT_GENEVE;
4690         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4691                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4692                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4693         }
4694         if (!geneve_v)
4695                 return;
4696         if (!geneve_m)
4697                 geneve_m = &rte_flow_item_geneve_mask;
4698         size = sizeof(geneve_m->vni);
4699         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
4700         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
4701         memcpy(vni_m, geneve_m->vni, size);
4702         for (i = 0; i < size; ++i)
4703                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
4704         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
4705                  rte_be_to_cpu_16(geneve_m->protocol));
4706         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
4707                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
4708         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
4709         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
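             /* Geneve first word: Ver 15-14, Opt Len 13-8, O 7, C 6, Rsvd 5-0. */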
4710         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
4711                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4712         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
4713                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4714         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
4715                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4716         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
4717                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
4718                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4719 }
4720
4721 /**
4722  * Add MPLS item to matcher and to the value.
4723  *
4724  * @param[in, out] matcher
4725  *   Flow matcher.
4726  * @param[in, out] key
4727  *   Flow matcher value.
4728  * @param[in] item
4729  *   Flow pattern to translate.
4730  * @param[in] prev_layer
4731  *   The protocol layer indicated in previous item.
4732  * @param[in] inner
4733  *   Item is inner pattern.
4734  */
4735 static void
4736 flow_dv_translate_item_mpls(void *matcher, void *key,
4737                             const struct rte_flow_item *item,
4738                             uint64_t prev_layer,
4739                             int inner)
4740 {
4741         const uint32_t *in_mpls_m = item->mask;
4742         const uint32_t *in_mpls_v = item->spec;
4743         uint32_t *out_mpls_m = NULL;
4744         uint32_t *out_mpls_v = NULL;
4745         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4746         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4747         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4748                                      misc_parameters_2);
4749         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4750         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4751         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4752
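             /* Pin the encapsulating protocol implied by the previous layer. */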
4753         switch (prev_layer) {
4754         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4755                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4756                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4757                          MLX5_UDP_PORT_MPLS);
4758                 break;
4759         case MLX5_FLOW_LAYER_GRE:
4760                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4761                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4762                          RTE_ETHER_TYPE_MPLS);
4763                 break;
4764         default:
4765                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4766                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4767                          IPPROTO_MPLS);
4768                 break;
4769         }
4770         if (!in_mpls_v)
4771                 return;
4772         if (!in_mpls_m)
4773                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4774         switch (prev_layer) {
4775         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4776                 out_mpls_m =
4777                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4778                                                  outer_first_mpls_over_udp);
4779                 out_mpls_v =
4780                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4781                                                  outer_first_mpls_over_udp);
4782                 break;
4783         case MLX5_FLOW_LAYER_GRE:
4784                 out_mpls_m =
4785                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4786                                                  outer_first_mpls_over_gre);
4787                 out_mpls_v =
4788                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4789                                                  outer_first_mpls_over_gre);
4790                 break;
4791         default:
4792                 /* Inner MPLS not over GRE is not supported. */
4793                 if (!inner) {
4794                         out_mpls_m =
4795                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4796                                                          misc2_m,
4797                                                          outer_first_mpls);
4798                         out_mpls_v =
4799                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4800                                                          misc2_v,
4801                                                          outer_first_mpls);
4802                 }
4803                 break;
4804         }
4805         if (out_mpls_m && out_mpls_v) {
4806                 *out_mpls_m = *in_mpls_m;
4807                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4808         }
4809 }
4810
4811 /**
4812  * Add META item to matcher and to the value.
4813  *
4814  * @param[in, out] matcher
4815  *   Flow matcher.
4816  * @param[in, out] key
4817  *   Flow matcher value.
4818  * @param[in] item
4819  *   Flow pattern to translate.
4822  */
4823 static void
4824 flow_dv_translate_item_meta(void *matcher, void *key,
4825                             const struct rte_flow_item *item)
4826 {
4827         const struct rte_flow_item_meta *meta_m;
4828         const struct rte_flow_item_meta *meta_v;
4829         void *misc2_m =
4830                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4831         void *misc2_v =
4832                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4833
4834         meta_m = (const void *)item->mask;
4835         if (!meta_m)
4836                 meta_m = &rte_flow_item_meta_mask;
4837         meta_v = (const void *)item->spec;
4838         if (meta_v) {
4839                 MLX5_SET(fte_match_set_misc2, misc2_m,
4840                          metadata_reg_a, meta_m->data);
4841                 MLX5_SET(fte_match_set_misc2, misc2_v,
4842                          metadata_reg_a, meta_v->data & meta_m->data);
4843         }
4844 }
4845
4846 /**
4847  * Add vport metadata Reg C0 item to matcher and to the value.
4848  *
4849  * @param[in, out] matcher
4850  *   Flow matcher.
4851  * @param[in, out] key
4852  *   Flow matcher value.
4853  * @param[in] value
4854  *   Register value to match.
      * @param[in] mask
      *   Register mask to match.
4855  */
4856 static void
4857 flow_dv_translate_item_meta_vport(void *matcher, void *key,
4858                                   uint32_t value, uint32_t mask)
4859 {
4860         void *misc2_m =
4861                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4862         void *misc2_v =
4863                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4864
4865         MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
4866         MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
4867 }
4868
4869 /**
4870  * Add tag item to matcher and to the value.
4871  *
4872  * @param[in, out] matcher
4873  *   Flow matcher.
4874  * @param[in, out] key
4875  *   Flow matcher value.
4876  * @param[in] item
4877  *   Flow pattern to translate.
4878  */
4879 static void
4880 flow_dv_translate_item_tag(void *matcher, void *key,
4881                            const struct rte_flow_item *item)
4882 {
4883         void *misc2_m =
4884                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4885         void *misc2_v =
4886                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4887         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
4888         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
4889         enum modify_reg reg = tag_v->id;
4890         rte_be32_t value = tag_v->data;
4891         rte_be32_t mask = tag_m->data;
4892
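             /* Each register maps onto its metadata_reg_* field in misc2. */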
4893         switch (reg) {
4894         case REG_A:
4895                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4896                                 rte_be_to_cpu_32(mask));
4897                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4898                                 rte_be_to_cpu_32(value));
4899                 break;
4900         case REG_B:
4901                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b,
4902                                  rte_be_to_cpu_32(mask));
4903                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b,
4904                                 rte_be_to_cpu_32(value));
4905                 break;
4906         case REG_C_0:
4907                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0,
4908                                  rte_be_to_cpu_32(mask));
4909                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0,
4910                                 rte_be_to_cpu_32(value));
4911                 break;
4912         case REG_C_1:
4913                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1,
4914                                  rte_be_to_cpu_32(mask));
4915                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1,
4916                                 rte_be_to_cpu_32(value));
4917                 break;
4918         case REG_C_2:
4919                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2,
4920                                  rte_be_to_cpu_32(mask));
4921                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2,
4922                                 rte_be_to_cpu_32(value));
4923                 break;
4924         case REG_C_3:
4925                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3,
4926                                  rte_be_to_cpu_32(mask));
4927                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3,
4928                                 rte_be_to_cpu_32(value));
4929                 break;
4930         case REG_C_4:
4931                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4,
4932                                 rte_be_to_cpu_32(mask));
4933                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4,
4934                                 rte_be_to_cpu_32(value));
4935                 break;
4936         case REG_C_5:
4937                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5,
4938                                 rte_be_to_cpu_32(mask));
4939                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5,
4940                                 rte_be_to_cpu_32(value));
4941                 break;
4942         case REG_C_6:
4943                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6,
4944                                 rte_be_to_cpu_32(mask));
4945                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6,
4946                                 rte_be_to_cpu_32(value));
4947                 break;
4948         case REG_C_7:
4949                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7,
4950                                 rte_be_to_cpu_32(mask));
4951                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7,
4952                                 rte_be_to_cpu_32(value));
4953                 break;
4954         }
4955 }
4956
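/*
 * Usage sketch (illustrative only, never compiled): how a caller could
 * populate the internal tag item consumed by flow_dv_translate_item_tag().
 * The spec/mask layout mirrors the dereferences above; the register and
 * data values are hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
example_build_tag_item(struct rte_flow_item *item,
		       struct mlx5_rte_flow_item_tag *spec,
		       struct mlx5_rte_flow_item_tag *mask)
{
	spec->id = REG_C_0;
	spec->data = rte_cpu_to_be_32(0xa5); /* value, big-endian as above */
	mask->id = REG_C_0;
	mask->data = rte_cpu_to_be_32(UINT32_MAX); /* match all data bits */
	item->type = (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_TAG;
	item->spec = spec;
	item->last = NULL;
	item->mask = mask;
}
#endif
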
4957 /**
4958  * Add source vport match to the specified matcher.
4959  *
4960  * @param[in, out] matcher
4961  *   Flow matcher.
4962  * @param[in, out] key
4963  *   Flow matcher value.
4964  * @param[in] port
4965  *   Source vport value to match.
4966  * @param[in] mask
4967  *   Mask to apply.
4968  */
4969 static void
4970 flow_dv_translate_item_source_vport(void *matcher, void *key,
4971                                     uint16_t port, uint16_t mask)
4972 {
4973         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4974         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4975
4976         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4977         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4978 }
4979
4980 /**
4981  * Translate port-id item to E-Switch match on port-id.
4982  *
4983  * @param[in] dev
4984  *   The device to configure through.
4985  * @param[in, out] matcher
4986  *   Flow matcher.
4987  * @param[in, out] key
4988  *   Flow matcher value.
4989  * @param[in] item
4990  *   Flow pattern to translate.
4991  *
4992  * @return
4993  *   0 on success, a negative errno value otherwise.
4994  */
4995 static int
4996 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4997                                void *key, const struct rte_flow_item *item)
4998 {
4999         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
5000         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
5001         struct mlx5_priv *priv;
5002         uint16_t mask, id;
5003
5004         mask = pid_m ? pid_m->id : 0xffff;
5005         id = pid_v ? pid_v->id : dev->data->port_id;
5006         priv = mlx5_port_to_eswitch_info(id);
5007         if (!priv)
5008                 return -rte_errno;
5009         /* Translate to vport field or to metadata, depending on mode. */
5010         if (priv->vport_meta_mask)
5011                 flow_dv_translate_item_meta_vport(matcher, key,
5012                                                   priv->vport_meta_tag,
5013                                                   priv->vport_meta_mask);
5014         else
5015                 flow_dv_translate_item_source_vport(matcher, key,
5016                                                     priv->vport_id, mask);
5017         return 0;
5018 }
5019
5020 /**
5021  * Add ICMP6 item to matcher and to the value.
5022  *
5023  * @param[in, out] matcher
5024  *   Flow matcher.
5025  * @param[in, out] key
5026  *   Flow matcher value.
5027  * @param[in] item
5028  *   Flow pattern to translate.
5029  * @param[in] inner
5030  *   Item is inner pattern.
5031  */
5032 static void
5033 flow_dv_translate_item_icmp6(void *matcher, void *key,
5034                               const struct rte_flow_item *item,
5035                               int inner)
5036 {
5037         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
5038         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
5039         void *headers_m;
5040         void *headers_v;
5041         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5042                                      misc_parameters_3);
5043         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5044         if (inner) {
5045                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5046                                          inner_headers);
5047                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5048         } else {
5049                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5050                                          outer_headers);
5051                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5052         }
5053         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5054         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
5055         if (!icmp6_v)
5056                 return;
5057         if (!icmp6_m)
5058                 icmp6_m = &rte_flow_item_icmp6_mask;
5059         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
5060         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
5061                  icmp6_v->type & icmp6_m->type);
5062         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
5063         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
5064                  icmp6_v->code & icmp6_m->code);
5065 }
5066
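/*
 * Usage sketch (illustrative only, never compiled): a public rte_flow
 * ICMPv6 item that the helper above would translate, matching neighbor
 * solicitation messages (type 135) regardless of code. Values are
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_icmp6 example_icmp6_spec = {
	.type = 135, /* neighbor solicitation */
	.code = 0,
};
static const struct rte_flow_item_icmp6 example_icmp6_mask = {
	.type = 0xff, /* type must match exactly */
	.code = 0x00, /* code is ignored */
};
#endif
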
5067 /**
5068  * Add ICMP item to matcher and to the value.
5069  *
5070  * @param[in, out] matcher
5071  *   Flow matcher.
5072  * @param[in, out] key
5073  *   Flow matcher value.
5074  * @param[in] item
5075  *   Flow pattern to translate.
5076  * @param[in] inner
5077  *   Item is inner pattern.
5078  */
5079 static void
5080 flow_dv_translate_item_icmp(void *matcher, void *key,
5081                             const struct rte_flow_item *item,
5082                             int inner)
5083 {
5084         const struct rte_flow_item_icmp *icmp_m = item->mask;
5085         const struct rte_flow_item_icmp *icmp_v = item->spec;
5086         void *headers_m;
5087         void *headers_v;
5088         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5089                                      misc_parameters_3);
5090         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5091         if (inner) {
5092                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5093                                          inner_headers);
5094                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5095         } else {
5096                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5097                                          outer_headers);
5098                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5099         }
5100         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5101         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
5102         if (!icmp_v)
5103                 return;
5104         if (!icmp_m)
5105                 icmp_m = &rte_flow_item_icmp_mask;
5106         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
5107                  icmp_m->hdr.icmp_type);
5108         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
5109                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
5110         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
5111                  icmp_m->hdr.icmp_code);
5112         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
5113                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
5114 }
5115
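/*
 * Companion sketch (illustrative only, never compiled): unlike the flat
 * ICMPv6 item, the ICMPv4 item embeds a full rte_icmp_hdr, as the
 * hdr.icmp_type/hdr.icmp_code accesses above show. A hypothetical match
 * on echo request messages (type 8), any code:
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_icmp example_icmp_spec = {
	.hdr = { .icmp_type = 8 /* echo request */ },
};
static const struct rte_flow_item_icmp example_icmp_mask = {
	.hdr = { .icmp_type = 0xff },
};
#endif
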
5116 static const uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
5117
5118 #define HEADER_IS_ZERO(match_criteria, headers)                              \
5119         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
5120                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
5121
5122 /**
5123  * Calculate flow matcher enable bitmap.
5124  *
5125  * @param match_criteria
5126  *   Pointer to flow matcher criteria.
5127  *
5128  * @return
5129  *   Bitmap of enabled fields.
5130  */
5131 static uint8_t
5132 flow_dv_matcher_enable(uint32_t *match_criteria)
5133 {
5134         uint8_t match_criteria_enable;
5135
5136         match_criteria_enable =
5137                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
5138                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
5139         match_criteria_enable |=
5140                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
5141                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
5142         match_criteria_enable |=
5143                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
5144                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
5145         match_criteria_enable |=
5146                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
5147                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
5148         match_criteria_enable |=
5149                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
5150                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
5151         return match_criteria_enable;
5152 }
5153
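/*
 * Worked example (illustrative only, never compiled): if only
 * outer_headers and misc_parameters_2 carry non-zero mask bits, the
 * function above yields the value computed below, telling the hardware
 * which header sets to inspect.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const uint8_t example_match_criteria_enable =
	(1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
	(1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT);
#endif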
5154
5155 /**
5156  * Get a flow table.
5157  *
5158  * @param[in, out] dev
5159  *   Pointer to rte_eth_dev structure.
5160  * @param[in] table_id
5161  *   Table id to use.
5162  * @param[in] egress
5163  *   Direction of the table.
5164  * @param[in] transfer
5165  *   E-Switch or NIC flow.
5166  * @param[out] error
5167  *   Pointer to error structure.
5168  *
5169  * @return
5170  *   Returns the table resource for the given table id, NULL in case of failure.
5171  */
5172 static struct mlx5_flow_tbl_resource *
5173 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
5174                          uint32_t table_id, uint8_t egress,
5175                          uint8_t transfer,
5176                          struct rte_flow_error *error)
5177 {
5178         struct mlx5_priv *priv = dev->data->dev_private;
5179         struct mlx5_ibv_shared *sh = priv->sh;
5180         struct mlx5_flow_tbl_resource *tbl;
5181
5182 #ifdef HAVE_MLX5DV_DR
5183         if (transfer) {
5184                 tbl = &sh->fdb_tbl[table_id];
5185                 if (!tbl->obj)
5186                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5187                                 (sh->fdb_domain, table_id);
5188         } else if (egress) {
5189                 tbl = &sh->tx_tbl[table_id];
5190                 if (!tbl->obj)
5191                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5192                                 (sh->tx_domain, table_id);
5193         } else {
5194                 tbl = &sh->rx_tbl[table_id];
5195                 if (!tbl->obj)
5196                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5197                                 (sh->rx_domain, table_id);
5198         }
5199         if (!tbl->obj) {
5200                 rte_flow_error_set(error, ENOMEM,
5201                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5202                                    NULL, "cannot create table");
5203                 return NULL;
5204         }
5205         rte_atomic32_inc(&tbl->refcnt);
5206         return tbl;
5207 #else
5208         (void)error;
5209         (void)tbl;
5210         if (transfer)
5211                 return &sh->fdb_tbl[table_id];
5212         else if (egress)
5213                 return &sh->tx_tbl[table_id];
5214         else
5215                 return &sh->rx_tbl[table_id];
5216 #endif
5217 }
5218
5219 /**
5220  * Release a flow table.
5221  *
5222  * @param[in] tbl
5223  *   Table resource to be released.
5224  *
5225  * @return
5226  *   Returns 0 if the table was released, 1 otherwise.
5227  */
5228 static int
5229 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
5230 {
5231         if (!tbl)
5232                 return 0;
5233         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
5234                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
5235                 tbl->obj = NULL;
5236                 return 0;
5237         }
5238         return 1;
5239 }
5240
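/*
 * Usage sketch (illustrative only, never compiled): the get/release pair
 * above is reference counted, so every successful get must eventually be
 * balanced by a release. Table id and direction flags are hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
example_use_table(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct mlx5_flow_tbl_resource *tbl;

	tbl = flow_dv_tbl_resource_get(dev, 1, 0 /* ingress */,
				       0 /* NIC domain */, error);
	if (!tbl)
		return -rte_errno;
	/* ... create matchers/rules referencing tbl->obj ... */
	flow_dv_tbl_resource_release(tbl);
	return 0;
}
#endif
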
5241 /**
5242  * Register the flow matcher.
5243  *
5244  * @param[in, out] dev
5245  *   Pointer to rte_eth_dev structure.
5246  * @param[in, out] matcher
5247  *   Pointer to flow matcher.
5248  * @param[in, out] dev_flow
5249  *   Pointer to the dev_flow.
5250  * @param[out] error
5251  *   Pointer to error structure.
5252  *
5253  * @return
5254  *   0 on success, a negative errno value otherwise and rte_errno is set.
5255  */
5256 static int
5257 flow_dv_matcher_register(struct rte_eth_dev *dev,
5258                          struct mlx5_flow_dv_matcher *matcher,
5259                          struct mlx5_flow *dev_flow,
5260                          struct rte_flow_error *error)
5261 {
5262         struct mlx5_priv *priv = dev->data->dev_private;
5263         struct mlx5_ibv_shared *sh = priv->sh;
5264         struct mlx5_flow_dv_matcher *cache_matcher;
5265         struct mlx5dv_flow_matcher_attr dv_attr = {
5266                 .type = IBV_FLOW_ATTR_NORMAL,
5267                 .match_mask = (void *)&matcher->mask,
5268         };
5269         struct mlx5_flow_tbl_resource *tbl = NULL;
5270
5271         /* Lookup from cache. */
5272         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
5273                 if (matcher->crc == cache_matcher->crc &&
5274                     matcher->priority == cache_matcher->priority &&
5275                     matcher->egress == cache_matcher->egress &&
5276                     matcher->group == cache_matcher->group &&
5277                     matcher->transfer == cache_matcher->transfer &&
5278                     !memcmp((const void *)matcher->mask.buf,
5279                             (const void *)cache_matcher->mask.buf,
5280                             cache_matcher->mask.size)) {
5281                         DRV_LOG(DEBUG,
5282                                 "priority %hd use %s matcher %p: refcnt %d++",
5283                                 cache_matcher->priority,
5284                                 cache_matcher->egress ? "tx" : "rx",
5285                                 (void *)cache_matcher,
5286                                 rte_atomic32_read(&cache_matcher->refcnt));
5287                         rte_atomic32_inc(&cache_matcher->refcnt);
5288                         dev_flow->dv.matcher = cache_matcher;
5289                         return 0;
5290                 }
5291         }
5292         /* Register new matcher. */
5293         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
5294         if (!cache_matcher)
5295                 return rte_flow_error_set(error, ENOMEM,
5296                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5297                                           "cannot allocate matcher memory");
5298         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
5299                                        matcher->egress, matcher->transfer,
5300                                        error);
5301         if (!tbl) {
5302                 rte_free(cache_matcher);
5303                 return rte_flow_error_set(error, ENOMEM,
5304                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5305                                           NULL, "cannot create table");
5306         }
5307         *cache_matcher = *matcher;
5308         dv_attr.match_criteria_enable =
5309                 flow_dv_matcher_enable(cache_matcher->mask.buf);
5310         dv_attr.priority = matcher->priority;
5311         if (matcher->egress)
5312                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
5313         cache_matcher->matcher_object =
5314                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
5315         if (!cache_matcher->matcher_object) {
5316                 rte_free(cache_matcher);
5317 #ifdef HAVE_MLX5DV_DR
5318                 flow_dv_tbl_resource_release(tbl);
5319 #endif
5320                 return rte_flow_error_set(error, ENOMEM,
5321                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5322                                           NULL, "cannot create matcher");
5323         }
5324         rte_atomic32_inc(&cache_matcher->refcnt);
5325         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
5326         dev_flow->dv.matcher = cache_matcher;
5327         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
5328                 cache_matcher->priority,
5329                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
5330                 rte_atomic32_read(&cache_matcher->refcnt));
5331         rte_atomic32_inc(&tbl->refcnt);
5332         return 0;
5333 }
5334
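/*
 * The registration helpers above and below share one idiom: scan a cache
 * list on the shared context, take a reference on a hit, otherwise
 * allocate, create the underlying object and insert at the list head.
 * A minimal generic sketch (illustrative only, never compiled;
 * example_entry and its key are hypothetical):
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
struct example_entry {
	LIST_ENTRY(example_entry) next;
	rte_atomic32_t refcnt;
	uint32_t key;
};
LIST_HEAD(, example_entry) example_cache =
	LIST_HEAD_INITIALIZER(example_cache);

static struct example_entry *
example_cache_get(uint32_t key)
{
	struct example_entry *e;

	LIST_FOREACH(e, &example_cache, next)
		if (e->key == key) {
			rte_atomic32_inc(&e->refcnt); /* cache hit */
			return e;
		}
	e = rte_calloc(__func__, 1, sizeof(*e), 0);
	if (!e)
		return NULL;
	e->key = key;
	rte_atomic32_init(&e->refcnt);
	rte_atomic32_inc(&e->refcnt); /* first reference */
	LIST_INSERT_HEAD(&example_cache, e, next);
	return e;
}
#endif
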
5335 /**
5336  * Find existing tag resource or create and register a new one.
5337  *
5338  * @param[in, out] dev
5339  *   Pointer to rte_eth_dev structure.
5340  * @param[in, out] resource
5341  *   Pointer to tag resource.
5342  * @param[in, out] dev_flow
5343  *   Pointer to the dev_flow.
5344  * @param[out] error
5345  *   Pointer to error structure.
5346  *
5347  * @return
5348  *   0 on success, a negative errno value otherwise and rte_errno is set.
5349  */
5350 static int
5351 flow_dv_tag_resource_register
5352                         (struct rte_eth_dev *dev,
5353                          struct mlx5_flow_dv_tag_resource *resource,
5354                          struct mlx5_flow *dev_flow,
5355                          struct rte_flow_error *error)
5356 {
5357         struct mlx5_priv *priv = dev->data->dev_private;
5358         struct mlx5_ibv_shared *sh = priv->sh;
5359         struct mlx5_flow_dv_tag_resource *cache_resource;
5360
5361         /* Lookup a matching resource from cache. */
5362         LIST_FOREACH(cache_resource, &sh->tags, next) {
5363                 if (resource->tag == cache_resource->tag) {
5364                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5365                                 (void *)cache_resource,
5366                                 rte_atomic32_read(&cache_resource->refcnt));
5367                         rte_atomic32_inc(&cache_resource->refcnt);
5368                         dev_flow->flow->tag_resource = cache_resource;
5369                         return 0;
5370                 }
5371         }
5372         /* Register new resource. */
5373         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5374         if (!cache_resource)
5375                 return rte_flow_error_set(error, ENOMEM,
5376                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5377                                           "cannot allocate resource memory");
5378         *cache_resource = *resource;
5379         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5380                 (resource->tag);
5381         if (!cache_resource->action) {
5382                 rte_free(cache_resource);
5383                 return rte_flow_error_set(error, ENOMEM,
5384                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5385                                           NULL, "cannot create action");
5386         }
5387         rte_atomic32_init(&cache_resource->refcnt);
5388         rte_atomic32_inc(&cache_resource->refcnt);
5389         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5390         dev_flow->flow->tag_resource = cache_resource;
5391         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5392                 (void *)cache_resource,
5393                 rte_atomic32_read(&cache_resource->refcnt));
5394         return 0;
5395 }
5396
5397 /**
5398  * Release the tag.
5399  *
5400  * @param dev
5401  *   Pointer to Ethernet device.
5402  * @param tag
5403  *   Pointer to the tag resource.
5404  *
5405  * @return
5406  *   1 while a reference on it exists, 0 when freed.
5407  */
5408 static int
5409 flow_dv_tag_release(struct rte_eth_dev *dev,
5410                     struct mlx5_flow_dv_tag_resource *tag)
5411 {
5412         assert(tag);
5413         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5414                 dev->data->port_id, (void *)tag,
5415                 rte_atomic32_read(&tag->refcnt));
5416         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5417                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5418                 LIST_REMOVE(tag, next);
5419                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5420                         dev->data->port_id, (void *)tag);
5421                 rte_free(tag);
5422                 return 0;
5423         }
5424         return 1;
5425 }
5426
5427 /**
5428  * Translate port ID action to vport.
5429  *
5430  * @param[in] dev
5431  *   Pointer to rte_eth_dev structure.
5432  * @param[in] action
5433  *   Pointer to the port ID action.
5434  * @param[out] dst_port_id
5435  *   The target port ID.
5436  * @param[out] error
5437  *   Pointer to the error structure.
5438  *
5439  * @return
5440  *   0 on success, a negative errno value otherwise and rte_errno is set.
5441  */
5442 static int
5443 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5444                                  const struct rte_flow_action *action,
5445                                  uint32_t *dst_port_id,
5446                                  struct rte_flow_error *error)
5447 {
5448         uint32_t port;
5449         struct mlx5_priv *priv;
5450         const struct rte_flow_action_port_id *conf =
5451                         (const struct rte_flow_action_port_id *)action->conf;
5452
5453         port = conf->original ? dev->data->port_id : conf->id;
5454         priv = mlx5_port_to_eswitch_info(port);
5455         if (!priv)
5456                 return rte_flow_error_set(error, rte_errno,
5457                                           RTE_FLOW_ERROR_TYPE_ACTION,
5458                                           NULL,
5459                                           "No eswitch info was found for port");
5460         if (priv->vport_meta_mask)
5461                 *dst_port_id = priv->vport_meta_tag;
5462         else
5463                 *dst_port_id = priv->vport_id;
5464         return 0;
5465 }
5466
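/*
 * Public-API view (illustrative only, never compiled) of the action
 * translated above: a hypothetical PORT_ID action forwarding to DPDK
 * port 1.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_port_id example_port_id_conf = {
	.original = 0, /* use .id below, not the incoming port */
	.id = 1,
};
static const struct rte_flow_action example_port_id_action = {
	.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
	.conf = &example_port_id_conf,
};
#endif
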
5467 /**
5468  * Add Tx queue item to matcher and to the value.
5469  *
5470  * @param[in] dev
5471  *   Pointer to the dev struct.
5472  * @param[in, out] matcher
5473  *   Flow matcher.
5474  * @param[in, out] key
5475  *   Flow matcher value.
5476  * @param[in] item
5477  *   Flow pattern to translate.
5480  */
5481 static void
5482 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
5483                                 void *matcher, void *key,
5484                                 const struct rte_flow_item *item)
5485 {
5486         const struct mlx5_rte_flow_item_tx_queue *queue_m;
5487         const struct mlx5_rte_flow_item_tx_queue *queue_v;
5488         void *misc_m =
5489                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5490         void *misc_v =
5491                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5492         struct mlx5_txq_ctrl *txq;
5493         uint32_t queue;
5494
5495
5496         queue_m = (const void *)item->mask;
5497         if (!queue_m)
5498                 return;
5499         queue_v = (const void *)item->spec;
5500         if (!queue_v)
5501                 return;
5502         txq = mlx5_txq_get(dev, queue_v->queue);
5503         if (!txq)
5504                 return;
5505         queue = txq->obj->sq->id;
5506         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
5507         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
5508                  queue & queue_m->queue);
5509         mlx5_txq_release(dev, queue_v->queue);
5510 }
5511
5512 /**
5513  * Fill the flow with DV spec.
5514  *
5515  * @param[in] dev
5516  *   Pointer to rte_eth_dev structure.
5517  * @param[in, out] dev_flow
5518  *   Pointer to the sub flow.
5519  * @param[in] attr
5520  *   Pointer to the flow attributes.
5521  * @param[in] items
5522  *   Pointer to the list of items.
5523  * @param[in] actions
5524  *   Pointer to the list of actions.
5525  * @param[out] error
5526  *   Pointer to the error structure.
5527  *
5528  * @return
5529  *   0 on success, a negative errno value otherwise and rte_errno is set.
5530  */
5531 static int
5532 flow_dv_translate(struct rte_eth_dev *dev,
5533                   struct mlx5_flow *dev_flow,
5534                   const struct rte_flow_attr *attr,
5535                   const struct rte_flow_item items[],
5536                   const struct rte_flow_action actions[],
5537                   struct rte_flow_error *error)
5538 {
5539         struct mlx5_priv *priv = dev->data->dev_private;
5540         struct rte_flow *flow = dev_flow->flow;
5541         uint64_t item_flags = 0;
5542         uint64_t last_item = 0;
5543         uint64_t action_flags = 0;
5544         uint64_t priority = attr->priority;
5545         struct mlx5_flow_dv_matcher matcher = {
5546                 .mask = {
5547                         .size = sizeof(matcher.mask.buf),
5548                 },
5549         };
5550         int actions_n = 0;
5551         bool actions_end = false;
5552         struct mlx5_flow_dv_modify_hdr_resource res = {
5553                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5554                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5555         };
5556         union flow_dv_attr flow_attr = { .attr = 0 };
5557         struct mlx5_flow_dv_tag_resource tag_resource;
5558         uint32_t modify_action_position = UINT32_MAX;
5559         void *match_mask = matcher.mask.buf;
5560         void *match_value = dev_flow->dv.value.buf;
5561         uint8_t next_protocol = 0xff;
5562         struct rte_vlan_hdr vlan = { 0 };
5563         uint32_t table;
5564         int ret = 0;
5565
5566         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5567                                        &table, error);
5568         if (ret)
5569                 return ret;
5570         flow->group = table;
5571         if (attr->transfer)
5572                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5573         if (priority == MLX5_FLOW_PRIO_RSVD)
5574                 priority = priv->config.flow_prio - 1;
5575         for (; !actions_end; actions++) {
5576                 const struct rte_flow_action_queue *queue;
5577                 const struct rte_flow_action_rss *rss;
5578                 const struct rte_flow_action *action = actions;
5579                 const struct rte_flow_action_count *count = action->conf;
5580                 const uint8_t *rss_key;
5581                 const struct rte_flow_action_jump *jump_data;
5582                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5583                 struct mlx5_flow_tbl_resource *tbl;
5584                 uint32_t port_id = 0;
5585                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5586                 int action_type = actions->type;
5587                 const struct rte_flow_action *found_action = NULL;
5588
5589                 switch (action_type) {
5590                 case RTE_FLOW_ACTION_TYPE_VOID:
5591                         break;
5592                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5593                         if (flow_dv_translate_action_port_id(dev, action,
5594                                                              &port_id, error))
5595                                 return -rte_errno;
5596                         port_id_resource.port_id = port_id;
5597                         if (flow_dv_port_id_action_resource_register
5598                             (dev, &port_id_resource, dev_flow, error))
5599                                 return -rte_errno;
5600                         dev_flow->dv.actions[actions_n++] =
5601                                 dev_flow->dv.port_id_action->action;
5602                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5603                         break;
5604                 case RTE_FLOW_ACTION_TYPE_FLAG:
5605                         tag_resource.tag =
5606                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5607                         if (!flow->tag_resource)
5608                                 if (flow_dv_tag_resource_register
5609                                     (dev, &tag_resource, dev_flow, error))
5610                                         return -rte_errno;
5611                         dev_flow->dv.actions[actions_n++] =
5612                                 flow->tag_resource->action;
5613                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5614                         break;
5615                 case RTE_FLOW_ACTION_TYPE_MARK:
5616                         tag_resource.tag = mlx5_flow_mark_set
5617                               (((const struct rte_flow_action_mark *)
5618                                (actions->conf))->id);
5619                         if (!flow->tag_resource)
5620                                 if (flow_dv_tag_resource_register
5621                                     (dev, &tag_resource, dev_flow, error))
5622                                         return -rte_errno;
5623                         dev_flow->dv.actions[actions_n++] =
5624                                 flow->tag_resource->action;
5625                         action_flags |= MLX5_FLOW_ACTION_MARK;
5626                         break;
5627                 case RTE_FLOW_ACTION_TYPE_DROP:
5628                         action_flags |= MLX5_FLOW_ACTION_DROP;
5629                         break;
5630                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5631                         queue = actions->conf;
5632                         flow->rss.queue_num = 1;
5633                         (*flow->queue)[0] = queue->index;
5634                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5635                         break;
5636                 case RTE_FLOW_ACTION_TYPE_RSS:
5637                         rss = actions->conf;
5638                         if (flow->queue)
5639                                 memcpy((*flow->queue), rss->queue,
5640                                        rss->queue_num * sizeof(uint16_t));
5641                         flow->rss.queue_num = rss->queue_num;
5642                         /* NULL RSS key indicates default RSS key. */
5643                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5644                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5645                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5646                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5647                         flow->rss.level = rss->level;
5648                         action_flags |= MLX5_FLOW_ACTION_RSS;
5649                         break;
5650                 case RTE_FLOW_ACTION_TYPE_COUNT:
5651                         if (!priv->config.devx) {
5652                                 rte_errno = ENOTSUP;
5653                                 goto cnt_err;
5654                         }
5655                         flow->counter = flow_dv_counter_alloc(dev,
5656                                                               count->shared,
5657                                                               count->id,
5658                                                               flow->group);
5659                         if (flow->counter == NULL)
5660                                 goto cnt_err;
5661                         dev_flow->dv.actions[actions_n++] =
5662                                 flow->counter->action;
5663                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5664                         break;
5665 cnt_err:
5666                         if (rte_errno == ENOTSUP)
5667                                 return rte_flow_error_set
5668                                               (error, ENOTSUP,
5669                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5670                                                NULL,
5671                                                "count action not supported");
5672                         else
5673                                 return rte_flow_error_set
5674                                                 (error, rte_errno,
5675                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5676                                                  action,
5677                                                  "cannot create counter"
5678                                                   " object.");
5679                         break;
5680                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5681                         dev_flow->dv.actions[actions_n++] =
5682                                                 priv->sh->pop_vlan_action;
5683                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5684                         break;
5685                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5686                         flow_dev_get_vlan_info_from_items(items, &vlan);
5687                         vlan.eth_proto = rte_be_to_cpu_16
5688                              ((((const struct rte_flow_action_of_push_vlan *)
5689                                                    actions->conf)->ethertype));
5690                         found_action = mlx5_flow_find_action
5691                                         (actions + 1,
5692                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
5693                         if (found_action)
5694                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5695                         found_action = mlx5_flow_find_action
5696                                         (actions + 1,
5697                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
5698                         if (found_action)
5699                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5700                         if (flow_dv_create_action_push_vlan
5701                                             (dev, attr, &vlan, dev_flow, error))
5702                                 return -rte_errno;
5703                         dev_flow->dv.actions[actions_n++] =
5704                                            dev_flow->dv.push_vlan_res->action;
5705                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5706                         break;
5707                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5708                         /* Already handled by the OF_PUSH_VLAN action. */
5709                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
5710                         break;
5711                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5712                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5713                                 break;
5714                         flow_dev_get_vlan_info_from_items(items, &vlan);
5715                         mlx5_update_vlan_vid_pcp(actions, &vlan);
5716                         /* If no VLAN push - this is a modify header action */
5717                         if (flow_dv_convert_action_modify_vlan_vid
5718                                                         (&res, actions, error))
5719                                 return -rte_errno;
5720                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5721                         break;
5722                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5723                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5724                         if (flow_dv_create_action_l2_encap(dev, actions,
5725                                                            dev_flow,
5726                                                            attr->transfer,
5727                                                            error))
5728                                 return -rte_errno;
5729                         dev_flow->dv.actions[actions_n++] =
5730                                 dev_flow->dv.encap_decap->verbs_action;
5731                         action_flags |= actions->type ==
5732                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5733                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5734                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5735                         break;
5736                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5737                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5738                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5739                                                            attr->transfer,
5740                                                            error))
5741                                 return -rte_errno;
5742                         dev_flow->dv.actions[actions_n++] =
5743                                 dev_flow->dv.encap_decap->verbs_action;
5744                         action_flags |= actions->type ==
5745                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5746                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5747                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5748                         break;
5749                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5750                         /* Handle encap with preceding decap. */
5751                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5752                                 if (flow_dv_create_action_raw_encap
5753                                         (dev, actions, dev_flow, attr, error))
5754                                         return -rte_errno;
5755                                 dev_flow->dv.actions[actions_n++] =
5756                                         dev_flow->dv.encap_decap->verbs_action;
5757                         } else {
5758                                 /* Handle encap without preceding decap. */
5759                                 if (flow_dv_create_action_l2_encap
5760                                     (dev, actions, dev_flow, attr->transfer,
5761                                      error))
5762                                         return -rte_errno;
5763                                 dev_flow->dv.actions[actions_n++] =
5764                                         dev_flow->dv.encap_decap->verbs_action;
5765                         }
5766                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5767                         break;
5768                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5769                         /* Check if this decap is followed by encap. */
5770                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5771                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5772                                action++) {
5773                         }
5774                         /* Handle decap only if it isn't followed by encap. */
5775                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5776                                 if (flow_dv_create_action_l2_decap
5777                                     (dev, dev_flow, attr->transfer, error))
5778                                         return -rte_errno;
5779                                 dev_flow->dv.actions[actions_n++] =
5780                                         dev_flow->dv.encap_decap->verbs_action;
5781                         }
5782                         /* If decap is followed by encap, handle it at encap. */
5783                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5784                         break;
5785                 case RTE_FLOW_ACTION_TYPE_JUMP:
5786                         jump_data = action->conf;
5787                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5788                                                        jump_data->group, &table,
5789                                                        error);
5790                         if (ret)
5791                                 return ret;
5792                         tbl = flow_dv_tbl_resource_get(dev, table,
5793                                                        attr->egress,
5794                                                        attr->transfer, error);
5795                         if (!tbl)
5796                                 return rte_flow_error_set
5797                                                 (error, errno,
5798                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5799                                                  NULL,
5800                                                  "cannot create jump action.");
5801                         jump_tbl_resource.tbl = tbl;
5802                         if (flow_dv_jump_tbl_resource_register
5803                             (dev, &jump_tbl_resource, dev_flow, error)) {
5804                                 flow_dv_tbl_resource_release(tbl);
5805                                 return rte_flow_error_set
5806                                                 (error, errno,
5807                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5808                                                  NULL,
5809                                                  "cannot create jump action.");
5810                         }
5811                         dev_flow->dv.actions[actions_n++] =
5812                                 dev_flow->dv.jump->action;
5813                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5814                         break;
5815                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5816                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5817                         if (flow_dv_convert_action_modify_mac(&res, actions,
5818                                                               error))
5819                                 return -rte_errno;
5820                         action_flags |= actions->type ==
5821                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5822                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5823                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5824                         break;
5825                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5826                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5827                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5828                                                                error))
5829                                 return -rte_errno;
5830                         action_flags |= actions->type ==
5831                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5832                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5833                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5834                         break;
5835                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5836                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5837                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5838                                                                error))
5839                                 return -rte_errno;
5840                         action_flags |= actions->type ==
5841                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5842                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5843                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5844                         break;
5845                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5846                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5847                         if (flow_dv_convert_action_modify_tp(&res, actions,
5848                                                              items, &flow_attr,
5849                                                              error))
5850                                 return -rte_errno;
5851                         action_flags |= actions->type ==
5852                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5853                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5854                                         MLX5_FLOW_ACTION_SET_TP_DST;
5855                         break;
5856                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5857                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5858                                                                   &flow_attr,
5859                                                                   error))
5860                                 return -rte_errno;
5861                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5862                         break;
5863                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5864                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5865                                                              items, &flow_attr,
5866                                                              error))
5867                                 return -rte_errno;
5868                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5869                         break;
5870                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5871                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5872                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5873                                                                   error))
5874                                 return -rte_errno;
5875                         action_flags |= actions->type ==
5876                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5877                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
5878                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5879                         break;
5880
5881                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5882                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5883                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
5884                                                                   error))
5885                                 return -rte_errno;
5886                         action_flags |= actions->type ==
5887                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5888                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
5889                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
5890                         break;
5891                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5892                         if (flow_dv_convert_action_set_reg(&res, actions,
5893                                                            error))
5894                                 return -rte_errno;
5895                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5896                         break;
5897                 case RTE_FLOW_ACTION_TYPE_END:
5898                         actions_end = true;
5899                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
5900                                 /* create modify action if needed. */
5901                                 if (flow_dv_modify_hdr_resource_register
5902                                                                 (dev, &res,
5903                                                                  dev_flow,
5904                                                                  error))
5905                                         return -rte_errno;
5906                                 dev_flow->dv.actions[modify_action_position] =
5907                                         dev_flow->dv.modify_hdr->verbs_action;
5908                         }
5909                         break;
5910                 default:
5911                         break;
5912                 }
5913                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
5914                     modify_action_position == UINT32_MAX)
5915                         modify_action_position = actions_n++;
5916         }
5917         dev_flow->dv.actions_n = actions_n;
5918         dev_flow->actions = action_flags;
5919         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5920                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5921                 int item_type = items->type;
5922
5923                 switch (item_type) {
5924                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5925                         flow_dv_translate_item_port_id(dev, match_mask,
5926                                                        match_value, items);
5927                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5928                         break;
5929                 case RTE_FLOW_ITEM_TYPE_ETH:
5930                         flow_dv_translate_item_eth(match_mask, match_value,
5931                                                    items, tunnel);
5932                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5933                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5934                                              MLX5_FLOW_LAYER_OUTER_L2;
5935                         break;
5936                 case RTE_FLOW_ITEM_TYPE_VLAN:
5937                         flow_dv_translate_item_vlan(dev_flow,
5938                                                     match_mask, match_value,
5939                                                     items, tunnel);
5940                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5941                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5942                                               MLX5_FLOW_LAYER_INNER_VLAN) :
5943                                              (MLX5_FLOW_LAYER_OUTER_L2 |
5944                                               MLX5_FLOW_LAYER_OUTER_VLAN);
5945                         break;
5946                 case RTE_FLOW_ITEM_TYPE_IPV4:
5947                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5948                                                   &item_flags, &tunnel);
5949                         flow_dv_translate_item_ipv4(match_mask, match_value,
5950                                                     items, tunnel, flow->group);
5951                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5952                         dev_flow->dv.hash_fields |=
5953                                 mlx5_flow_hashfields_adjust
5954                                         (dev_flow, tunnel,
5955                                          MLX5_IPV4_LAYER_TYPES,
5956                                          MLX5_IPV4_IBV_RX_HASH);
5957                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5958                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5959                         if (items->mask != NULL &&
5960                             ((const struct rte_flow_item_ipv4 *)
5961                              items->mask)->hdr.next_proto_id) {
5962                                 next_protocol =
5963                                         ((const struct rte_flow_item_ipv4 *)
5964                                          (items->spec))->hdr.next_proto_id;
5965                                 next_protocol &=
5966                                         ((const struct rte_flow_item_ipv4 *)
5967                                          (items->mask))->hdr.next_proto_id;
5968                         } else {
5969                                 /* Reset for inner layer. */
5970                                 next_protocol = 0xff;
5971                         }
5972                         break;
5973                 case RTE_FLOW_ITEM_TYPE_IPV6:
5974                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5975                                                   &item_flags, &tunnel);
5976                         flow_dv_translate_item_ipv6(match_mask, match_value,
5977                                                     items, tunnel, flow->group);
5978                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5979                         dev_flow->dv.hash_fields |=
5980                                 mlx5_flow_hashfields_adjust
5981                                         (dev_flow, tunnel,
5982                                          MLX5_IPV6_LAYER_TYPES,
5983                                          MLX5_IPV6_IBV_RX_HASH);
5984                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5985                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5986                         if (items->mask != NULL &&
5987                             ((const struct rte_flow_item_ipv6 *)
5988                              items->mask)->hdr.proto) {
5989                                 next_protocol =
5990                                         ((const struct rte_flow_item_ipv6 *)
5991                                          items->spec)->hdr.proto;
5992                                 next_protocol &=
5993                                         ((const struct rte_flow_item_ipv6 *)
5994                                          items->mask)->hdr.proto;
5995                         } else {
5996                                 /* Reset for inner layer. */
5997                                 next_protocol = 0xff;
5998                         }
5999                         break;
6000                 case RTE_FLOW_ITEM_TYPE_TCP:
6001                         flow_dv_translate_item_tcp(match_mask, match_value,
6002                                                    items, tunnel);
6003                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6004                         dev_flow->dv.hash_fields |=
6005                                 mlx5_flow_hashfields_adjust
6006                                         (dev_flow, tunnel, ETH_RSS_TCP,
6007                                          IBV_RX_HASH_SRC_PORT_TCP |
6008                                          IBV_RX_HASH_DST_PORT_TCP);
6009                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
6010                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
6011                         break;
6012                 case RTE_FLOW_ITEM_TYPE_UDP:
6013                         flow_dv_translate_item_udp(match_mask, match_value,
6014                                                    items, tunnel);
6015                         matcher.priority = MLX5_PRIORITY_MAP_L4;
6016                         dev_flow->dv.hash_fields |=
6017                                 mlx5_flow_hashfields_adjust
6018                                         (dev_flow, tunnel, ETH_RSS_UDP,
6019                                          IBV_RX_HASH_SRC_PORT_UDP |
6020                                          IBV_RX_HASH_DST_PORT_UDP);
6021                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
6022                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
6023                         break;
6024                 case RTE_FLOW_ITEM_TYPE_GRE:
6025                         flow_dv_translate_item_gre(match_mask, match_value,
6026                                                    items, tunnel);
6027                         last_item = MLX5_FLOW_LAYER_GRE;
6028                         break;
6029                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6030                         flow_dv_translate_item_gre_key(match_mask,
6031                                                        match_value, items);
6032                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6033                         break;
6034                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6035                         flow_dv_translate_item_nvgre(match_mask, match_value,
6036                                                      items, tunnel);
6037                         last_item = MLX5_FLOW_LAYER_GRE;
6038                         break;
6039                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6040                         flow_dv_translate_item_vxlan(match_mask, match_value,
6041                                                      items, tunnel);
6042                         last_item = MLX5_FLOW_LAYER_VXLAN;
6043                         break;
6044                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
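                        /* VXLAN-GPE reuses the VXLAN translator (VNI match). */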
6045                         flow_dv_translate_item_vxlan(match_mask, match_value,
6046                                                      items, tunnel);
6047                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6048                         break;
6049                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6050                         flow_dv_translate_item_geneve(match_mask, match_value,
6051                                                       items, tunnel);
6052                         last_item = MLX5_FLOW_LAYER_GENEVE;
6053                         break;
6054                 case RTE_FLOW_ITEM_TYPE_MPLS:
6055                         flow_dv_translate_item_mpls(match_mask, match_value,
6056                                                     items, last_item, tunnel);
6057                         last_item = MLX5_FLOW_LAYER_MPLS;
6058                         break;
6059                 case RTE_FLOW_ITEM_TYPE_META:
6060                         flow_dv_translate_item_meta(match_mask, match_value,
6061                                                     items);
6062                         last_item = MLX5_FLOW_ITEM_METADATA;
6063                         break;
6064                 case RTE_FLOW_ITEM_TYPE_ICMP:
6065                         flow_dv_translate_item_icmp(match_mask, match_value,
6066                                                     items, tunnel);
6067                         last_item = MLX5_FLOW_LAYER_ICMP;
6068                         break;
6069                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6070                         flow_dv_translate_item_icmp6(match_mask, match_value,
6071                                                       items, tunnel);
6072                         last_item = MLX5_FLOW_LAYER_ICMP6;
6073                         break;
6074                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6075                         flow_dv_translate_item_tag(match_mask, match_value,
6076                                                    items);
6077                         last_item = MLX5_FLOW_ITEM_TAG;
6078                         break;
6079                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6080                         flow_dv_translate_item_tx_queue(dev, match_mask,
6081                                                         match_value,
6082                                                         items);
6083                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
6084                         break;
6085                 default:
6086                         break;
6087                 }
6088                 item_flags |= last_item;
6089         }
6090         /*
6091          * When E-Switch mode is enabled and traffic is ingress, there
6092          * are two cases where the source port must be set manually. The
6093          * first is a NIC steering rule; the second is an E-Switch rule
6094          * with no port_id item. In both cases the source port is set
6095          * according to the current port in use.
6096          */
6097         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
6098             (priv->representor || priv->master)) {
6099                 if (flow_dv_translate_item_port_id(dev, match_mask,
6100                                                    match_value, NULL))
6101                         return -rte_errno;
6102         }
6103         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
6104                                          dev_flow->dv.value.buf));
6105         dev_flow->layers = item_flags;
6106         /* Register matcher. */
6107         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
6108                                     matcher.mask.size);
6109         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
6110                                                      matcher.priority);
6111         matcher.egress = attr->egress;
6112         matcher.group = flow->group;
6113         matcher.transfer = attr->transfer;
6114         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
6115                 return -rte_errno;
6116         return 0;
6117 }
6118
6119 /**
6120  * Apply the flow to the NIC.
6121  *
6122  * @param[in] dev
6123  *   Pointer to the Ethernet device structure.
6124  * @param[in, out] flow
6125  *   Pointer to flow structure.
6126  * @param[out] error
6127  *   Pointer to error structure.
6128  *
6129  * @return
6130  *   0 on success, a negative errno value otherwise and rte_errno is set.
6131  */
6132 static int
6133 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
6134               struct rte_flow_error *error)
6135 {
6136         struct mlx5_flow_dv *dv;
6137         struct mlx5_flow *dev_flow;
6138         struct mlx5_priv *priv = dev->data->dev_private;
6139         int n;
6140         int err;
6141
6142         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6143                 dv = &dev_flow->dv;
6144                 n = dv->actions_n;
6145                 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
6146                         if (flow->transfer) {
6147                                 dv->actions[n++] = priv->sh->esw_drop_action;
6148                         } else {
6149                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
6150                                 if (!dv->hrxq) {
6151                                         rte_flow_error_set
6152                                                 (error, errno,
6153                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6154                                                  NULL,
6155                                                  "cannot get drop hash queue");
6156                                         goto error;
6157                                 }
6158                                 dv->actions[n++] = dv->hrxq->action;
6159                         }
6160                 } else if (dev_flow->actions &
6161                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
6162                         struct mlx5_hrxq *hrxq;
6163
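                        /*
                         * Reuse a cached Rx hash queue matching the RSS key,
                         * hash fields and queue list; create one on miss.
                         */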
6164                         hrxq = mlx5_hrxq_get(dev, flow->key,
6165                                              MLX5_RSS_HASH_KEY_LEN,
6166                                              dv->hash_fields,
6167                                              (*flow->queue),
6168                                              flow->rss.queue_num);
6169                         if (!hrxq) {
6170                                 hrxq = mlx5_hrxq_new
6171                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
6172                                          dv->hash_fields, (*flow->queue),
6173                                          flow->rss.queue_num,
6174                                          !!(dev_flow->layers &
6175                                             MLX5_FLOW_LAYER_TUNNEL));
6176                         }
6177                         if (!hrxq) {
6178                                 rte_flow_error_set
6179                                         (error, rte_errno,
6180                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6181                                          "cannot get hash queue");
6182                                 goto error;
6183                         }
6184                         dv->hrxq = hrxq;
6185                         dv->actions[n++] = dv->hrxq->action;
6186                 }
6187                 dv->flow =
6188                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
6189                                                   (void *)&dv->value, n,
6190                                                   dv->actions);
6191                 if (!dv->flow) {
6192                         rte_flow_error_set(error, errno,
6193                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6194                                            NULL,
6195                                            "hardware refuses to create flow");
6196                         goto error;
6197                 }
6198                 if (priv->vmwa_context &&
6199                     dev_flow->dv.vf_vlan.tag &&
6200                     !dev_flow->dv.vf_vlan.created) {
6201                         /*
6202                          * The rule contains the VLAN pattern.
6203                          * For a VF we create a VLAN interface to
6204                          * make the hypervisor set the correct
6205                          * E-Switch vport context.
6206                          */
6207                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
6208                 }
6209         }
6210         return 0;
6211 error:
6212         err = rte_errno; /* Save rte_errno before cleanup. */
6213         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6214                 struct mlx5_flow_dv *dv = &dev_flow->dv;
6215                 if (dv->hrxq) {
6216                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6217                                 mlx5_hrxq_drop_release(dev);
6218                         else
6219                                 mlx5_hrxq_release(dev, dv->hrxq);
6220                         dv->hrxq = NULL;
6221                 }
6222                 if (dev_flow->dv.vf_vlan.tag &&
6223                     dev_flow->dv.vf_vlan.created)
6224                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6225         }
6226         rte_errno = err; /* Restore rte_errno. */
6227         return -rte_errno;
6228 }
6229
6230 /**
6231  * Release the flow matcher.
6232  *
6233  * @param dev
6234  *   Pointer to Ethernet device.
6235  * @param flow
6236  *   Pointer to mlx5_flow.
6237  *
6238  * @return
6239  *   1 while a reference on it exists, 0 when freed.
6240  */
6241 static int
6242 flow_dv_matcher_release(struct rte_eth_dev *dev,
6243                         struct mlx5_flow *flow)
6244 {
6245         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
6246         struct mlx5_priv *priv = dev->data->dev_private;
6247         struct mlx5_ibv_shared *sh = priv->sh;
6248         struct mlx5_flow_tbl_resource *tbl;
6249
6250         assert(matcher->matcher_object);
6251         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
6252                 dev->data->port_id, (void *)matcher,
6253                 rte_atomic32_read(&matcher->refcnt));
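        /* Destroy the HW matcher and release its table on the last reference only. */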
6254         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
6255                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
6256                            (matcher->matcher_object));
6257                 LIST_REMOVE(matcher, next);
6258                 if (matcher->egress)
6259                         tbl = &sh->tx_tbl[matcher->group];
6260                 else
6261                         tbl = &sh->rx_tbl[matcher->group];
6262                 flow_dv_tbl_resource_release(tbl);
6263                 rte_free(matcher);
6264                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
6265                         dev->data->port_id, (void *)matcher);
6266                 return 0;
6267         }
6268         return 1;
6269 }
6270
6271 /**
6272  * Release an encap/decap resource.
6273  *
6274  * @param flow
6275  *   Pointer to mlx5_flow.
6276  *
6277  * @return
6278  *   1 while a reference on it exists, 0 when freed.
6279  */
6280 static int
6281 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
6282 {
6283         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
6284                                                 flow->dv.encap_decap;
6285
6286         assert(cache_resource->verbs_action);
6287         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
6288                 (void *)cache_resource,
6289                 rte_atomic32_read(&cache_resource->refcnt));
6290         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6291                 claim_zero(mlx5_glue->destroy_flow_action
6292                                 (cache_resource->verbs_action));
6293                 LIST_REMOVE(cache_resource, next);
6294                 rte_free(cache_resource);
6295                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
6296                         (void *)cache_resource);
6297                 return 0;
6298         }
6299         return 1;
6300 }
6301
6302 /**
6303  * Release a jump to table action resource.
6304  *
6305  * @param flow
6306  *   Pointer to mlx5_flow.
6307  *
6308  * @return
6309  *   1 while a reference on it exists, 0 when freed.
6310  */
6311 static int
6312 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
6313 {
6314         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
6315                                                 flow->dv.jump;
6316
6317         assert(cache_resource->action);
6318         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
6319                 (void *)cache_resource,
6320                 rte_atomic32_read(&cache_resource->refcnt));
6321         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6322                 claim_zero(mlx5_glue->destroy_flow_action
6323                                 (cache_resource->action));
6324                 LIST_REMOVE(cache_resource, next);
6325                 flow_dv_tbl_resource_release(cache_resource->tbl);
6326                 rte_free(cache_resource);
6327                 DRV_LOG(DEBUG, "jump table resource %p: removed",
6328                         (void *)cache_resource);
6329                 return 0;
6330         }
6331         return 1;
6332 }
6333
6334 /**
6335  * Release a modify-header resource.
6336  *
6337  * @param flow
6338  *   Pointer to mlx5_flow.
6339  *
6340  * @return
6341  *   1 while a reference on it exists, 0 when freed.
6342  */
6343 static int
6344 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
6345 {
6346         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
6347                                                 flow->dv.modify_hdr;
6348
6349         assert(cache_resource->verbs_action);
6350         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
6351                 (void *)cache_resource,
6352                 rte_atomic32_read(&cache_resource->refcnt));
6353         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6354                 claim_zero(mlx5_glue->destroy_flow_action
6355                                 (cache_resource->verbs_action));
6356                 LIST_REMOVE(cache_resource, next);
6357                 rte_free(cache_resource);
6358                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
6359                         (void *)cache_resource);
6360                 return 0;
6361         }
6362         return 1;
6363 }
6364
6365 /**
6366  * Release port ID action resource.
6367  *
6368  * @param flow
6369  *   Pointer to mlx5_flow.
6370  *
6371  * @return
6372  *   1 while a reference on it exists, 0 when freed.
6373  */
6374 static int
6375 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
6376 {
6377         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
6378                 flow->dv.port_id_action;
6379
6380         assert(cache_resource->action);
6381         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
6382                 (void *)cache_resource,
6383                 rte_atomic32_read(&cache_resource->refcnt));
6384         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6385                 claim_zero(mlx5_glue->destroy_flow_action
6386                                 (cache_resource->action));
6387                 LIST_REMOVE(cache_resource, next);
6388                 rte_free(cache_resource);
6389                 DRV_LOG(DEBUG, "port id action resource %p: removed",
6390                         (void *)cache_resource);
6391                 return 0;
6392         }
6393         return 1;
6394 }
6395
6396 /**
6397  * Release push vlan action resource.
6398  *
6399  * @param flow
6400  *   Pointer to mlx5_flow.
6401  *
6402  * @return
6403  *   1 while a reference on it exists, 0 when freed.
6404  */
6405 static int
6406 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6407 {
6408         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6409                 flow->dv.push_vlan_res;
6410
6411         assert(cache_resource->action);
6412         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6413                 (void *)cache_resource,
6414                 rte_atomic32_read(&cache_resource->refcnt));
6415         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6416                 claim_zero(mlx5_glue->destroy_flow_action
6417                                 (cache_resource->action));
6418                 LIST_REMOVE(cache_resource, next);
6419                 rte_free(cache_resource);
6420                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
6421                         (void *)cache_resource);
6422                 return 0;
6423         }
6424         return 1;
6425 }
6426
6427 /**
6428  * Remove the flow from the NIC but keep it in memory.
6429  *
6430  * @param[in] dev
6431  *   Pointer to Ethernet device.
6432  * @param[in, out] flow
6433  *   Pointer to flow structure.
6434  */
6435 static void
6436 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6437 {
6438         struct mlx5_flow_dv *dv;
6439         struct mlx5_flow *dev_flow;
6440
6441         if (!flow)
6442                 return;
6443         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6444                 dv = &dev_flow->dv;
6445                 if (dv->flow) {
6446                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6447                         dv->flow = NULL;
6448                 }
6449                 if (dv->hrxq) {
6450                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6451                                 mlx5_hrxq_drop_release(dev);
6452                         else
6453                                 mlx5_hrxq_release(dev, dv->hrxq);
6454                         dv->hrxq = NULL;
6455                 }
6456                 if (dev_flow->dv.vf_vlan.tag &&
6457                     dev_flow->dv.vf_vlan.created)
6458                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6459         }
6460 }
6461
6462 /**
6463  * Remove the flow from the NIC and the memory.
6464  *
6465  * @param[in] dev
6466  *   Pointer to the Ethernet device structure.
6467  * @param[in, out] flow
6468  *   Pointer to flow structure.
6469  */
6470 static void
6471 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6472 {
6473         struct mlx5_flow *dev_flow;
6474
6475         if (!flow)
6476                 return;
6477         flow_dv_remove(dev, flow);
6478         if (flow->counter) {
6479                 flow_dv_counter_release(dev, flow->counter);
6480                 flow->counter = NULL;
6481         }
6482         if (flow->tag_resource) {
6483                 flow_dv_tag_release(dev, flow->tag_resource);
6484                 flow->tag_resource = NULL;
6485         }
6486         while (!LIST_EMPTY(&flow->dev_flows)) {
6487                 dev_flow = LIST_FIRST(&flow->dev_flows);
6488                 LIST_REMOVE(dev_flow, next);
6489                 if (dev_flow->dv.matcher)
6490                         flow_dv_matcher_release(dev, dev_flow);
6491                 if (dev_flow->dv.encap_decap)
6492                         flow_dv_encap_decap_resource_release(dev_flow);
6493                 if (dev_flow->dv.modify_hdr)
6494                         flow_dv_modify_hdr_resource_release(dev_flow);
6495                 if (dev_flow->dv.jump)
6496                         flow_dv_jump_tbl_resource_release(dev_flow);
6497                 if (dev_flow->dv.port_id_action)
6498                         flow_dv_port_id_action_resource_release(dev_flow);
6499                 if (dev_flow->dv.push_vlan_res)
6500                         flow_dv_push_vlan_action_resource_release(dev_flow);
6501                 rte_free(dev_flow);
6502         }
6503 }
6504
6505 /**
6506  * Query a DV flow rule for its statistics via DevX.
6507  *
6508  * @param[in] dev
6509  *   Pointer to Ethernet device.
6510  * @param[in] flow
6511  *   Pointer to the flow structure.
6512  * @param[out] data
6513  *   Data retrieved by the query.
6514  * @param[out] error
6515  *   Perform verbose error reporting if not NULL.
6516  *
6517  * @return
6518  *   0 on success, a negative errno value otherwise and rte_errno is set.
6519  */
6520 static int
6521 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
6522                     void *data, struct rte_flow_error *error)
6523 {
6524         struct mlx5_priv *priv = dev->data->dev_private;
6525         struct rte_flow_query_count *qc = data;
6526
6527         if (!priv->config.devx)
6528                 return rte_flow_error_set(error, ENOTSUP,
6529                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6530                                           NULL,
6531                                           "counters are not supported");
6532         if (flow->counter) {
6533                 uint64_t pkts, bytes;
6534                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
6535                                                &bytes);
6536
6537                 if (err)
6538                         return rte_flow_error_set(error, -err,
6539                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6540                                         NULL, "cannot read counters");
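                /*
                 * The counter keeps the absolute values sampled at the last
                 * reset; report deltas against that baseline.
                 */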
6541                 qc->hits_set = 1;
6542                 qc->bytes_set = 1;
6543                 qc->hits = pkts - flow->counter->hits;
6544                 qc->bytes = bytes - flow->counter->bytes;
6545                 if (qc->reset) {
6546                         flow->counter->hits = pkts;
6547                         flow->counter->bytes = bytes;
6548                 }
6549                 return 0;
6550         }
6551         return rte_flow_error_set(error, EINVAL,
6552                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6553                                   NULL,
6554                                   "counters are not available");
6555 }
6556
6557 /**
6558  * Query a flow.
6559  *
6560  * @see rte_flow_query()
6561  * @see rte_flow_ops
6562  */
6563 static int
6564 flow_dv_query(struct rte_eth_dev *dev,
6565               struct rte_flow *flow,
6566               const struct rte_flow_action *actions,
6567               void *data,
6568               struct rte_flow_error *error)
6569 {
6570         int ret = -EINVAL;
6571
6572         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6573                 switch (actions->type) {
6574                 case RTE_FLOW_ACTION_TYPE_VOID:
6575                         break;
6576                 case RTE_FLOW_ACTION_TYPE_COUNT:
6577                         ret = flow_dv_query_count(dev, flow, data, error);
6578                         break;
6579                 default:
6580                         return rte_flow_error_set(error, ENOTSUP,
6581                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6582                                                   actions,
6583                                                   "action not supported");
6584                 }
6585         }
6586         return ret;
6587 }
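
/*
 * Illustrative usage sketch (editor's addition, not part of the driver):
 * how an application would reach flow_dv_query_count() above through the
 * generic rte_flow API. "port_id" and "flow" are hypothetical caller-owned
 * handles; the flow is assumed to have been created with a COUNT action.
 */
static inline int
flow_dv_query_count_usage_sketch(uint16_t port_id, struct rte_flow *flow)
{
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_query_count qc = { .reset = 1 };
        struct rte_flow_error qerr;
        int ret;

        /* Dispatches to flow_dv_query() -> flow_dv_query_count(). */
        ret = rte_flow_query(port_id, flow, actions, &qc, &qerr);
        if (ret)
                return ret;
        /* hits/bytes hold the deltas since the previous reset. */
        return qc.hits_set && qc.bytes_set ? 0 : -ENOTSUP;
}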
6588
6589 /*
6590  * Mutex-protected thunk to flow_dv_translate().
6591  */
6592 static int
6593 flow_d_translate(struct rte_eth_dev *dev,
6594                  struct mlx5_flow *dev_flow,
6595                  const struct rte_flow_attr *attr,
6596                  const struct rte_flow_item items[],
6597                  const struct rte_flow_action actions[],
6598                  struct rte_flow_error *error)
6599 {
6600         int ret;
6601
6602         flow_d_shared_lock(dev);
6603         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
6604         flow_d_shared_unlock(dev);
6605         return ret;
6606 }
6607
6608 /*
6609  * Mutex-protected thunk to flow_dv_apply().
6610  */
6611 static int
6612 flow_d_apply(struct rte_eth_dev *dev,
6613              struct rte_flow *flow,
6614              struct rte_flow_error *error)
6615 {
6616         int ret;
6617
6618         flow_d_shared_lock(dev);
6619         ret = flow_dv_apply(dev, flow, error);
6620         flow_d_shared_unlock(dev);
6621         return ret;
6622 }
6623
6624 /*
6625  * Mutex-protected thunk to flow_dv_remove().
6626  */
6627 static void
6628 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6629 {
6630         flow_d_shared_lock(dev);
6631         flow_dv_remove(dev, flow);
6632         flow_d_shared_unlock(dev);
6633 }
6634
6635 /*
6636  * Mutex-protected thunk to flow_dv_destroy().
6637  */
6638 static void
6639 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6640 {
6641         flow_d_shared_lock(dev);
6642         flow_dv_destroy(dev, flow);
6643         flow_d_shared_unlock(dev);
6644 }
6645
6646 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
6647         .validate = flow_dv_validate,
6648         .prepare = flow_dv_prepare,
6649         .translate = flow_d_translate,
6650         .apply = flow_d_apply,
6651         .remove = flow_d_remove,
6652         .destroy = flow_d_destroy,
6653         .query = flow_dv_query,
6654 };
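
/*
 * The generic mlx5 flow layer selects this table when the DV engine is
 * enabled and dispatches through it. A minimal dispatch sketch follows
 * (editor's addition; the wrapper name is hypothetical, only the .apply
 * member used here is taken from the table above).
 */
static inline int
flow_dv_apply_via_ops_sketch(struct rte_eth_dev *dev, struct rte_flow *flow,
                             struct rte_flow_error *error)
{
        const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;

        /* Same entry point the generic layer invokes for DV flows. */
        return fops->apply(dev, flow, error);
}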
6655
6656 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */