ethdev: move egress metadata to dynamic field
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 /* Verbs header. */
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #ifdef PEDANTIC
14 #pragma GCC diagnostic ignored "-Wpedantic"
15 #endif
16 #include <infiniband/verbs.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
20
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28 #include <rte_gre.h>
29 #include <rte_vxlan.h>
30
31 #include "mlx5.h"
32 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35 #include "mlx5_prm.h"
36 #include "mlx5_rxtx.h"
37
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
60
61 union flow_dv_attr {
62         struct {
63                 uint32_t valid:1;
64                 uint32_t ipv4:1;
65                 uint32_t ipv6:1;
66                 uint32_t tcp:1;
67                 uint32_t udp:1;
68                 uint32_t reserved:27;
69         };
70         uint32_t attr;
71 };
72
73 /**
74  * Initialize flow attributes structure according to flow items' types.
75  *
76  * @param[in] item
77  *   Pointer to item specification.
78  * @param[out] attr
79  *   Pointer to flow attributes structure.
80  */
81 static void
82 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
83 {
84         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
85                 switch (item->type) {
86                 case RTE_FLOW_ITEM_TYPE_IPV4:
87                         attr->ipv4 = 1;
88                         break;
89                 case RTE_FLOW_ITEM_TYPE_IPV6:
90                         attr->ipv6 = 1;
91                         break;
92                 case RTE_FLOW_ITEM_TYPE_UDP:
93                         attr->udp = 1;
94                         break;
95                 case RTE_FLOW_ITEM_TYPE_TCP:
96                         attr->tcp = 1;
97                         break;
98                 default:
99                         break;
100                 }
101         }
102         attr->valid = 1;
103 }
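/*
 * Illustrative sketch (not part of the driver): how a pattern such as
 * eth / ipv4 / udp collapses into the flow_dv_attr union above. The item
 * array is hypothetical and shown only to demonstrate the mapping.
 *
 *     const struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     union flow_dv_attr attr = { .attr = 0 };
 *
 *     flow_dv_attr_init(items, &attr);
 *     // Now attr.ipv4 == 1, attr.udp == 1 and attr.valid == 1,
 *     // while attr.ipv6 == 0 and attr.tcp == 0.
 */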
104
105 struct field_modify_info {
106         uint32_t size; /* Size of field in protocol header, in bytes. */
107         uint32_t offset; /* Offset of field in protocol header, in bytes. */
108         enum mlx5_modification_field id;
109 };
110
111 struct field_modify_info modify_eth[] = {
112         {4,  0, MLX5_MODI_OUT_DMAC_47_16},
113         {2,  4, MLX5_MODI_OUT_DMAC_15_0},
114         {4,  6, MLX5_MODI_OUT_SMAC_47_16},
115         {2, 10, MLX5_MODI_OUT_SMAC_15_0},
116         {0, 0, 0},
117 };
118
119 struct field_modify_info modify_vlan_out_first_vid[] = {
120         /* Size in bits !!! */
121         {12, 0, MLX5_MODI_OUT_FIRST_VID},
122         {0, 0, 0},
123 };
124
125 struct field_modify_info modify_ipv4[] = {
126         {1,  8, MLX5_MODI_OUT_IPV4_TTL},
127         {4, 12, MLX5_MODI_OUT_SIPV4},
128         {4, 16, MLX5_MODI_OUT_DIPV4},
129         {0, 0, 0},
130 };
131
132 struct field_modify_info modify_ipv6[] = {
133         {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
134         {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
135         {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
136         {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
137         {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
138         {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
139         {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
140         {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
141         {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
142         {0, 0, 0},
143 };
144
145 struct field_modify_info modify_udp[] = {
146         {2, 0, MLX5_MODI_OUT_UDP_SPORT},
147         {2, 2, MLX5_MODI_OUT_UDP_DPORT},
148         {0, 0, 0},
149 };
150
151 struct field_modify_info modify_tcp[] = {
152         {2, 0, MLX5_MODI_OUT_TCP_SPORT},
153         {2, 2, MLX5_MODI_OUT_TCP_DPORT},
154         {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
155         {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
156         {0, 0, 0},
157 };
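/*
 * Reading these tables: each entry maps a byte range of the protocol header
 * to the device modification field that rewrites it. In modify_ipv4, for
 * example, {1, 8, MLX5_MODI_OUT_IPV4_TTL} covers the 1-byte TTL at offset 8
 * of the IPv4 header, and {4, 12, MLX5_MODI_OUT_SIPV4} the 4-byte source
 * address at offset 12. A {0, 0, 0} entry terminates each table; only
 * modify_vlan_out_first_vid deviates by giving its size in bits.
 */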
158
159 static void
160 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
161                           uint8_t next_protocol, uint64_t *item_flags,
162                           int *tunnel)
163 {
164         assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
165                item->type == RTE_FLOW_ITEM_TYPE_IPV6);
166         if (next_protocol == IPPROTO_IPIP) {
167                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
168                 *tunnel = 1;
169         }
170         if (next_protocol == IPPROTO_IPV6) {
171                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
172                 *tunnel = 1;
173         }
174 }
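/*
 * Example of the check above: in a pattern like eth / ipv4 (proto 4) /
 * ipv4 / ..., the outer IPv4 item carries next_protocol == IPPROTO_IPIP,
 * so MLX5_FLOW_LAYER_IPIP is recorded and the following IPv4 item is
 * treated as the inner (tunneled) header. IPPROTO_IPV6 (41) marks an
 * IPv6-in-IP encapsulation the same way.
 */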
175
176 /**
177  * Acquire the synchronizing object to protect multithreaded access
178  * to shared dv context. Lock occurs only if context is actually
179  * shared, i.e. we have multiport IB device and representors are
180  * created.
181  *
182  * @param[in] dev
183  *   Pointer to the rte_eth_dev structure.
184  */
185 static void
186 flow_d_shared_lock(struct rte_eth_dev *dev)
187 {
188         struct mlx5_priv *priv = dev->data->dev_private;
189         struct mlx5_ibv_shared *sh = priv->sh;
190
191         if (sh->dv_refcnt > 1) {
192                 int ret;
193
194                 ret = pthread_mutex_lock(&sh->dv_mutex);
195                 assert(!ret);
196                 (void)ret;
197         }
198 }
199
200 static void
201 flow_d_shared_unlock(struct rte_eth_dev *dev)
202 {
203         struct mlx5_priv *priv = dev->data->dev_private;
204         struct mlx5_ibv_shared *sh = priv->sh;
205
206         if (sh->dv_refcnt > 1) {
207                 int ret;
208
209                 ret = pthread_mutex_unlock(&sh->dv_mutex);
210                 assert(!ret);
211                 (void)ret;
212         }
213 }
214
215 /**
 * Update VLAN's VID/PCP based on input rte_flow_action.
216  *
217  * @param[in] action
218  *   Pointer to struct rte_flow_action.
219  * @param[out] vlan
220  *   Pointer to struct rte_vlan_hdr.
221  */
222 static void
223 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
224                          struct rte_vlan_hdr *vlan)
225 {
226         uint16_t vlan_tci;
227         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
228                 vlan_tci =
229                     ((const struct rte_flow_action_of_set_vlan_pcp *)
230                                                action->conf)->vlan_pcp;
231                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
232                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
233                 vlan->vlan_tci |= vlan_tci;
234         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
235                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
236                 vlan->vlan_tci |= rte_be_to_cpu_16
237                     (((const struct rte_flow_action_of_set_vlan_vid *)
238                                              action->conf)->vlan_vid);
239         }
240 }
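/*
 * Worked example (values hypothetical): the TCI layout is PCP (3 bits) |
 * DEI (1 bit) | VID (12 bits), matching the shift/mask macros above.
 * Applying a set_vlan_pcp action with vlan_pcp = 5 to a header whose
 * vlan_tci is 0x0064 (VID 100) yields
 * (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 0x0064 == 0xA064.
 */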
241
242 /**
243  * Convert modify-header action to DV specification.
244  *
245  * @param[in] item
246  *   Pointer to item specification.
247  * @param[in] field
248  *   Pointer to field modification information.
249  * @param[in,out] resource
250  *   Pointer to the modify-header resource.
251  * @param[in] type
252  *   Type of modification.
253  * @param[out] error
254  *   Pointer to the error structure.
255  *
256  * @return
257  *   0 on success, a negative errno value otherwise and rte_errno is set.
258  */
259 static int
260 flow_dv_convert_modify_action(struct rte_flow_item *item,
261                               struct field_modify_info *field,
262                               struct mlx5_flow_dv_modify_hdr_resource *resource,
263                               uint32_t type,
264                               struct rte_flow_error *error)
265 {
266         uint32_t i = resource->actions_num;
267         struct mlx5_modification_cmd *actions = resource->actions;
268         const uint8_t *spec = item->spec;
269         const uint8_t *mask = item->mask;
270         uint32_t set;
271
272         while (field->size) {
273                 set = 0;
274                 /* Generate modify command for each mask segment. */
275                 memcpy(&set, &mask[field->offset], field->size);
276                 if (set) {
277                         if (i >= MLX5_MODIFY_NUM)
278                                 return rte_flow_error_set(error, EINVAL,
279                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
280                                          "too many items to modify");
281                         actions[i].action_type = type;
282                         actions[i].field = field->id;
283                         actions[i].length = field->size ==
284                                         4 ? 0 : field->size * 8;
285                         rte_memcpy(&actions[i].data[4 - field->size],
286                                    &spec[field->offset], field->size);
287                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
288                         ++i;
289                 }
290                 if (resource->actions_num != i)
291                         resource->actions_num = i;
292                 field++;
293         }
294         if (!resource->actions_num)
295                 return rte_flow_error_set(error, EINVAL,
296                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
297                                           "invalid modification flow item");
298         return 0;
299 }
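/*
 * Worked example, hedged: converting a set_ipv4_src action through
 * modify_ipv4 (see flow_dv_convert_action_modify_ipv4 below). Only the
 * 4-byte source-address segment at offset 12 has a non-zero mask, so a
 * single command is appended:
 *
 *     action_type = MLX5_MODIFICATION_TYPE_SET;
 *     field       = MLX5_MODI_OUT_SIPV4;
 *     length      = 0;  // per the code above, 0 stands for a full
 *                       // 4-byte field
 *     data        = the new address, copied from spec at offset 12
 *
 * The TTL and destination-address segments have all-zero masks and are
 * skipped, so resource->actions_num grows by exactly one.
 */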
300
301 /**
302  * Convert modify-header set IPv4 address action to DV specification.
303  *
304  * @param[in,out] resource
305  *   Pointer to the modify-header resource.
306  * @param[in] action
307  *   Pointer to action specification.
308  * @param[out] error
309  *   Pointer to the error structure.
310  *
311  * @return
312  *   0 on success, a negative errno value otherwise and rte_errno is set.
313  */
314 static int
315 flow_dv_convert_action_modify_ipv4
316                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
317                          const struct rte_flow_action *action,
318                          struct rte_flow_error *error)
319 {
320         const struct rte_flow_action_set_ipv4 *conf =
321                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
322         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
323         struct rte_flow_item_ipv4 ipv4;
324         struct rte_flow_item_ipv4 ipv4_mask;
325
326         memset(&ipv4, 0, sizeof(ipv4));
327         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
328         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
329                 ipv4.hdr.src_addr = conf->ipv4_addr;
330                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
331         } else {
332                 ipv4.hdr.dst_addr = conf->ipv4_addr;
333                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
334         }
335         item.spec = &ipv4;
336         item.mask = &ipv4_mask;
337         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
338                                              MLX5_MODIFICATION_TYPE_SET, error);
339 }
340
341 /**
342  * Convert modify-header set IPv6 address action to DV specification.
343  *
344  * @param[in,out] resource
345  *   Pointer to the modify-header resource.
346  * @param[in] action
347  *   Pointer to action specification.
348  * @param[out] error
349  *   Pointer to the error structure.
350  *
351  * @return
352  *   0 on success, a negative errno value otherwise and rte_errno is set.
353  */
354 static int
355 flow_dv_convert_action_modify_ipv6
356                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
357                          const struct rte_flow_action *action,
358                          struct rte_flow_error *error)
359 {
360         const struct rte_flow_action_set_ipv6 *conf =
361                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
362         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
363         struct rte_flow_item_ipv6 ipv6;
364         struct rte_flow_item_ipv6 ipv6_mask;
365
366         memset(&ipv6, 0, sizeof(ipv6));
367         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
368         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
369                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
370                        sizeof(ipv6.hdr.src_addr));
371                 memcpy(&ipv6_mask.hdr.src_addr,
372                        &rte_flow_item_ipv6_mask.hdr.src_addr,
373                        sizeof(ipv6.hdr.src_addr));
374         } else {
375                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
376                        sizeof(ipv6.hdr.dst_addr));
377                 memcpy(&ipv6_mask.hdr.dst_addr,
378                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
379                        sizeof(ipv6.hdr.dst_addr));
380         }
381         item.spec = &ipv6;
382         item.mask = &ipv6_mask;
383         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
384                                              MLX5_MODIFICATION_TYPE_SET, error);
385 }
386
387 /**
388  * Convert modify-header set MAC address action to DV specification.
389  *
390  * @param[in,out] resource
391  *   Pointer to the modify-header resource.
392  * @param[in] action
393  *   Pointer to action specification.
394  * @param[out] error
395  *   Pointer to the error structure.
396  *
397  * @return
398  *   0 on success, a negative errno value otherwise and rte_errno is set.
399  */
400 static int
401 flow_dv_convert_action_modify_mac
402                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
403                          const struct rte_flow_action *action,
404                          struct rte_flow_error *error)
405 {
406         const struct rte_flow_action_set_mac *conf =
407                 (const struct rte_flow_action_set_mac *)(action->conf);
408         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
409         struct rte_flow_item_eth eth;
410         struct rte_flow_item_eth eth_mask;
411
412         memset(&eth, 0, sizeof(eth));
413         memset(&eth_mask, 0, sizeof(eth_mask));
414         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
415                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
416                        sizeof(eth.src.addr_bytes));
417                 memcpy(&eth_mask.src.addr_bytes,
418                        &rte_flow_item_eth_mask.src.addr_bytes,
419                        sizeof(eth_mask.src.addr_bytes));
420         } else {
421                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
422                        sizeof(eth.dst.addr_bytes));
423                 memcpy(&eth_mask.dst.addr_bytes,
424                        &rte_flow_item_eth_mask.dst.addr_bytes,
425                        sizeof(eth_mask.dst.addr_bytes));
426         }
427         item.spec = &eth;
428         item.mask = &eth_mask;
429         return flow_dv_convert_modify_action(&item, modify_eth, resource,
430                                              MLX5_MODIFICATION_TYPE_SET, error);
431 }
432
433 /**
434  * Convert modify-header set VLAN VID action to DV specification.
435  *
436  * @param[in,out] resource
437  *   Pointer to the modify-header resource.
438  * @param[in] action
439  *   Pointer to action specification.
440  * @param[out] error
441  *   Pointer to the error structure.
442  *
443  * @return
444  *   0 on success, a negative errno value otherwise and rte_errno is set.
445  */
446 static int
447 flow_dv_convert_action_modify_vlan_vid
448                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
449                          const struct rte_flow_action *action,
450                          struct rte_flow_error *error)
451 {
452         const struct rte_flow_action_of_set_vlan_vid *conf =
453                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
454         int i = resource->actions_num;
455         struct mlx5_modification_cmd *actions = resource->actions;
456         struct field_modify_info *field = modify_vlan_out_first_vid;
457
458         if (i >= MLX5_MODIFY_NUM)
459                 return rte_flow_error_set(error, EINVAL,
460                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
461                          "too many items to modify");
462         actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
463         actions[i].field = field->id;
464         actions[i].length = field->size;
465         actions[i].offset = field->offset;
466         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
467         actions[i].data1 = conf->vlan_vid;
468         actions[i].data1 = actions[i].data1 << 16;
469         resource->actions_num = ++i;
470         return 0;
471 }
472
473 /**
474  * Convert modify-header set TP action to DV specification.
475  *
476  * @param[in,out] resource
477  *   Pointer to the modify-header resource.
478  * @param[in] action
479  *   Pointer to action specification.
480  * @param[in] items
481  *   Pointer to rte_flow_item objects list.
482  * @param[in] attr
483  *   Pointer to flow attributes structure.
484  * @param[out] error
485  *   Pointer to the error structure.
486  *
487  * @return
488  *   0 on success, a negative errno value otherwise and rte_errno is set.
489  */
490 static int
491 flow_dv_convert_action_modify_tp
492                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
493                          const struct rte_flow_action *action,
494                          const struct rte_flow_item *items,
495                          union flow_dv_attr *attr,
496                          struct rte_flow_error *error)
497 {
498         const struct rte_flow_action_set_tp *conf =
499                 (const struct rte_flow_action_set_tp *)(action->conf);
500         struct rte_flow_item item;
501         struct rte_flow_item_udp udp;
502         struct rte_flow_item_udp udp_mask;
503         struct rte_flow_item_tcp tcp;
504         struct rte_flow_item_tcp tcp_mask;
505         struct field_modify_info *field;
506
507         if (!attr->valid)
508                 flow_dv_attr_init(items, attr);
509         if (attr->udp) {
510                 memset(&udp, 0, sizeof(udp));
511                 memset(&udp_mask, 0, sizeof(udp_mask));
512                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
513                         udp.hdr.src_port = conf->port;
514                         udp_mask.hdr.src_port =
515                                         rte_flow_item_udp_mask.hdr.src_port;
516                 } else {
517                         udp.hdr.dst_port = conf->port;
518                         udp_mask.hdr.dst_port =
519                                         rte_flow_item_udp_mask.hdr.dst_port;
520                 }
521                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
522                 item.spec = &udp;
523                 item.mask = &udp_mask;
524                 field = modify_udp;
525         }
526         if (attr->tcp) {
527                 memset(&tcp, 0, sizeof(tcp));
528                 memset(&tcp_mask, 0, sizeof(tcp_mask));
529                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
530                         tcp.hdr.src_port = conf->port;
531                         tcp_mask.hdr.src_port =
532                                         rte_flow_item_tcp_mask.hdr.src_port;
533                 } else {
534                         tcp.hdr.dst_port = conf->port;
535                         tcp_mask.hdr.dst_port =
536                                         rte_flow_item_tcp_mask.hdr.dst_port;
537                 }
538                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
539                 item.spec = &tcp;
540                 item.mask = &tcp_mask;
541                 field = modify_tcp;
542         }
543         return flow_dv_convert_modify_action(&item, field, resource,
544                                              MLX5_MODIFICATION_TYPE_SET, error);
545 }
546
547 /**
548  * Convert modify-header set TTL action to DV specification.
549  *
550  * @param[in,out] resource
551  *   Pointer to the modify-header resource.
552  * @param[in] action
553  *   Pointer to action specification.
554  * @param[in] items
555  *   Pointer to rte_flow_item objects list.
556  * @param[in] attr
557  *   Pointer to flow attributes structure.
558  * @param[out] error
559  *   Pointer to the error structure.
560  *
561  * @return
562  *   0 on success, a negative errno value otherwise and rte_errno is set.
563  */
564 static int
565 flow_dv_convert_action_modify_ttl
566                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
567                          const struct rte_flow_action *action,
568                          const struct rte_flow_item *items,
569                          union flow_dv_attr *attr,
570                          struct rte_flow_error *error)
571 {
572         const struct rte_flow_action_set_ttl *conf =
573                 (const struct rte_flow_action_set_ttl *)(action->conf);
574         struct rte_flow_item item;
575         struct rte_flow_item_ipv4 ipv4;
576         struct rte_flow_item_ipv4 ipv4_mask;
577         struct rte_flow_item_ipv6 ipv6;
578         struct rte_flow_item_ipv6 ipv6_mask;
579         struct field_modify_info *field;
580
581         if (!attr->valid)
582                 flow_dv_attr_init(items, attr);
583         if (attr->ipv4) {
584                 memset(&ipv4, 0, sizeof(ipv4));
585                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
586                 ipv4.hdr.time_to_live = conf->ttl_value;
587                 ipv4_mask.hdr.time_to_live = 0xFF;
588                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
589                 item.spec = &ipv4;
590                 item.mask = &ipv4_mask;
591                 field = modify_ipv4;
592         }
593         if (attr->ipv6) {
594                 memset(&ipv6, 0, sizeof(ipv6));
595                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
596                 ipv6.hdr.hop_limits = conf->ttl_value;
597                 ipv6_mask.hdr.hop_limits = 0xFF;
598                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
599                 item.spec = &ipv6;
600                 item.mask = &ipv6_mask;
601                 field = modify_ipv6;
602         }
603         return flow_dv_convert_modify_action(&item, field, resource,
604                                              MLX5_MODIFICATION_TYPE_SET, error);
605 }
606
607 /**
608  * Convert modify-header decrement TTL action to DV specification.
609  *
610  * @param[in,out] resource
611  *   Pointer to the modify-header resource.
612  * @param[in] action
613  *   Pointer to action specification.
614  * @param[in] items
615  *   Pointer to rte_flow_item objects list.
616  * @param[in] attr
617  *   Pointer to flow attributes structure.
618  * @param[out] error
619  *   Pointer to the error structure.
620  *
621  * @return
622  *   0 on success, a negative errno value otherwise and rte_errno is set.
623  */
624 static int
625 flow_dv_convert_action_modify_dec_ttl
626                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
627                          const struct rte_flow_item *items,
628                          union flow_dv_attr *attr,
629                          struct rte_flow_error *error)
630 {
631         struct rte_flow_item item;
632         struct rte_flow_item_ipv4 ipv4;
633         struct rte_flow_item_ipv4 ipv4_mask;
634         struct rte_flow_item_ipv6 ipv6;
635         struct rte_flow_item_ipv6 ipv6_mask;
636         struct field_modify_info *field;
637
638         if (!attr->valid)
639                 flow_dv_attr_init(items, attr);
640         if (attr->ipv4) {
641                 memset(&ipv4, 0, sizeof(ipv4));
642                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
643                 ipv4.hdr.time_to_live = 0xFF;
644                 ipv4_mask.hdr.time_to_live = 0xFF;
645                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
646                 item.spec = &ipv4;
647                 item.mask = &ipv4_mask;
648                 field = modify_ipv4;
649         }
650         if (attr->ipv6) {
651                 memset(&ipv6, 0, sizeof(ipv6));
652                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
653                 ipv6.hdr.hop_limits = 0xFF;
654                 ipv6_mask.hdr.hop_limits = 0xFF;
655                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
656                 item.spec = &ipv6;
657                 item.mask = &ipv6_mask;
658                 field = modify_ipv6;
659         }
660         return flow_dv_convert_modify_action(&item, field, resource,
661                                              MLX5_MODIFICATION_TYPE_ADD, error);
662 }
663
664 /**
665  * Convert modify-header increment/decrement TCP Sequence number
666  * to DV specification.
667  *
668  * @param[in,out] resource
669  *   Pointer to the modify-header resource.
670  * @param[in] action
671  *   Pointer to action specification.
672  * @param[out] error
673  *   Pointer to the error structure.
674  *
675  * @return
676  *   0 on success, a negative errno value otherwise and rte_errno is set.
677  */
678 static int
679 flow_dv_convert_action_modify_tcp_seq
680                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
681                          const struct rte_flow_action *action,
682                          struct rte_flow_error *error)
683 {
684         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
685         uint64_t value = rte_be_to_cpu_32(*conf);
686         struct rte_flow_item item;
687         struct rte_flow_item_tcp tcp;
688         struct rte_flow_item_tcp tcp_mask;
689
690         memset(&tcp, 0, sizeof(tcp));
691         memset(&tcp_mask, 0, sizeof(tcp_mask));
692         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
693                 /*
694                  * The HW has no decrement operation, only increment.
695                  * To simulate decrementing Y by X with the increment
696                  * operation, add UINT32_MAX to Y X times: each addition
697                  * of UINT32_MAX decrements Y by 1 (modulo 2^32).
698                  */
699                 value *= UINT32_MAX;
700         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
701         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
702         item.type = RTE_FLOW_ITEM_TYPE_TCP;
703         item.spec = &tcp;
704         item.mask = &tcp_mask;
705         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
706                                              MLX5_MODIFICATION_TYPE_ADD, error);
707 }
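/*
 * Why "value *= UINT32_MAX" implements a decrement: the sequence number is
 * 32-bit, so arithmetic wraps modulo 2^32 and UINT32_MAX == -1 (mod 2^32).
 * Multiplying the requested delta X by UINT32_MAX therefore yields -X; for
 * X = 5:
 *
 *     5 * 0xFFFFFFFF == 0x4FFFFFFFB, truncated to 32 bits -> 0xFFFFFFFB,
 *
 * which is 2^32 - 5, so the single ADD command above subtracts 5. The TCP
 * acknowledgment variant below relies on the same trick.
 */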
708
709 /**
710  * Convert modify-header increment/decrement TCP Acknowledgment number
711  * to DV specification.
712  *
713  * @param[in,out] resource
714  *   Pointer to the modify-header resource.
715  * @param[in] action
716  *   Pointer to action specification.
717  * @param[out] error
718  *   Pointer to the error structure.
719  *
720  * @return
721  *   0 on success, a negative errno value otherwise and rte_errno is set.
722  */
723 static int
724 flow_dv_convert_action_modify_tcp_ack
725                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
726                          const struct rte_flow_action *action,
727                          struct rte_flow_error *error)
728 {
729         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
730         uint64_t value = rte_be_to_cpu_32(*conf);
731         struct rte_flow_item item;
732         struct rte_flow_item_tcp tcp;
733         struct rte_flow_item_tcp tcp_mask;
734
735         memset(&tcp, 0, sizeof(tcp));
736         memset(&tcp_mask, 0, sizeof(tcp_mask));
737         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
738                 /*
739                  * The HW has no decrement operation, only increment.
740                  * To simulate decrementing Y by X with the increment
741                  * operation, add UINT32_MAX to Y X times: each addition
742                  * of UINT32_MAX decrements Y by 1 (modulo 2^32).
743                  */
744                 value *= UINT32_MAX;
745         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
746         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
747         item.type = RTE_FLOW_ITEM_TYPE_TCP;
748         item.spec = &tcp;
749         item.mask = &tcp_mask;
750         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
751                                              MLX5_MODIFICATION_TYPE_ADD, error);
752 }
753
754 static enum mlx5_modification_field reg_to_field[] = {
755         [REG_A] = MLX5_MODI_META_DATA_REG_A,
756         [REG_B] = MLX5_MODI_META_DATA_REG_B,
757         [REG_C_0] = MLX5_MODI_META_REG_C_0,
758         [REG_C_1] = MLX5_MODI_META_REG_C_1,
759         [REG_C_2] = MLX5_MODI_META_REG_C_2,
760         [REG_C_3] = MLX5_MODI_META_REG_C_3,
761         [REG_C_4] = MLX5_MODI_META_REG_C_4,
762         [REG_C_5] = MLX5_MODI_META_REG_C_5,
763         [REG_C_6] = MLX5_MODI_META_REG_C_6,
764         [REG_C_7] = MLX5_MODI_META_REG_C_7,
765 };
766
767 /**
768  * Convert register set to DV specification.
769  *
770  * @param[in,out] resource
771  *   Pointer to the modify-header resource.
772  * @param[in] action
773  *   Pointer to action specification.
774  * @param[out] error
775  *   Pointer to the error structure.
776  *
777  * @return
778  *   0 on success, a negative errno value otherwise and rte_errno is set.
779  */
780 static int
781 flow_dv_convert_action_set_reg
782                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
783                          const struct rte_flow_action *action,
784                          struct rte_flow_error *error)
785 {
786         const struct mlx5_rte_flow_action_set_tag *conf = (action->conf);
787         struct mlx5_modification_cmd *actions = resource->actions;
788         uint32_t i = resource->actions_num;
789
790         if (i >= MLX5_MODIFY_NUM)
791                 return rte_flow_error_set(error, EINVAL,
792                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
793                                           "too many items to modify");
794         actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
795         actions[i].field = reg_to_field[conf->id];
796         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
797         actions[i].data1 = conf->data;
798         ++i;
799         resource->actions_num = i;
800         if (!resource->actions_num)
801                 return rte_flow_error_set(error, EINVAL,
802                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
803                                           "invalid modification flow item");
804         return 0;
805 }
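/*
 * Hypothetical internal usage: a mlx5_rte_flow_action_set_tag with
 * id = REG_C_0 and data = 0xA5 would append one SET command whose field is
 * MLX5_MODI_META_REG_C_0 (via reg_to_field above) and whose data1 carries
 * 0xA5, consuming a single slot of the MLX5_MODIFY_NUM budget.
 */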
806
807 /**
808  * Validate META item.
809  *
810  * @param[in] dev
811  *   Pointer to the rte_eth_dev structure.
812  * @param[in] item
813  *   Item specification.
814  * @param[in] attr
815  *   Attributes of flow that includes this item.
816  * @param[out] error
817  *   Pointer to error structure.
818  *
819  * @return
820  *   0 on success, a negative errno value otherwise and rte_errno is set.
821  */
822 static int
823 flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
824                            const struct rte_flow_item *item,
825                            const struct rte_flow_attr *attr,
826                            struct rte_flow_error *error)
827 {
828         const struct rte_flow_item_meta *spec = item->spec;
829         const struct rte_flow_item_meta *mask = item->mask;
830         const struct rte_flow_item_meta nic_mask = {
831                 .data = UINT32_MAX
832         };
833         int ret;
834
835         if (!spec)
836                 return rte_flow_error_set(error, EINVAL,
837                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
838                                           item->spec,
839                                           "data cannot be empty");
840         if (!spec->data)
841                 return rte_flow_error_set(error, EINVAL,
842                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
843                                           NULL,
844                                           "data cannot be zero");
845         if (!mask)
846                 mask = &rte_flow_item_meta_mask;
847         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
848                                         (const uint8_t *)&nic_mask,
849                                         sizeof(struct rte_flow_item_meta),
850                                         error);
851         if (ret < 0)
852                 return ret;
853         if (attr->ingress)
854                 return rte_flow_error_set(error, ENOTSUP,
855                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
856                                           NULL,
857                                           "pattern not supported for ingress");
858         return 0;
859 }
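/*
 * Sketch of a pattern this validator accepts (values hypothetical): an
 * egress rule matching on application-provided metadata.
 *
 *     struct rte_flow_attr attr = { .egress = 1 };
 *     struct rte_flow_item_meta meta = { .data = 0x1234 };
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_META, .spec = &meta,
 *               .mask = &rte_flow_item_meta_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *
 * A NULL spec, a zero data value, or an ingress attribute would each be
 * rejected with the corresponding error above.
 */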
860
861 /**
862  * Validate port_id item.
863  *
864  * @param[in] dev
865  *   Pointer to the rte_eth_dev structure.
866  * @param[in] item
867  *   Item specification.
868  * @param[in] attr
869  *   Attributes of flow that includes this item.
870  * @param[in] item_flags
871  *   Bit-fields that holds the items detected until now.
872  * @param[out] error
873  *   Pointer to error structure.
874  *
875  * @return
876  *   0 on success, a negative errno value otherwise and rte_errno is set.
877  */
878 static int
879 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
880                               const struct rte_flow_item *item,
881                               const struct rte_flow_attr *attr,
882                               uint64_t item_flags,
883                               struct rte_flow_error *error)
884 {
885         const struct rte_flow_item_port_id *spec = item->spec;
886         const struct rte_flow_item_port_id *mask = item->mask;
887         const struct rte_flow_item_port_id switch_mask = {
888                         .id = 0xffffffff,
889         };
890         struct mlx5_priv *esw_priv;
891         struct mlx5_priv *dev_priv;
892         int ret;
893
894         if (!attr->transfer)
895                 return rte_flow_error_set(error, EINVAL,
896                                           RTE_FLOW_ERROR_TYPE_ITEM,
897                                           NULL,
898                                           "match on port id is valid only"
899                                           " when transfer flag is enabled");
900         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
901                 return rte_flow_error_set(error, ENOTSUP,
902                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
903                                           "multiple source ports are not"
904                                           " supported");
905         if (!mask)
906                 mask = &switch_mask;
907         if (mask->id != 0xffffffff)
908                 return rte_flow_error_set(error, ENOTSUP,
909                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
910                                            mask,
911                                            "no support for partial mask on"
912                                            " \"id\" field");
913         ret = mlx5_flow_item_acceptable
914                                 (item, (const uint8_t *)mask,
915                                  (const uint8_t *)&rte_flow_item_port_id_mask,
916                                  sizeof(struct rte_flow_item_port_id),
917                                  error);
918         if (ret)
919                 return ret;
920         if (!spec)
921                 return 0;
922         esw_priv = mlx5_port_to_eswitch_info(spec->id);
923         if (!esw_priv)
924                 return rte_flow_error_set(error, rte_errno,
925                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
926                                           "failed to obtain E-Switch info for"
927                                           " port");
928         dev_priv = mlx5_dev_to_eswitch_info(dev);
929         if (!dev_priv)
930                 return rte_flow_error_set(error, rte_errno,
931                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
932                                           NULL,
933                                           "failed to obtain E-Switch info");
934         if (esw_priv->domain_id != dev_priv->domain_id)
935                 return rte_flow_error_set(error, EINVAL,
936                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
937                                           "cannot match on a port from a"
938                                           " different E-Switch");
939         return 0;
940 }
941
942 /**
943  * Validate the pop VLAN action.
944  *
945  * @param[in] dev
946  *   Pointer to the rte_eth_dev structure.
947  * @param[in] action_flags
948  *   Holds the actions detected until now.
949  * @param[in] action
950  *   Pointer to the pop vlan action.
951  * @param[in] item_flags
952  *   The items found in this flow rule.
953  * @param[in] attr
954  *   Pointer to flow attributes.
955  * @param[out] error
956  *   Pointer to error structure.
957  *
958  * @return
959  *   0 on success, a negative errno value otherwise and rte_errno is set.
960  */
961 static int
962 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
963                                  uint64_t action_flags,
964                                  const struct rte_flow_action *action,
965                                  uint64_t item_flags,
966                                  const struct rte_flow_attr *attr,
967                                  struct rte_flow_error *error)
968 {
969         struct mlx5_priv *priv = dev->data->dev_private;
970
971         (void)action;
972         (void)attr;
973         if (!priv->sh->pop_vlan_action)
974                 return rte_flow_error_set(error, ENOTSUP,
975                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
976                                           NULL,
977                                           "pop vlan action is not supported");
978         /*
979          * Check for inconsistencies:
980          *  fail strip_vlan in a flow that matches packets without VLAN tags;
981          *  fail strip_vlan in a flow that does not explicitly match on a
982          *  VLAN tag.
983          */
984         if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
985                 return rte_flow_error_set(error, ENOTSUP,
986                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
987                                           NULL,
988                                           "no support for multiple vlan pop "
989                                           "actions");
990         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
991                 return rte_flow_error_set(error, ENOTSUP,
992                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
993                                           NULL,
994                                           "cannot pop vlan without a "
995                                           "match on (outer) vlan in the flow");
996         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
997                 return rte_flow_error_set(error, EINVAL,
998                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
999                                           "wrong action order, port_id should "
1000                                           "be after pop VLAN action");
1001         return 0;
1002 }
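/*
 * Sketch of a rule this validator accepts (hypothetical, in testpmd-like
 * notation): the pattern must match an outer VLAN and of_pop_vlan must
 * precede port_id:
 *
 *     pattern: eth / vlan / ipv4 / end
 *     actions: of_pop_vlan / port_id id 1 / end
 *
 * A second of_pop_vlan, a pattern without a VLAN item, or port_id placed
 * before of_pop_vlan would each fail with the errors above.
 */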
1003
1004 /**
1005  * Get VLAN default info from vlan match info.
1006  *
1007  * @param[in] items
1008  *   Pointer to the list of item specifications.
1009  * @param[out] vlan
1010  *   Pointer to the VLAN info to fill in.
1011  */
1019 static void
1020 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1021                                   struct rte_vlan_hdr *vlan)
1022 {
1023         const struct rte_flow_item_vlan nic_mask = {
1024                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1025                                 MLX5DV_FLOW_VLAN_VID_MASK),
1026                 .inner_type = RTE_BE16(0xffff),
1027         };
1028
1029         if (items == NULL)
1030                 return;
1031         for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
1032                items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
1033                 ;
1034         if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1035                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1036                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1037
1038                 if (!vlan_m)
1039                         vlan_m = &nic_mask;
1040                 /* Only full match values are accepted */
1041                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1042                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1043                         vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
1044                         vlan->vlan_tci |=
1045                                 rte_be_to_cpu_16(vlan_v->tci &
1046                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1047                 }
1048                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1049                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1050                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1051                         vlan->vlan_tci |=
1052                                 rte_be_to_cpu_16(vlan_v->tci &
1053                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1054                 }
1055                 if (vlan_m->inner_type == nic_mask.inner_type)
1056                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1057                                                            vlan_m->inner_type);
1058         }
1059 }
1060
1061 /**
1062  * Validate the push VLAN action.
1063  *
1064  * @param[in] action_flags
1065  *   Holds the actions detected until now.
1066  * @param[in] item_flags
1067  *   Holds the items detected until now.
1068  * @param[in] action
1069  *   Pointer to the push VLAN action.
1070  * @param[in] attr
1071  *   Pointer to flow attributes.
1070  * @param[out] error
1071  *   Pointer to error structure.
1072  *
1073  * @return
1074  *   0 on success, a negative errno value otherwise and rte_errno is set.
1075  */
1076 static int
1077 flow_dv_validate_action_push_vlan(uint64_t action_flags,
1078                                   uint64_t item_flags,
1079                                   const struct rte_flow_action *action,
1080                                   const struct rte_flow_attr *attr,
1081                                   struct rte_flow_error *error)
1082 {
1083         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1084
1085         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1086             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1087                 return rte_flow_error_set(error, EINVAL,
1088                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1089                                           "invalid vlan ethertype");
1090         if (action_flags &
1091                 (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1092                 return rte_flow_error_set(error, ENOTSUP,
1093                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1094                                           "no support for multiple VLAN "
1095                                           "actions");
1096         if (!mlx5_flow_find_action
1097                         (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
1098             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1099                 return rte_flow_error_set(error, ENOTSUP,
1100                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
1101                                 "push VLAN needs to match on VLAN in order to "
1102                                 "get VLAN VID information because there is "
1103                                 "no followed set VLAN VID action");
1104         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1105                 return rte_flow_error_set(error, EINVAL,
1106                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1107                                           "wrong action order, port_id should "
1108                                           "be after push VLAN");
1109         (void)attr;
1110         return 0;
1111 }
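/*
 * Sketch of a rule this validator accepts (hypothetical, in testpmd-like
 * notation): the pushed header gets its VID either from a following
 * of_set_vlan_vid action or from a VLAN match in the pattern:
 *
 *     pattern: eth / ipv4 / end
 *     actions: of_push_vlan ethertype 0x8100 / of_set_vlan_vid vlan_vid 100
 *              / port_id id 1 / end
 *
 * Mixing of_push_vlan with of_pop_vlan, or placing port_id before the VLAN
 * actions, is rejected above.
 */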
1112
1113 /**
1114  * Validate the set VLAN PCP.
1115  *
1116  * @param[in] action_flags
1117  *   Holds the actions detected until now.
1118  * @param[in] actions
1119  *   Pointer to the list of actions remaining in the flow rule.
1122  * @param[out] error
1123  *   Pointer to error structure.
1124  *
1125  * @return
1126  *   0 on success, a negative errno value otherwise and rte_errno is set.
1127  */
1128 static int
1129 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1130                                      const struct rte_flow_action actions[],
1131                                      struct rte_flow_error *error)
1132 {
1133         const struct rte_flow_action *action = actions;
1134         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
1135
1136         if (conf->vlan_pcp > 7)
1137                 return rte_flow_error_set(error, EINVAL,
1138                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1139                                           "VLAN PCP value is too big");
1140         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1141                 return rte_flow_error_set(error, ENOTSUP,
1142                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1143                                           "set VLAN PCP action must follow "
1144                                           "the push VLAN action");
1145         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
1146                 return rte_flow_error_set(error, ENOTSUP,
1147                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1148                                           "Multiple VLAN PCP modification are "
1149                                           "not supported");
1150         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1151                 return rte_flow_error_set(error, EINVAL,
1152                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1153                                           "wrong action order, port_id should "
1154                                           "be after set VLAN PCP");
1155         return 0;
1156 }
1157
1158 /**
1159  * Validate the set VLAN VID.
1160  *
1161  * @param[in] item_flags
1162  * @param[in] item_flags
1163  *   Holds the items detected in this rule.
1164  * @param[in] action_flags
1165  *   Holds the actions detected until now.
1166  * @param[in] actions
1167  *   Pointer to the list of actions remaining in the flow rule.
1168  *   Pointer to error structure.
1169  *
1170  * @return
1171  *   0 on success, a negative errno value otherwise and rte_errno is set.
1172  */
1173 static int
1174 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1175                                      uint64_t action_flags,
1176                                      const struct rte_flow_action actions[],
1177                                      struct rte_flow_error *error)
1178 {
1179         const struct rte_flow_action *action = actions;
1180         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1181
1182         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
1183                 return rte_flow_error_set(error, EINVAL,
1184                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1185                                           "VLAN VID value is too big");
1186         /* there is an of_push_vlan action before us */
1187         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
1188                 if (mlx5_flow_find_action(actions + 1,
1189                                           RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
1190                         return rte_flow_error_set(error, ENOTSUP,
1191                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
1192                                         "Multiple VLAN VID modifications are "
1193                                         "not supported");
1194                 else
1195                         return 0;
1196         }
1197
1198         /*
1199          * Action is on an existing VLAN header:
1200          *   need to verify this is a single modify VID action, and the
1201          *   rule must include a match on the outer VLAN.
1202          */
1203         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1204                 return rte_flow_error_set(error, ENOTSUP,
1205                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1206                                           "Multiple VLAN VID modifications are "
1207                                           "not supported");
1208         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1209                 return rte_flow_error_set(error, EINVAL,
1210                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1211                                           "match on VLAN is required in order "
1212                                           "to set VLAN VID");
1213         if (action_flags & MLX5_FLOW_ACTION_PORT_ID)
1214                 return rte_flow_error_set(error, EINVAL,
1215                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1216                                           "wrong action order, port_id should "
1217                                           "be after set VLAN VID");
1218         return 0;
1219 }
1220
1221 /**
1222  * Validate count action.
1223  *
1224  * @param[in] dev
1225  *   Pointer to the rte_eth_dev structure.
1226  * @param[out] error
1227  *   Pointer to error structure.
1228  *
1229  * @return
1230  *   0 on success, a negative errno value otherwise and rte_errno is set.
1231  */
1232 static int
1233 flow_dv_validate_action_count(struct rte_eth_dev *dev,
1234                               struct rte_flow_error *error)
1235 {
1236         struct mlx5_priv *priv = dev->data->dev_private;
1237
1238         if (!priv->config.devx)
1239                 goto notsup_err;
1240 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
1241         return 0;
1242 #endif
1243 notsup_err:
1244         return rte_flow_error_set
1245                       (error, ENOTSUP,
1246                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1247                        NULL,
1248                        "count action not supported");
1249 }
1250
1251 /**
1252  * Validate the L2 encap action.
1253  *
1254  * @param[in] action_flags
1255  *   Holds the actions detected until now.
1256  * @param[in] action
1257  *   Pointer to the encap action.
1258  * @param[in] attr
1259  *   Pointer to flow attributes
1260  * @param[out] error
1261  *   Pointer to error structure.
1262  *
1263  * @return
1264  *   0 on success, a negative errno value otherwise and rte_errno is set.
1265  */
1266 static int
1267 flow_dv_validate_action_l2_encap(uint64_t action_flags,
1268                                  const struct rte_flow_action *action,
1269                                  const struct rte_flow_attr *attr,
1270                                  struct rte_flow_error *error)
1271 {
1272         if (!(action->conf))
1273                 return rte_flow_error_set(error, EINVAL,
1274                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1275                                           "configuration cannot be null");
1276         if (action_flags & MLX5_FLOW_ACTION_DROP)
1277                 return rte_flow_error_set(error, EINVAL,
1278                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1279                                           "can't drop and encap in same flow");
1280         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1281                 return rte_flow_error_set(error, EINVAL,
1282                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1283                                           "can only have a single encap or"
1284                                           " decap action in a flow");
1285         if (!attr->transfer && attr->ingress)
1286                 return rte_flow_error_set(error, ENOTSUP,
1287                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1288                                           NULL,
1289                                           "encap action not supported for "
1290                                           "ingress");
1291         return 0;
1292 }
1293
1294 /**
1295  * Validate the L2 decap action.
1296  *
1297  * @param[in] action_flags
1298  *   Holds the actions detected until now.
1299  * @param[in] attr
1300  *   Pointer to flow attributes
1301  * @param[out] error
1302  *   Pointer to error structure.
1303  *
1304  * @return
1305  *   0 on success, a negative errno value otherwise and rte_errno is set.
1306  */
1307 static int
1308 flow_dv_validate_action_l2_decap(uint64_t action_flags,
1309                                  const struct rte_flow_attr *attr,
1310                                  struct rte_flow_error *error)
1311 {
1312         if (action_flags & MLX5_FLOW_ACTION_DROP)
1313                 return rte_flow_error_set(error, EINVAL,
1314                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1315                                           "can't drop and decap in same flow");
1316         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1317                 return rte_flow_error_set(error, EINVAL,
1318                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1319                                           "can only have a single encap or"
1320                                           " decap action in a flow");
1321         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1322                 return rte_flow_error_set(error, EINVAL,
1323                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1324                                           "can't have decap action after"
1325                                           " modify action");
1326         if (attr->egress)
1327                 return rte_flow_error_set(error, ENOTSUP,
1328                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1329                                           NULL,
1330                                           "decap action not supported for "
1331                                           "egress");
1332         return 0;
1333 }
1334
1335 /**
1336  * Validate the raw encap action.
1337  *
1338  * @param[in] action_flags
1339  *   Holds the actions detected until now.
1340  * @param[in] action
1341  *   Pointer to the encap action.
1342  * @param[in] attr
1343  *   Pointer to flow attributes.
1344  * @param[out] error
1345  *   Pointer to error structure.
1346  *
1347  * @return
1348  *   0 on success, a negative errno value otherwise and rte_errno is set.
1349  */
1350 static int
1351 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1352                                   const struct rte_flow_action *action,
1353                                   const struct rte_flow_attr *attr,
1354                                   struct rte_flow_error *error)
1355 {
1356         const struct rte_flow_action_raw_encap *raw_encap =
1357                 (const struct rte_flow_action_raw_encap *)action->conf;
1358         if (!(action->conf))
1359                 return rte_flow_error_set(error, EINVAL,
1360                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1361                                           "configuration cannot be null");
1362         if (action_flags & MLX5_FLOW_ACTION_DROP)
1363                 return rte_flow_error_set(error, EINVAL,
1364                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1365                                           "can't drop and encap in same flow");
1366         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1367                 return rte_flow_error_set(error, EINVAL,
1368                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1369                                           "can only have a single encap"
1370                                           " action in a flow");
1371         /* Encap without preceding decap is not supported for ingress. */
1372         if (!attr->transfer && attr->ingress &&
1373             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1374                 return rte_flow_error_set(error, ENOTSUP,
1375                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1376                                           NULL,
1377                                           "encap action not supported for "
1378                                           "ingress");
1379         if (!raw_encap->size || !raw_encap->data)
1380                 return rte_flow_error_set(error, EINVAL,
1381                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1382                                           "raw encap data cannot be empty");
1383         return 0;
1384 }
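
/**
 * A caller-side sketch (illustrative only, not part of the driver) of
 * a raw encap action that passes the checks above; the header buffer
 * is hypothetical and assumed to be prebuilt elsewhere:
 *
 * @code
 *	uint8_t encap_hdr[50]; // prebuilt Ethernet/IPv4/UDP/VXLAN header
 *	struct rte_flow_action_raw_encap conf = {
 *		.data = encap_hdr,
 *		.size = sizeof(encap_hdr),
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP,
 *		.conf = &conf,
 *	};
 * @endcode
 *
 * A NULL data pointer or a zero size is rejected above with EINVAL.
 */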
1385
1386 /**
1387  * Validate the raw decap action.
1388  *
1389  * @param[in] action_flags
1390  *   Holds the actions detected until now.
1391  * @param[in] action
1392  *   Pointer to the decap action.
1393  * @param[in] attr
1394  *   Pointer to flow attributes.
1395  * @param[out] error
1396  *   Pointer to error structure.
1397  *
1398  * @return
1399  *   0 on success, a negative errno value otherwise and rte_errno is set.
1400  */
1401 static int
1402 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1403                                   const struct rte_flow_action *action,
1404                                   const struct rte_flow_attr *attr,
1405                                   struct rte_flow_error *error)
1406 {
1407         if (action_flags & MLX5_FLOW_ACTION_DROP)
1408                 return rte_flow_error_set(error, EINVAL,
1409                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1410                                           "can't drop and decap in same flow");
1411         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1412                 return rte_flow_error_set(error, EINVAL,
1413                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1414                                           "can't have encap action before"
1415                                           " decap action");
1416         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1417                 return rte_flow_error_set(error, EINVAL,
1418                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1419                                           "can only have a single decap"
1420                                           " action in a flow");
1421         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1422                 return rte_flow_error_set(error, EINVAL,
1423                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1424                                           "can't have decap action after"
1425                                           " modify action");
1426         /* Decap action is valid on egress only if it is followed by encap. */
1427         if (attr->egress) {
1428                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1429                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1430                        action++) {
1431                 }
1432                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1433                         return rte_flow_error_set
1434                                         (error, ENOTSUP,
1435                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1436                                          NULL, "decap action not supported"
1437                                          " for egress");
1438         }
1439         return 0;
1440 }
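
/**
 * Illustrative egress action sequence (not part of the driver): a raw
 * decap is accepted on egress only when a raw encap follows it in the
 * same flow, e.g. to replace an L3 tunnel header. The conf pointers
 * are hypothetical:
 *
 * @code
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 * @endcode
 *
 * Without the RAW_ENCAP the scan above reaches END and the flow is
 * rejected with ENOTSUP.
 */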
1441
1442 /**
1443  * Find existing encap/decap resource or create and register a new one.
1444  *
1445  * @param[in, out] dev
1446  *   Pointer to rte_eth_dev structure.
1447  * @param[in, out] resource
1448  *   Pointer to encap/decap resource.
1449  * @param[in, out] dev_flow
1450  *   Pointer to the dev_flow.
1451  * @param[out] error
1452  *   Pointer to error structure.
1453  *
1454  * @return
1455  *   0 on success, a negative errno value otherwise and rte_errno is set.
1456  */
1457 static int
1458 flow_dv_encap_decap_resource_register
1459                         (struct rte_eth_dev *dev,
1460                          struct mlx5_flow_dv_encap_decap_resource *resource,
1461                          struct mlx5_flow *dev_flow,
1462                          struct rte_flow_error *error)
1463 {
1464         struct mlx5_priv *priv = dev->data->dev_private;
1465         struct mlx5_ibv_shared *sh = priv->sh;
1466         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1467         struct rte_flow *flow = dev_flow->flow;
1468         struct mlx5dv_dr_domain *domain;
1469
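        /* Reformat actions on the root table (group 0) need the root-level flag. */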
1470         resource->flags = flow->group ? 0 : 1;
1471         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1472                 domain = sh->fdb_domain;
1473         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1474                 domain = sh->rx_domain;
1475         else
1476                 domain = sh->tx_domain;
1477
1478         /* Lookup a matching resource from cache. */
1479         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1480                 if (resource->reformat_type == cache_resource->reformat_type &&
1481                     resource->ft_type == cache_resource->ft_type &&
1482                     resource->flags == cache_resource->flags &&
1483                     resource->size == cache_resource->size &&
1484                     !memcmp((const void *)resource->buf,
1485                             (const void *)cache_resource->buf,
1486                             resource->size)) {
1487                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1488                                 (void *)cache_resource,
1489                                 rte_atomic32_read(&cache_resource->refcnt));
1490                         rte_atomic32_inc(&cache_resource->refcnt);
1491                         dev_flow->dv.encap_decap = cache_resource;
1492                         return 0;
1493                 }
1494         }
1495         /* Register new encap/decap resource. */
1496         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1497         if (!cache_resource)
1498                 return rte_flow_error_set(error, ENOMEM,
1499                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1500                                           "cannot allocate resource memory");
1501         *cache_resource = *resource;
1502         cache_resource->verbs_action =
1503                 mlx5_glue->dv_create_flow_action_packet_reformat
1504                         (sh->ctx, cache_resource->reformat_type,
1505                          cache_resource->ft_type, domain, cache_resource->flags,
1506                          cache_resource->size,
1507                          (cache_resource->size ? cache_resource->buf : NULL));
1508         if (!cache_resource->verbs_action) {
1509                 rte_free(cache_resource);
1510                 return rte_flow_error_set(error, ENOMEM,
1511                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1512                                           NULL, "cannot create action");
1513         }
1514         rte_atomic32_init(&cache_resource->refcnt);
1515         rte_atomic32_inc(&cache_resource->refcnt);
1516         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1517         dev_flow->dv.encap_decap = cache_resource;
1518         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1519                 (void *)cache_resource,
1520                 rte_atomic32_read(&cache_resource->refcnt));
1521         return 0;
1522 }
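
/**
 * A minimal caller-side sketch (names and sizes hypothetical): fill a
 * descriptor on the stack, copy the header bytes into res.buf, and let
 * the routine above reuse a cached verbs action or create a new one:
 *
 * @code
 *	struct mlx5_flow_dv_encap_decap_resource res = {
 *		.reformat_type =
 *		   MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
 *		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
 *	};
 *
 *	memcpy(res.buf, hdr, hdr_len);
 *	res.size = hdr_len;
 *	if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow,
 *						  error))
 *		return -rte_errno;
 * @endcode
 *
 * On success dev_flow->dv.encap_decap points at the shared, reference
 * counted resource.
 */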
1523
1524 /**
1525  * Find existing table jump resource or create and register a new one.
1526  *
1527  * @param[in, out] dev
1528  *   Pointer to rte_eth_dev structure.
1529  * @param[in, out] resource
1530  *   Pointer to jump table resource.
1531  * @param[in, out] dev_flow
1532  *   Pointer to the dev_flow.
1533  * @param[out] error
1534  *   Pointer to error structure.
1535  *
1536  * @return
1537  *   0 on success, a negative errno value otherwise and rte_errno is set.
1538  */
1539 static int
1540 flow_dv_jump_tbl_resource_register
1541                         (struct rte_eth_dev *dev,
1542                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1543                          struct mlx5_flow *dev_flow,
1544                          struct rte_flow_error *error)
1545 {
1546         struct mlx5_priv *priv = dev->data->dev_private;
1547         struct mlx5_ibv_shared *sh = priv->sh;
1548         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1549
1550         /* Lookup a matching resource from cache. */
1551         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1552                 if (resource->tbl == cache_resource->tbl) {
1553                         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1554                                 (void *)cache_resource,
1555                                 rte_atomic32_read(&cache_resource->refcnt));
1556                         rte_atomic32_inc(&cache_resource->refcnt);
1557                         dev_flow->dv.jump = cache_resource;
1558                         return 0;
1559                 }
1560         }
1561         /* Register new jump table resource. */
1562         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1563         if (!cache_resource)
1564                 return rte_flow_error_set(error, ENOMEM,
1565                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1566                                           "cannot allocate resource memory");
1567         *cache_resource = *resource;
1568         cache_resource->action =
1569                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1570                 (resource->tbl->obj);
1571         if (!cache_resource->action) {
1572                 rte_free(cache_resource);
1573                 return rte_flow_error_set(error, ENOMEM,
1574                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1575                                           NULL, "cannot create action");
1576         }
1577         rte_atomic32_init(&cache_resource->refcnt);
1578         rte_atomic32_inc(&cache_resource->refcnt);
1579         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1580         dev_flow->dv.jump = cache_resource;
1581         DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1582                 (void *)cache_resource,
1583                 rte_atomic32_read(&cache_resource->refcnt));
1584         return 0;
1585 }
1586
1587 /**
1588  * Find existing port ID action resource or create and register a new one.
1589  *
1590  * @param[in, out] dev
1591  *   Pointer to rte_eth_dev structure.
1592  * @param[in, out] resource
1593  *   Pointer to port ID action resource.
1594  * @param[in, out] dev_flow
1595  *   Pointer to the dev_flow.
1596  * @param[out] error
1597  *   Pointer to error structure.
1598  *
1599  * @return
1600  *   0 on success, a negative errno value otherwise and rte_errno is set.
1601  */
1602 static int
1603 flow_dv_port_id_action_resource_register
1604                         (struct rte_eth_dev *dev,
1605                          struct mlx5_flow_dv_port_id_action_resource *resource,
1606                          struct mlx5_flow *dev_flow,
1607                          struct rte_flow_error *error)
1608 {
1609         struct mlx5_priv *priv = dev->data->dev_private;
1610         struct mlx5_ibv_shared *sh = priv->sh;
1611         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1612
1613         /* Lookup a matching resource from cache. */
1614         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1615                 if (resource->port_id == cache_resource->port_id) {
1616                         DRV_LOG(DEBUG, "port id action resource %p: "
1617                                 "refcnt %d++",
1618                                 (void *)cache_resource,
1619                                 rte_atomic32_read(&cache_resource->refcnt));
1620                         rte_atomic32_inc(&cache_resource->refcnt);
1621                         dev_flow->dv.port_id_action = cache_resource;
1622                         return 0;
1623                 }
1624         }
1625         /* Register new port id action resource. */
1626         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1627         if (!cache_resource)
1628                 return rte_flow_error_set(error, ENOMEM,
1629                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1630                                           "cannot allocate resource memory");
1631         *cache_resource = *resource;
1632         cache_resource->action =
1633                 mlx5_glue->dr_create_flow_action_dest_vport
1634                         (priv->sh->fdb_domain, resource->port_id);
1635         if (!cache_resource->action) {
1636                 rte_free(cache_resource);
1637                 return rte_flow_error_set(error, ENOMEM,
1638                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1639                                           NULL, "cannot create action");
1640         }
1641         rte_atomic32_init(&cache_resource->refcnt);
1642         rte_atomic32_inc(&cache_resource->refcnt);
1643         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1644         dev_flow->dv.port_id_action = cache_resource;
1645         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1646                 (void *)cache_resource,
1647                 rte_atomic32_read(&cache_resource->refcnt));
1648         return 0;
1649 }
1650
1651 /**
1652  * Find existing push vlan resource or create and register a new one.
1653  *
1654  * @param[in, out] dev
1655  *   Pointer to rte_eth_dev structure.
1656  * @param[in, out] resource
1657  *   Pointer to push VLAN action resource.
1658  * @param[in, out] dev_flow
1659  *   Pointer to the dev_flow.
1660  * @param[out] error
1661  *   Pointer to error structure.
1662  *
1663  * @return
1664  *   0 on success, a negative errno value otherwise and rte_errno is set.
1665  */
1666 static int
1667 flow_dv_push_vlan_action_resource_register
1668                        (struct rte_eth_dev *dev,
1669                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1670                         struct mlx5_flow *dev_flow,
1671                         struct rte_flow_error *error)
1672 {
1673         struct mlx5_priv *priv = dev->data->dev_private;
1674         struct mlx5_ibv_shared *sh = priv->sh;
1675         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1676         struct mlx5dv_dr_domain *domain;
1677
1678         /* Lookup a matching resource from cache. */
1679         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1680                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1681                     resource->ft_type == cache_resource->ft_type) {
1682                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
1683                                 "refcnt %d++",
1684                                 (void *)cache_resource,
1685                                 rte_atomic32_read(&cache_resource->refcnt));
1686                         rte_atomic32_inc(&cache_resource->refcnt);
1687                         dev_flow->dv.push_vlan_res = cache_resource;
1688                         return 0;
1689                 }
1690         }
1691         /* Register new push_vlan action resource. */
1692         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1693         if (!cache_resource)
1694                 return rte_flow_error_set(error, ENOMEM,
1695                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1696                                           "cannot allocate resource memory");
1697         *cache_resource = *resource;
1698         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1699                 domain = sh->fdb_domain;
1700         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1701                 domain = sh->rx_domain;
1702         else
1703                 domain = sh->tx_domain;
1704         cache_resource->action =
1705                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1706                                                            resource->vlan_tag);
1707         if (!cache_resource->action) {
1708                 rte_free(cache_resource);
1709                 return rte_flow_error_set(error, ENOMEM,
1710                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1711                                           NULL, "cannot create action");
1712         }
1713         rte_atomic32_init(&cache_resource->refcnt);
1714         rte_atomic32_inc(&cache_resource->refcnt);
1715         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1716         dev_flow->dv.push_vlan_res = cache_resource;
1717         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1718                 (void *)cache_resource,
1719                 rte_atomic32_read(&cache_resource->refcnt));
1720         return 0;
1721 }

1722 /**
1723  * Get the size of a specific rte_flow_item_type.
1724  *
1725  * @param[in] item_type
1726  *   Tested rte_flow_item_type.
1727  *
1728  * @return
1729  *   Size of the matching rte_flow_item_* structure, 0 if void or irrelevant.
1730  */
1731 static size_t
1732 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1733 {
1734         size_t retval;
1735
1736         switch (item_type) {
1737         case RTE_FLOW_ITEM_TYPE_ETH:
1738                 retval = sizeof(struct rte_flow_item_eth);
1739                 break;
1740         case RTE_FLOW_ITEM_TYPE_VLAN:
1741                 retval = sizeof(struct rte_flow_item_vlan);
1742                 break;
1743         case RTE_FLOW_ITEM_TYPE_IPV4:
1744                 retval = sizeof(struct rte_flow_item_ipv4);
1745                 break;
1746         case RTE_FLOW_ITEM_TYPE_IPV6:
1747                 retval = sizeof(struct rte_flow_item_ipv6);
1748                 break;
1749         case RTE_FLOW_ITEM_TYPE_UDP:
1750                 retval = sizeof(struct rte_flow_item_udp);
1751                 break;
1752         case RTE_FLOW_ITEM_TYPE_TCP:
1753                 retval = sizeof(struct rte_flow_item_tcp);
1754                 break;
1755         case RTE_FLOW_ITEM_TYPE_VXLAN:
1756                 retval = sizeof(struct rte_flow_item_vxlan);
1757                 break;
1758         case RTE_FLOW_ITEM_TYPE_GRE:
1759                 retval = sizeof(struct rte_flow_item_gre);
1760                 break;
1761         case RTE_FLOW_ITEM_TYPE_NVGRE:
1762                 retval = sizeof(struct rte_flow_item_nvgre);
1763                 break;
1764         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1765                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1766                 break;
1767         case RTE_FLOW_ITEM_TYPE_MPLS:
1768                 retval = sizeof(struct rte_flow_item_mpls);
1769                 break;
1770         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1771         default:
1772                 retval = 0;
1773                 break;
1774         }
1775         return retval;
1776 }
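
/**
 * A sketch of how the helper combines with an item list (the items
 * pointer is hypothetical):
 *
 * @code
 *	size_t total = 0;
 *	const struct rte_flow_item *it;
 *
 *	for (it = items; it->type != RTE_FLOW_ITEM_TYPE_END; it++)
 *		total += flow_dv_get_item_len(it->type);
 * @endcode
 *
 * flow_dv_convert_encap_data() below performs the same accounting
 * incrementally to bound the copies into the encap buffer.
 */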
1777
1778 #define MLX5_ENCAP_IPV4_VERSION         0x40
1779 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1780 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1781 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1782 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1783 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1784 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1785
1786 /**
1787  * Convert the encap action data from a list of rte_flow_item to a raw buffer.
1788  *
1789  * @param[in] items
1790  *   Pointer to rte_flow_item objects list.
1791  * @param[out] buf
1792  *   Pointer to the output buffer.
1793  * @param[out] size
1794  *   Pointer to the output buffer size.
1795  * @param[out] error
1796  *   Pointer to the error structure.
1797  *
1798  * @return
1799  *   0 on success, a negative errno value otherwise and rte_errno is set.
1800  */
1801 static int
1802 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1803                            size_t *size, struct rte_flow_error *error)
1804 {
1805         struct rte_ether_hdr *eth = NULL;
1806         struct rte_vlan_hdr *vlan = NULL;
1807         struct rte_ipv4_hdr *ipv4 = NULL;
1808         struct rte_ipv6_hdr *ipv6 = NULL;
1809         struct rte_udp_hdr *udp = NULL;
1810         struct rte_vxlan_hdr *vxlan = NULL;
1811         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1812         struct rte_gre_hdr *gre = NULL;
1813         size_t len;
1814         size_t temp_size = 0;
1815
1816         if (!items)
1817                 return rte_flow_error_set(error, EINVAL,
1818                                           RTE_FLOW_ERROR_TYPE_ACTION,
1819                                           NULL, "invalid empty data");
1820         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1821                 len = flow_dv_get_item_len(items->type);
1822                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1823                         return rte_flow_error_set(error, EINVAL,
1824                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1825                                                   (void *)items->type,
1826                                                   "items total size is too big"
1827                                                   " for encap action");
1828                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1829                 switch (items->type) {
1830                 case RTE_FLOW_ITEM_TYPE_ETH:
1831                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1832                         break;
1833                 case RTE_FLOW_ITEM_TYPE_VLAN:
1834                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1835                         if (!eth)
1836                                 return rte_flow_error_set(error, EINVAL,
1837                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1838                                                 (void *)items->type,
1839                                                 "eth header not found");
1840                         if (!eth->ether_type)
1841                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1842                         break;
1843                 case RTE_FLOW_ITEM_TYPE_IPV4:
1844                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1845                         if (!vlan && !eth)
1846                                 return rte_flow_error_set(error, EINVAL,
1847                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1848                                                 (void *)items->type,
1849                                                 "neither eth nor vlan"
1850                                                 " header found");
1851                         if (vlan && !vlan->eth_proto)
1852                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1853                         else if (eth && !eth->ether_type)
1854                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1855                         if (!ipv4->version_ihl)
1856                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1857                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1858                         if (!ipv4->time_to_live)
1859                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1860                         break;
1861                 case RTE_FLOW_ITEM_TYPE_IPV6:
1862                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1863                         if (!vlan && !eth)
1864                                 return rte_flow_error_set(error, EINVAL,
1865                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1866                                                 (void *)items->type,
1867                                                 "neither eth nor vlan"
1868                                                 " header found");
1869                         if (vlan && !vlan->eth_proto)
1870                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1871                         else if (eth && !eth->ether_type)
1872                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1873                         if (!ipv6->vtc_flow)
1874                                 ipv6->vtc_flow =
1875                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1876                         if (!ipv6->hop_limits)
1877                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1878                         break;
1879                 case RTE_FLOW_ITEM_TYPE_UDP:
1880                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1881                         if (!ipv4 && !ipv6)
1882                                 return rte_flow_error_set(error, EINVAL,
1883                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1884                                                 (void *)items->type,
1885                                                 "ip header not found");
1886                         if (ipv4 && !ipv4->next_proto_id)
1887                                 ipv4->next_proto_id = IPPROTO_UDP;
1888                         else if (ipv6 && !ipv6->proto)
1889                                 ipv6->proto = IPPROTO_UDP;
1890                         break;
1891                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1892                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1893                         if (!udp)
1894                                 return rte_flow_error_set(error, EINVAL,
1895                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1896                                                 (void *)items->type,
1897                                                 "udp header not found");
1898                         if (!udp->dst_port)
1899                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1900                         if (!vxlan->vx_flags)
1901                                 vxlan->vx_flags =
1902                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1903                         break;
1904                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1905                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1906                         if (!udp)
1907                                 return rte_flow_error_set(error, EINVAL,
1908                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1909                                                 (void *)items->type,
1910                                                 "udp header not found");
1911                         if (!vxlan_gpe->proto)
1912                                 return rte_flow_error_set(error, EINVAL,
1913                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1914                                                 (void *)items->type,
1915                                                 "next protocol not found");
1916                         if (!udp->dst_port)
1917                                 udp->dst_port =
1918                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1919                         if (!vxlan_gpe->vx_flags)
1920                                 vxlan_gpe->vx_flags =
1921                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1922                         break;
1923                 case RTE_FLOW_ITEM_TYPE_GRE:
1924                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1925                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1926                         if (!gre->proto)
1927                                 return rte_flow_error_set(error, EINVAL,
1928                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1929                                                 (void *)items->type,
1930                                                 "next protocol not found");
1931                         if (!ipv4 && !ipv6)
1932                                 return rte_flow_error_set(error, EINVAL,
1933                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1934                                                 (void *)items->type,
1935                                                 "ip header not found");
1936                         if (ipv4 && !ipv4->next_proto_id)
1937                                 ipv4->next_proto_id = IPPROTO_GRE;
1938                         else if (ipv6 && !ipv6->proto)
1939                                 ipv6->proto = IPPROTO_GRE;
1940                         break;
1941                 case RTE_FLOW_ITEM_TYPE_VOID:
1942                         break;
1943                 default:
1944                         return rte_flow_error_set(error, EINVAL,
1945                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1946                                                   (void *)items->type,
1947                                                   "unsupported item type");
1948                         break;
1949                 }
1950                 temp_size += len;
1951         }
1952         *size = temp_size;
1953         return 0;
1954 }
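
/**
 * Illustrative input for the conversion above: a VXLAN encapsulation
 * definition as a caller might supply it (the *_spec variables are
 * hypothetical and assumed to be filled elsewhere):
 *
 * @code
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *	size_t size;
 *
 *	if (flow_dv_convert_encap_data(items, buf, &size, error))
 *		return -rte_errno;
 * @endcode
 *
 * Protocol fields left zero in the specs (ether_type, TTL, UDP port,
 * VXLAN flags) are filled with the defaults defined above.
 */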
1955
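/**
 * Zero out the UDP checksum of an IPv6 encapsulation header.
 *
 * The hardware recomputes the IPv4 header checksum of the outer header,
 * but it does not provide a UDP checksum over IPv6, so the checksum is
 * cleared here (a zero UDP checksum means "not present", which tunnels
 * may legitimately use).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the Ethernet
 *   header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */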
1956 static int
1957 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1958 {
1959         struct rte_ether_hdr *eth = NULL;
1960         struct rte_vlan_hdr *vlan = NULL;
1961         struct rte_ipv6_hdr *ipv6 = NULL;
1962         struct rte_udp_hdr *udp = NULL;
1963         char *next_hdr;
1964         uint16_t proto;
1965
1966         eth = (struct rte_ether_hdr *)data;
1967         next_hdr = (char *)(eth + 1);
1968         proto = RTE_BE16(eth->ether_type);
1969
1970         /* Skip VLAN headers. */
1971         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1972                 vlan = (struct rte_vlan_hdr *)next_hdr;
1973                 proto = RTE_BE16(vlan->eth_proto);
1974                 next_hdr += sizeof(struct rte_vlan_hdr);
1975         }
1976
1977         /* HW calculates the IPv4 checksum. No need to proceed. */
1978         if (proto == RTE_ETHER_TYPE_IPV4)
1979                 return 0;
1980
1981         /* Non IPv4/IPv6 header. Not supported. */
1982         if (proto != RTE_ETHER_TYPE_IPV6) {
1983                 return rte_flow_error_set(error, ENOTSUP,
1984                                           RTE_FLOW_ERROR_TYPE_ACTION,
1985                                           NULL, "Cannot offload non IPv4/IPv6");
1986         }
1987
1988         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1989
1990         /* Ignore non UDP packets. */
1991         if (ipv6->proto != IPPROTO_UDP)
1992                 return 0;
1993
1994         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1995         udp->dgram_cksum = 0;
1996
1997         return 0;
1998 }
1999
2000 /**
2001  * Convert L2 encap action to DV specification.
2002  *
2003  * @param[in] dev
2004  *   Pointer to rte_eth_dev structure.
2005  * @param[in] action
2006  *   Pointer to action structure.
2007  * @param[in, out] dev_flow
2008  *   Pointer to the mlx5_flow.
2009  * @param[in] transfer
2010  *   Mark if the flow is E-Switch flow.
2011  * @param[out] error
2012  *   Pointer to the error structure.
2013  *
2014  * @return
2015  *   0 on success, a negative errno value otherwise and rte_errno is set.
2016  */
2017 static int
2018 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2019                                const struct rte_flow_action *action,
2020                                struct mlx5_flow *dev_flow,
2021                                uint8_t transfer,
2022                                struct rte_flow_error *error)
2023 {
2024         const struct rte_flow_item *encap_data;
2025         const struct rte_flow_action_raw_encap *raw_encap_data;
2026         struct mlx5_flow_dv_encap_decap_resource res = {
2027                 .reformat_type =
2028                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2029                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2030                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2031         };
2032
2033         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2034                 raw_encap_data =
2035                         (const struct rte_flow_action_raw_encap *)action->conf;
2036                 res.size = raw_encap_data->size;
2037                 memcpy(res.buf, raw_encap_data->data, res.size);
2038                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2039                         return -rte_errno;
2040         } else {
2041                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2042                         encap_data =
2043                                 ((const struct rte_flow_action_vxlan_encap *)
2044                                                 action->conf)->definition;
2045                 else
2046                         encap_data =
2047                                 ((const struct rte_flow_action_nvgre_encap *)
2048                                                 action->conf)->definition;
2049                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2050                                                &res.size, error))
2051                         return -rte_errno;
2052         }
2053         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2054                 return rte_flow_error_set(error, EINVAL,
2055                                           RTE_FLOW_ERROR_TYPE_ACTION,
2056                                           NULL, "can't create L2 encap action");
2057         return 0;
2058 }
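
/**
 * Illustrative caller-side action (not part of the driver) that lands
 * in this conversion path; vxlan_items is a hypothetical item list such
 * as the one sketched after flow_dv_convert_encap_data():
 *
 * @code
 *	struct rte_flow_action_vxlan_encap encap = {
 *		.definition = vxlan_items,
 *	};
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *		.conf = &encap,
 *	};
 * @endcode
 */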
2059
2060 /**
2061  * Convert L2 decap action to DV specification.
2062  *
2063  * @param[in] dev
2064  *   Pointer to rte_eth_dev structure.
2065  * @param[in, out] dev_flow
2066  *   Pointer to the mlx5_flow.
2067  * @param[in] transfer
2068  *   Mark if the flow is E-Switch flow.
2069  * @param[out] error
2070  *   Pointer to the error structure.
2071  *
2072  * @return
2073  *   0 on success, a negative errno value otherwise and rte_errno is set.
2074  */
2075 static int
2076 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2077                                struct mlx5_flow *dev_flow,
2078                                uint8_t transfer,
2079                                struct rte_flow_error *error)
2080 {
2081         struct mlx5_flow_dv_encap_decap_resource res = {
2082                 .size = 0,
2083                 .reformat_type =
2084                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2085                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2086                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2087         };
2088
2089         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2090                 return rte_flow_error_set(error, EINVAL,
2091                                           RTE_FLOW_ERROR_TYPE_ACTION,
2092                                           NULL, "can't create L2 decap action");
2093         return 0;
2094 }
2095
2096 /**
2097  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2098  *
2099  * @param[in] dev
2100  *   Pointer to rte_eth_dev structure.
2101  * @param[in] action
2102  *   Pointer to action structure.
2103  * @param[in, out] dev_flow
2104  *   Pointer to the mlx5_flow.
2105  * @param[in] attr
2106  *   Pointer to the flow attributes.
2107  * @param[out] error
2108  *   Pointer to the error structure.
2109  *
2110  * @return
2111  *   0 on success, a negative errno value otherwise and rte_errno is set.
2112  */
2113 static int
2114 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2115                                 const struct rte_flow_action *action,
2116                                 struct mlx5_flow *dev_flow,
2117                                 const struct rte_flow_attr *attr,
2118                                 struct rte_flow_error *error)
2119 {
2120         const struct rte_flow_action_raw_encap *encap_data;
2121         struct mlx5_flow_dv_encap_decap_resource res;
2122
2123         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2124         res.size = encap_data->size;
2125         memcpy(res.buf, encap_data->data, res.size);
2126         res.reformat_type = attr->egress ?
2127                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2128                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2129         if (attr->transfer)
2130                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2131         else
2132                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2133                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2134         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2135                 return rte_flow_error_set(error, EINVAL,
2136                                           RTE_FLOW_ERROR_TYPE_ACTION,
2137                                           NULL, "can't create encap action");
2138         return 0;
2139 }
2140
2141 /**
2142  * Create action push VLAN.
2143  *
2144  * @param[in] dev
2145  *   Pointer to rte_eth_dev structure.
2146  * @param[in] attr
2147  *   Pointer to the flow attributes.
2148  * @param[in] vlan
2149  *   Pointer to the VLAN header to push to the Ethernet header.
2150  * @param[in, out] dev_flow
2151  *   Pointer to the mlx5_flow.
2152  * @param[out] error
2153  *   Pointer to the error structure.
2154  *
2155  * @return
2156  *   0 on success, a negative errno value otherwise and rte_errno is set.
2157  */
2158 static int
2159 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2160                                 const struct rte_flow_attr *attr,
2161                                 const struct rte_vlan_hdr *vlan,
2162                                 struct mlx5_flow *dev_flow,
2163                                 struct rte_flow_error *error)
2164 {
2165         struct mlx5_flow_dv_push_vlan_action_resource res;
2166
2167         res.vlan_tag =
2168                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2169                                  vlan->vlan_tci);
2170         if (attr->transfer)
2171                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2172         else
2173                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2174                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2175         return flow_dv_push_vlan_action_resource_register
2176                                             (dev, &res, dev_flow, error);
2177 }
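
/**
 * The 32-bit vlan_tag programmed above packs TPID and TCI in network
 * order. A worked example with purely illustrative values (PCP 1,
 * VLAN ID 5), assuming CPU-order fields as prepared by the caller:
 *
 * @code
 *	struct rte_vlan_hdr vlan = {
 *		.eth_proto = RTE_ETHER_TYPE_VLAN,
 *		.vlan_tci = (1 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 5,
 *	};
 *	uint32_t tag = rte_cpu_to_be_32
 *			(((uint32_t)vlan.eth_proto << 16) | vlan.vlan_tci);
 *	// tag holds 0x81002005 in big-endian byte order
 * @endcode
 */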
2178
2179 /**
2180  * Validate the modify-header actions.
2181  *
2182  * @param[in] action_flags
2183  *   Holds the actions detected until now.
2184  * @param[in] action
2185  *   Pointer to the modify action.
2186  * @param[out] error
2187  *   Pointer to error structure.
2188  *
2189  * @return
2190  *   0 on success, a negative errno value otherwise and rte_errno is set.
2191  */
2192 static int
2193 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2194                                    const struct rte_flow_action *action,
2195                                    struct rte_flow_error *error)
2196 {
2197         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2198                 return rte_flow_error_set(error, EINVAL,
2199                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2200                                           NULL, "action configuration not set");
2201         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2202                 return rte_flow_error_set(error, EINVAL,
2203                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2204                                           "can't have encap action before"
2205                                           " modify action");
2206         return 0;
2207 }
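
/**
 * Illustrative ordering (not part of the driver): header modifications
 * must precede encapsulation. The conf pointers are hypothetical:
 *
 * @code
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *		  .conf = &set_ipv4_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *		  .conf = &encap_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 * @endcode
 *
 * Swapping the first two actions fails here with EINVAL because the
 * encap flag is already set in action_flags when the modify action is
 * validated.
 */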
2208
2209 /**
2210  * Validate the modify-header MAC address actions.
2211  *
2212  * @param[in] action_flags
2213  *   Holds the actions detected until now.
2214  * @param[in] action
2215  *   Pointer to the modify action.
2216  * @param[in] item_flags
2217  *   Holds the items detected.
2218  * @param[out] error
2219  *   Pointer to error structure.
2220  *
2221  * @return
2222  *   0 on success, a negative errno value otherwise and rte_errno is set.
2223  */
2224 static int
2225 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2226                                    const struct rte_flow_action *action,
2227                                    const uint64_t item_flags,
2228                                    struct rte_flow_error *error)
2229 {
2230         int ret = 0;
2231
2232         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2233         if (!ret) {
2234                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2235                         return rte_flow_error_set(error, EINVAL,
2236                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2237                                                   NULL,
2238                                                   "no L2 item in pattern");
2239         }
2240         return ret;
2241 }
2242
2243 /**
2244  * Validate the modify-header IPv4 address actions.
2245  *
2246  * @param[in] action_flags
2247  *   Holds the actions detected until now.
2248  * @param[in] action
2249  *   Pointer to the modify action.
2250  * @param[in] item_flags
2251  *   Holds the items detected.
2252  * @param[out] error
2253  *   Pointer to error structure.
2254  *
2255  * @return
2256  *   0 on success, a negative errno value otherwise and rte_errno is set.
2257  */
2258 static int
2259 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2260                                     const struct rte_flow_action *action,
2261                                     const uint64_t item_flags,
2262                                     struct rte_flow_error *error)
2263 {
2264         int ret = 0;
2265
2266         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2267         if (!ret) {
2268                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2269                         return rte_flow_error_set(error, EINVAL,
2270                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2271                                                   NULL,
2272                                                   "no ipv4 item in pattern");
2273         }
2274         return ret;
2275 }
2276
2277 /**
2278  * Validate the modify-header IPv6 address actions.
2279  *
2280  * @param[in] action_flags
2281  *   Holds the actions detected until now.
2282  * @param[in] action
2283  *   Pointer to the modify action.
2284  * @param[in] item_flags
2285  *   Holds the items detected.
2286  * @param[out] error
2287  *   Pointer to error structure.
2288  *
2289  * @return
2290  *   0 on success, a negative errno value otherwise and rte_errno is set.
2291  */
2292 static int
2293 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2294                                     const struct rte_flow_action *action,
2295                                     const uint64_t item_flags,
2296                                     struct rte_flow_error *error)
2297 {
2298         int ret = 0;
2299
2300         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2301         if (!ret) {
2302                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2303                         return rte_flow_error_set(error, EINVAL,
2304                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2305                                                   NULL,
2306                                                   "no ipv6 item in pattern");
2307         }
2308         return ret;
2309 }
2310
2311 /**
2312  * Validate the modify-header TP actions.
2313  *
2314  * @param[in] action_flags
2315  *   Holds the actions detected until now.
2316  * @param[in] action
2317  *   Pointer to the modify action.
2318  * @param[in] item_flags
2319  *   Holds the items detected.
2320  * @param[out] error
2321  *   Pointer to error structure.
2322  *
2323  * @return
2324  *   0 on success, a negative errno value otherwise and rte_errno is set.
2325  */
2326 static int
2327 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2328                                   const struct rte_flow_action *action,
2329                                   const uint64_t item_flags,
2330                                   struct rte_flow_error *error)
2331 {
2332         int ret = 0;
2333
2334         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2335         if (!ret) {
2336                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2337                         return rte_flow_error_set(error, EINVAL,
2338                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2339                                                   NULL, "no transport layer "
2340                                                   "in pattern");
2341         }
2342         return ret;
2343 }
2344
2345 /**
2346  * Validate the modify-header actions of increment/decrement
2347  * TCP Sequence-number.
2348  *
2349  * @param[in] action_flags
2350  *   Holds the actions detected until now.
2351  * @param[in] action
2352  *   Pointer to the modify action.
2353  * @param[in] item_flags
2354  *   Holds the items detected.
2355  * @param[out] error
2356  *   Pointer to error structure.
2357  *
2358  * @return
2359  *   0 on success, a negative errno value otherwise and rte_errno is set.
2360  */
2361 static int
2362 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2363                                        const struct rte_flow_action *action,
2364                                        const uint64_t item_flags,
2365                                        struct rte_flow_error *error)
2366 {
2367         int ret = 0;
2368
2369         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2370         if (!ret) {
2371                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2372                         return rte_flow_error_set(error, EINVAL,
2373                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2374                                                   NULL, "no TCP item in"
2375                                                   " pattern");
2376                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2377                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2378                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2379                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2380                         return rte_flow_error_set(error, EINVAL,
2381                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2382                                                   NULL,
2383                                                   "cannot decrease and increase"
2384                                                   " TCP sequence number"
2385                                                   " at the same time");
2386         }
2387         return ret;
2388 }
2389
2390 /**
2391  * Validate the modify-header actions of increment/decrement
2392  * TCP Acknowledgment number.
2393  *
2394  * @param[in] action_flags
2395  *   Holds the actions detected until now.
2396  * @param[in] action
2397  *   Pointer to the modify action.
2398  * @param[in] item_flags
2399  *   Holds the items detected.
2400  * @param[out] error
2401  *   Pointer to error structure.
2402  *
2403  * @return
2404  *   0 on success, a negative errno value otherwise and rte_errno is set.
2405  */
2406 static int
2407 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2408                                        const struct rte_flow_action *action,
2409                                        const uint64_t item_flags,
2410                                        struct rte_flow_error *error)
2411 {
2412         int ret = 0;
2413
2414         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2415         if (!ret) {
2416                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2417                         return rte_flow_error_set(error, EINVAL,
2418                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2419                                                   NULL, "no TCP item in"
2420                                                   " pattern");
2421                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2422                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2423                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2424                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2425                         return rte_flow_error_set(error, EINVAL,
2426                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2427                                                   NULL,
2428                                                   "cannot decrease and increase"
2429                                                   " TCP acknowledgment number"
2430                                                   " at the same time");
2431         }
2432         return ret;
2433 }
2434
2435 /**
2436  * Validate the modify-header TTL actions.
2437  *
2438  * @param[in] action_flags
2439  *   Holds the actions detected until now.
2440  * @param[in] action
2441  *   Pointer to the modify action.
2442  * @param[in] item_flags
2443  *   Holds the items detected.
2444  * @param[out] error
2445  *   Pointer to error structure.
2446  *
2447  * @return
2448  *   0 on success, a negative errno value otherwise and rte_errno is set.
2449  */
2450 static int
2451 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2452                                    const struct rte_flow_action *action,
2453                                    const uint64_t item_flags,
2454                                    struct rte_flow_error *error)
2455 {
2456         int ret = 0;
2457
2458         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2459         if (!ret) {
2460                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2461                         return rte_flow_error_set(error, EINVAL,
2462                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2463                                                   NULL,
2464                                                   "no IP protocol in pattern");
2465         }
2466         return ret;
2467 }
2468
2469 /**
2470  * Validate jump action.
2471  *
2472  * @param[in] action
2473  *   Pointer to the jump action.
2474  * @param[in] action_flags
2475  *   Holds the actions detected until now.
2476  * @param[in] attributes
2477  *   Pointer to flow attributes
2478  * @param[in] external
2479  *   Action belongs to a flow rule created by a request external to the PMD.
2480  * @param[out] error
2481  *   Pointer to error structure.
2482  *
2483  * @return
2484  *   0 on success, a negative errno value otherwise and rte_errno is set.
2485  */
2486 static int
2487 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2488                              uint64_t action_flags,
2489                              const struct rte_flow_attr *attributes,
2490                              bool external, struct rte_flow_error *error)
2491 {
2492         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2493                                                     MLX5_MAX_TABLES;
2494         uint32_t target_group, table;
2495         int ret = 0;
2496
2497         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2498                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2499                 return rte_flow_error_set(error, EINVAL,
2500                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2501                                           "can't have 2 fate actions in"
2502                                           " same flow");
2503         if (!action->conf)
2504                 return rte_flow_error_set(error, EINVAL,
2505                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2506                                           NULL, "action configuration not set");
2507         target_group =
2508                 ((const struct rte_flow_action_jump *)action->conf)->group;
2509         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2510                                        &table, error);
2511         if (ret)
2512                 return ret;
2513         if (table >= max_group)
2514                 return rte_flow_error_set(error, EINVAL,
2515                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2516                                           "target group index out of range");
2517         if (attributes->group >= target_group)
2518                 return rte_flow_error_set(error, EINVAL,
2519                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2520                                           "target group must be higher than"
2521                                           " the current flow group");
2522         return 0;
2523 }
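
/**
 * Illustrative jump (not part of the driver): a rule may only jump to
 * a strictly higher group, e.g. from group 0 to group 1:
 *
 * @code
 *	struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 * @endcode
 *
 * A jump from group 1 to group 0 or 1 is rejected above with EINVAL.
 */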
2524
2525 /**
2526  * Validate the port_id action.
2527  *
2528  * @param[in] dev
2529  *   Pointer to rte_eth_dev structure.
2530  * @param[in] action_flags
2531  *   Bit-fields that holds the actions detected until now.
2532  * @param[in] action
2533  *   Port_id RTE action structure.
2534  * @param[in] attr
2535  *   Attributes of flow that includes this action.
2536  * @param[out] error
2537  *   Pointer to error structure.
2538  *
2539  * @return
2540  *   0 on success, a negative errno value otherwise and rte_errno is set.
2541  */
2542 static int
2543 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2544                                 uint64_t action_flags,
2545                                 const struct rte_flow_action *action,
2546                                 const struct rte_flow_attr *attr,
2547                                 struct rte_flow_error *error)
2548 {
2549         const struct rte_flow_action_port_id *port_id;
2550         struct mlx5_priv *act_priv;
2551         struct mlx5_priv *dev_priv;
2552         uint16_t port;
2553
2554         if (!attr->transfer)
2555                 return rte_flow_error_set(error, ENOTSUP,
2556                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2557                                           NULL,
2558                                           "port id action is valid in transfer"
2559                                           " mode only");
2560         if (!action || !action->conf)
2561                 return rte_flow_error_set(error, ENOTSUP,
2562                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2563                                           NULL,
2564                                           "port id action parameters must be"
2565                                           " specified");
2566         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2567                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2568                 return rte_flow_error_set(error, EINVAL,
2569                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2570                                           "can have only one fate action in"
2571                                           " a flow");
2572         dev_priv = mlx5_dev_to_eswitch_info(dev);
2573         if (!dev_priv)
2574                 return rte_flow_error_set(error, rte_errno,
2575                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2576                                           NULL,
2577                                           "failed to obtain E-Switch info");
2578         port_id = action->conf;
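             /* With "original" set the action targets the issuing port itself. */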
2579         port = port_id->original ? dev->data->port_id : port_id->id;
2580         act_priv = mlx5_port_to_eswitch_info(port);
2581         if (!act_priv)
2582                 return rte_flow_error_set
2583                                 (error, rte_errno,
2584                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2585                                  "failed to obtain E-Switch port id for port");
2586         if (act_priv->domain_id != dev_priv->domain_id)
2587                 return rte_flow_error_set
2588                                 (error, EINVAL,
2589                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2590                                  "port does not belong to"
2591                                  " E-Switch being configured");
2592         return 0;
2593 }
2594
2595 /**
2596  * Find existing modify-header resource or create and register a new one.
2597  *
2598  * @param[in, out] dev
2599  *   Pointer to rte_eth_dev structure.
2600  * @param[in, out] resource
2601  *   Pointer to modify-header resource.
2602  * @param[in, out] dev_flow
2603  *   Pointer to the dev_flow.
2604  * @param[out] error
2605  *   Pointer to error structure.
2606  *
2607  * @return
2608  *   0 on success, a negative errno value otherwise and rte_errno is set.
2609  */
2610 static int
2611 flow_dv_modify_hdr_resource_register
2612                         (struct rte_eth_dev *dev,
2613                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2614                          struct mlx5_flow *dev_flow,
2615                          struct rte_flow_error *error)
2616 {
2617         struct mlx5_priv *priv = dev->data->dev_private;
2618         struct mlx5_ibv_shared *sh = priv->sh;
2619         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2620         struct mlx5dv_dr_domain *ns;
2621
2622         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2623                 ns = sh->fdb_domain;
2624         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2625                 ns = sh->tx_domain;
2626         else
2627                 ns = sh->rx_domain;
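             /*
              * Modify-header actions in group 0 are applied on the root table
              * and must be created with the root level flag.
              */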
2628         resource->flags =
2629                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2630         /* Lookup a matching resource from cache. */
2631         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2632                 if (resource->ft_type == cache_resource->ft_type &&
2633                     resource->actions_num == cache_resource->actions_num &&
2634                     resource->flags == cache_resource->flags &&
2635                     !memcmp((const void *)resource->actions,
2636                             (const void *)cache_resource->actions,
2637                             (resource->actions_num *
2638                                             sizeof(resource->actions[0])))) {
2639                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2640                                 (void *)cache_resource,
2641                                 rte_atomic32_read(&cache_resource->refcnt));
2642                         rte_atomic32_inc(&cache_resource->refcnt);
2643                         dev_flow->dv.modify_hdr = cache_resource;
2644                         return 0;
2645                 }
2646         }
2647         /* Register new modify-header resource. */
2648         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2649         if (!cache_resource)
2650                 return rte_flow_error_set(error, ENOMEM,
2651                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2652                                           "cannot allocate resource memory");
2653         *cache_resource = *resource;
2654         cache_resource->verbs_action =
2655                 mlx5_glue->dv_create_flow_action_modify_header
2656                                         (sh->ctx, cache_resource->ft_type,
2657                                          ns, cache_resource->flags,
2658                                          cache_resource->actions_num *
2659                                          sizeof(cache_resource->actions[0]),
2660                                          (uint64_t *)cache_resource->actions);
2661         if (!cache_resource->verbs_action) {
2662                 rte_free(cache_resource);
2663                 return rte_flow_error_set(error, ENOMEM,
2664                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2665                                           NULL, "cannot create action");
2666         }
2667         rte_atomic32_init(&cache_resource->refcnt);
2668         rte_atomic32_inc(&cache_resource->refcnt);
2669         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2670         dev_flow->dv.modify_hdr = cache_resource;
2671         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2672                 (void *)cache_resource,
2673                 rte_atomic32_read(&cache_resource->refcnt));
2674         return 0;
2675 }
2676
2677 #define MLX5_CNT_CONTAINER_RESIZE 64
2678
2679 /**
2680  * Get or create a flow counter.
2681  *
2682  * @param[in] dev
2683  *   Pointer to the Ethernet device structure.
2684  * @param[in] shared
2685  *   Indicate if this counter is shared with other flows.
2686  * @param[in] id
2687  *   Counter identifier.
2688  *
2689  * @return
2690  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
2691  */
2692 static struct mlx5_flow_counter *
2693 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2694                                uint32_t id)
2695 {
2696         struct mlx5_priv *priv = dev->data->dev_private;
2697         struct mlx5_flow_counter *cnt = NULL;
2698         struct mlx5_devx_obj *dcs = NULL;
2699
2700         if (!priv->config.devx) {
2701                 rte_errno = ENOTSUP;
2702                 return NULL;
2703         }
2704         if (shared) {
2705                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2706                         if (cnt->shared && cnt->id == id) {
2707                                 cnt->ref_cnt++;
2708                                 return cnt;
2709                         }
2710                 }
2711         }
2712         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2713         if (!dcs)
2714                 return NULL;
2715         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2716         if (!cnt) {
2717                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2718                 rte_errno = ENOMEM;
2719                 return NULL;
2720         }
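             /*
              * Fill a local template first; the counter is published to the
              * shared list only after the DV action is successfully created.
              */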
2721         struct mlx5_flow_counter tmpl = {
2722                 .shared = shared,
2723                 .ref_cnt = 1,
2724                 .id = id,
2725                 .dcs = dcs,
2726         };
2727         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2728         if (!tmpl.action) {
2729                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2730                 rte_errno = errno;
2731                 rte_free(cnt);
2732                 return NULL;
2733         }
2734         *cnt = tmpl;
2735         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2736         return cnt;
2737 }
2738
2739 /**
2740  * Release a flow counter.
2741  *
2742  * @param[in] dev
2743  *   Pointer to the Ethernet device structure.
2744  * @param[in] counter
2745  *   Pointer to the counter handler.
2746  */
2747 static void
2748 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2749                                  struct mlx5_flow_counter *counter)
2750 {
2751         struct mlx5_priv *priv = dev->data->dev_private;
2752
2753         if (!counter)
2754                 return;
2755         if (--counter->ref_cnt == 0) {
2756                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2757                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2758                 rte_free(counter);
2759         }
2760 }
2761
2762 /**
2763  * Query a devx flow counter.
2764  *
2765  * @param[in] dev
2766  *   Pointer to the Ethernet device structure.
2767  * @param[in] cnt
2768  *   Pointer to the flow counter.
2769  * @param[out] pkts
2770  *   The statistics value of packets.
2771  * @param[out] bytes
2772  *   The statistics value of bytes.
2773  *
2774  * @return
2775  *   0 on success, otherwise a negative errno value and rte_errno is set.
2776  */
2777 static inline int
2778 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2779                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2780                      uint64_t *bytes)
2781 {
2782         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2783                                                 0, NULL, NULL, 0);
2784 }
2785
2786 /**
2787  * Get a pool by a counter.
2788  *
2789  * @param[in] cnt
2790  *   Pointer to the counter.
2791  *
2792  * @return
2793  *   The counter pool.
2794  */
2795 static struct mlx5_flow_counter_pool *
2796 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2797 {
2798         if (!cnt->batch) {
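                     /*
                      * Single counters reside in pools where the counter array
                      * directly follows the pool structure: rewind to the first
                      * counter of the pool, then step back over the pool header.
                      */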
2799                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2800                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2801         }
2802         return cnt->pool;
2803 }
2804
2805 /**
2806  * Get a pool by devx counter ID.
2807  *
2808  * @param[in] cont
2809  *   Pointer to the counter container.
2810  * @param[in] id
2811  *   The counter devx ID.
2812  *
2813  * @return
2814  *   The counter pool pointer if it exists, NULL otherwise.
2815  */
2816 static struct mlx5_flow_counter_pool *
2817 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2818 {
2819         struct mlx5_flow_counter_pool *pool;
2820
2821         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2822                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2823                                 MLX5_COUNTERS_PER_POOL;
2824
2825                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2826                         return pool;
2827         }
2828         return NULL;
2829 }
2830
2831 /**
2832  * Allocate memory for the counter values, wrapped by all the needed
2833  * management structures.
2834  *
2835  * @param[in] dev
2836  *   Pointer to the Ethernet device structure.
2837  * @param[in] raws_n
2838  *   The number of raw memory areas - each one holds MLX5_COUNTERS_PER_POOL counters.
2839  *
2840  * @return
2841  *   The new memory management pointer on success, otherwise NULL and rte_errno
2842  *   is set.
2843  */
2844 static struct mlx5_counter_stats_mem_mng *
2845 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2846 {
2847         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2848                                         (dev->data->dev_private))->sh;
2849         struct mlx5_devx_mkey_attr mkey_attr;
2850         struct mlx5_counter_stats_mem_mng *mem_mng;
2851         volatile struct flow_counter_stats *raw_data;
2852         int size = (sizeof(struct flow_counter_stats) *
2853                         MLX5_COUNTERS_PER_POOL +
2854                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2855                         sizeof(struct mlx5_counter_stats_mem_mng);
2856         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2857         int i;
2858
2859         if (!mem) {
2860                 rte_errno = ENOMEM;
2861                 return NULL;
2862         }
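             /*
              * Allocation layout: the raw counter data comes first, the
              * per-raw descriptors follow it and the management structure
              * sits at the very end of the buffer.
              */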
2863         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2864         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2865         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2866                                                  IBV_ACCESS_LOCAL_WRITE);
2867         if (!mem_mng->umem) {
2868                 rte_errno = errno;
2869                 rte_free(mem);
2870                 return NULL;
2871         }
2872         mkey_attr.addr = (uintptr_t)mem;
2873         mkey_attr.size = size;
2874         mkey_attr.umem_id = mem_mng->umem->umem_id;
2875         mkey_attr.pd = sh->pdn;
2876         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2877         if (!mem_mng->dm) {
2878                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2879                 rte_errno = errno;
2880                 rte_free(mem);
2881                 return NULL;
2882         }
2883         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2884         raw_data = (volatile struct flow_counter_stats *)mem;
2885         for (i = 0; i < raws_n; ++i) {
2886                 mem_mng->raws[i].mem_mng = mem_mng;
2887                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2888         }
2889         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2890         return mem_mng;
2891 }
2892
2893 /**
2894  * Resize a counter container.
2895  *
2896  * @param[in] dev
2897  *   Pointer to the Ethernet device structure.
2898  * @param[in] batch
2899  *   Whether the pool is for counters that were allocated by a batch command.
2900  *
2901  * @return
2902  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2903  */
2904 static struct mlx5_pools_container *
2905 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2906 {
2907         struct mlx5_priv *priv = dev->data->dev_private;
2908         struct mlx5_pools_container *cont =
2909                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2910         struct mlx5_pools_container *new_cont =
2911                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2912         struct mlx5_counter_stats_mem_mng *mem_mng;
2913         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2914         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2915         int i;
2916
2917         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2918                 /* The last resize hasn't been detected by the host thread yet. */
2919                 rte_errno = EAGAIN;
2920                 return NULL;
2921         }
2922         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2923         if (!new_cont->pools) {
2924                 rte_errno = ENOMEM;
2925                 return NULL;
2926         }
2927         if (cont->n)
2928                 memcpy(new_cont->pools, cont->pools, cont->n *
2929                        sizeof(struct mlx5_flow_counter_pool *));
2930         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2931                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2932         if (!mem_mng) {
2933                 rte_free(new_cont->pools);
2934                 return NULL;
2935         }
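             /* Keep the extra raw areas free for in-flight asynchronous queries. */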
2936         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2937                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2938                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2939                                  i, next);
2940         new_cont->n = resize;
2941         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2942         TAILQ_INIT(&new_cont->pool_list);
2943         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2944         new_cont->init_mem_mng = mem_mng;
2945         rte_cio_wmb();
2946         /* Flip the master container. */
2947         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
2948         return new_cont;
2949 }
2950
2951 /**
2952  * Query a devx flow counter.
2953  *
2954  * @param[in] dev
2955  *   Pointer to the Ethernet device structure.
2956  * @param[in] cnt
2957  *   Pointer to the flow counter.
2958  * @param[out] pkts
2959  *   The statistics value of packets.
2960  * @param[out] bytes
2961  *   The statistics value of bytes.
2962  *
2963  * @return
2964  *   0 on success, otherwise a negative errno value and rte_errno is set.
2965  */
2966 static inline int
2967 _flow_dv_query_count(struct rte_eth_dev *dev,
2968                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2969                      uint64_t *bytes)
2970 {
2971         struct mlx5_priv *priv = dev->data->dev_private;
2972         struct mlx5_flow_counter_pool *pool =
2973                         flow_dv_counter_pool_get(cnt);
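             /* The counter index in the pool equals its offset in the raw data. */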
2974         int offset = cnt - &pool->counters_raw[0];
2975
2976         if (priv->counter_fallback)
2977                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2978
2979         rte_spinlock_lock(&pool->sl);
2980         /*
2981          * A single counter allocation may produce an ID smaller than the IDs
2982          * already allocated, in parallel with the host thread reading.
2983          * In this case the new counter values must be reported as 0.
2984          */
2985         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2986                 *pkts = 0;
2987                 *bytes = 0;
2988         } else {
2989                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2990                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2991         }
2992         rte_spinlock_unlock(&pool->sl);
2993         return 0;
2994 }
2995
2996 /**
2997  * Create and initialize a new counter pool.
2998  *
2999  * @param[in] dev
3000  *   Pointer to the Ethernet device structure.
3001  * @param[out] dcs
3002  *   The devX counter handle.
3003  * @param[in] batch
3004  *   Whether the pool is for counters that were allocated by a batch command.
3005  *
3006  * @return
3007  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
3008  */
3009 static struct mlx5_flow_counter_pool *
3010 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
3011                     uint32_t batch)
3012 {
3013         struct mlx5_priv *priv = dev->data->dev_private;
3014         struct mlx5_flow_counter_pool *pool;
3015         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3016                                                                0);
3017         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
3018         uint32_t size;
3019
3020         if (cont->n == n_valid) {
3021                 cont = flow_dv_container_resize(dev, batch);
3022                 if (!cont)
3023                         return NULL;
3024         }
3025         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
3026                         sizeof(struct mlx5_flow_counter);
3027         pool = rte_calloc(__func__, 1, size, 0);
3028         if (!pool) {
3029                 rte_errno = ENOMEM;
3030                 return NULL;
3031         }
3032         pool->min_dcs = dcs;
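             /*
              * Take one raw data area from the container memory, indexed
              * within the current resize chunk.
              */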
3033         pool->raw = cont->init_mem_mng->raws + n_valid %
3034                                                      MLX5_CNT_CONTAINER_RESIZE;
3035         pool->raw_hw = NULL;
3036         rte_spinlock_init(&pool->sl);
3037         /*
3038          * The newly allocated counters in this pool have generation 0. Setting
3039          * the pool query generation to 2 makes all of them valid for allocation.
3040          */
3041         rte_atomic64_set(&pool->query_gen, 0x2);
3042         TAILQ_INIT(&pool->counters);
3043         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3044         cont->pools[n_valid] = pool;
3045         /* Pool initialization must be updated before host thread access. */
3046         rte_cio_wmb();
3047         rte_atomic16_add(&cont->n_valid, 1);
3048         return pool;
3049 }
3050
3051 /**
3052  * Prepare a new counter and/or a new counter pool.
3053  *
3054  * @param[in] dev
3055  *   Pointer to the Ethernet device structure.
3056  * @param[out] cnt_free
3057  *   Where to put the pointer of a new counter.
3058  * @param[in] batch
3059  *   Whether the pool is for counters that were allocated by a batch command.
3060  *
3061  * @return
3062  *   The free counter pool pointer and @p cnt_free is set on success,
3063  *   NULL otherwise and rte_errno is set.
3064  */
3065 static struct mlx5_flow_counter_pool *
3066 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
3067                              struct mlx5_flow_counter **cnt_free,
3068                              uint32_t batch)
3069 {
3070         struct mlx5_priv *priv = dev->data->dev_private;
3071         struct mlx5_flow_counter_pool *pool;
3072         struct mlx5_devx_obj *dcs = NULL;
3073         struct mlx5_flow_counter *cnt;
3074         uint32_t i;
3075
3076         if (!batch) {
3077                 /* bulk_bitmap must be 0 for single counter allocation. */
3078                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3079                 if (!dcs)
3080                         return NULL;
3081                 pool = flow_dv_find_pool_by_id
3082                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
3083                 if (!pool) {
3084                         pool = flow_dv_pool_create(dev, dcs, batch);
3085                         if (!pool) {
3086                                 mlx5_devx_cmd_destroy(dcs);
3087                                 return NULL;
3088                         }
3089                 } else if (dcs->id < pool->min_dcs->id) {
3090                         rte_atomic64_set(&pool->a64_dcs,
3091                                          (int64_t)(uintptr_t)dcs);
3092                 }
3093                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
3094                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3095                 cnt->dcs = dcs;
3096                 *cnt_free = cnt;
3097                 return pool;
3098         }
3099         /* bulk_bitmap is in 128-counter units. */
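             /* Bit 2 set requests a bulk of 4 * 128 = 512 counters - one full pool. */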
3100         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
3101                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
3102         if (!dcs) {
3103                 rte_errno = ENODATA;
3104                 return NULL;
3105         }
3106         pool = flow_dv_pool_create(dev, dcs, batch);
3107         if (!pool) {
3108                 mlx5_devx_cmd_destroy(dcs);
3109                 return NULL;
3110         }
3111         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3112                 cnt = &pool->counters_raw[i];
3113                 cnt->pool = pool;
3114                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3115         }
3116         *cnt_free = &pool->counters_raw[0];
3117         return pool;
3118 }
3119
3120 /**
3121  * Search for an existing shared counter.
3122  *
3123  * @param[in] cont
3124  *   Pointer to the relevant counter pool container.
3125  * @param[in] id
3126  *   The shared counter ID to search.
3127  *
3128  * @return
3129  *   NULL if not found, otherwise a pointer to the shared counter.
3130  */
3131 static struct mlx5_flow_counter *
3132 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3133                               uint32_t id)
3134 {
3135         struct mlx5_flow_counter *cnt;
3136         struct mlx5_flow_counter_pool *pool;
3137         int i;
3138
3139         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3140                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3141                         cnt = &pool->counters_raw[i];
3142                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3143                                 return cnt;
3144                 }
3145         }
3146         return NULL;
3147 }
3148
3149 /**
3150  * Allocate a flow counter.
3151  *
3152  * @param[in] dev
3153  *   Pointer to the Ethernet device structure.
3154  * @param[in] shared
3155  *   Indicate if this counter is shared with other flows.
3156  * @param[in] id
3157  *   Counter identifier.
3158  * @param[in] group
3159  *   Counter flow group.
3160  *
3161  * @return
3162  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
3163  */
3164 static struct mlx5_flow_counter *
3165 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3166                       uint16_t group)
3167 {
3168         struct mlx5_priv *priv = dev->data->dev_private;
3169         struct mlx5_flow_counter_pool *pool = NULL;
3170         struct mlx5_flow_counter *cnt_free = NULL;
3171         /*
3172          * Currently group 0 flow counter cannot be assigned to a flow if it is
3173          * not the first one in the batch counter allocation, so it is better
3174          * to allocate counters one by one for these flows in a separate
3175          * container.
3176          * A counter can be shared between different groups, so shared counters
3177          * need to be taken from the single-counter container.
3178          */
3179         uint32_t batch = (group && !shared) ? 1 : 0;
3180         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3181                                                                0);
3182
3183         if (priv->counter_fallback)
3184                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3185         if (!priv->config.devx) {
3186                 rte_errno = ENOTSUP;
3187                 return NULL;
3188         }
3189         if (shared) {
3190                 cnt_free = flow_dv_counter_shared_search(cont, id);
3191                 if (cnt_free) {
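                             /* Reject if the shared reference counter would overflow. */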
3192                         if (cnt_free->ref_cnt + 1 == 0) {
3193                                 rte_errno = E2BIG;
3194                                 return NULL;
3195                         }
3196                         cnt_free->ref_cnt++;
3197                         return cnt_free;
3198                 }
3199         }
3200         /* Pools which have free counters are at the start of the list. */
3201         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3202                 /*
3203                  * A free counter's reset values must be updated between its
3204                  * release and its reallocation, so at least one query must be
3205                  * done in that window. Ensure it by saving the pool query
3206                  * generation at release time.
3207                  * The free list is sorted according to the generation - so if
3208                  * the first one is not updated, all the others are not
3209                  * updated too.
3210                  */
3211                 cnt_free = TAILQ_FIRST(&pool->counters);
3212                 if (cnt_free && cnt_free->query_gen + 1 <
3213                     rte_atomic64_read(&pool->query_gen))
3214                         break;
3215                 cnt_free = NULL;
3216         }
3217         if (!cnt_free) {
3218                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3219                 if (!pool)
3220                         return NULL;
3221         }
3222         cnt_free->batch = batch;
3223         /* Create a DV counter action only on the first use. */
3224         if (!cnt_free->action) {
3225                 uint16_t offset;
3226                 struct mlx5_devx_obj *dcs;
3227
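                     /*
                      * Batch counters share the pool devx object and are addressed
                      * by their offset within it; single counters each carry their
                      * own devx object and always use offset 0.
                      */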
3228                 if (batch) {
3229                         offset = cnt_free - &pool->counters_raw[0];
3230                         dcs = pool->min_dcs;
3231                 } else {
3232                         offset = 0;
3233                         dcs = cnt_free->dcs;
3234                 }
3235                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3236                                         (dcs->obj, offset);
3237                 if (!cnt_free->action) {
3238                         rte_errno = errno;
3239                         return NULL;
3240                 }
3241         }
3242         /* Update the counter reset values. */
3243         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3244                                  &cnt_free->bytes))
3245                 return NULL;
3246         cnt_free->shared = shared;
3247         cnt_free->ref_cnt = 1;
3248         cnt_free->id = id;
3249         if (!priv->sh->cmng.query_thread_on)
3250                 /* Start the asynchronous batch query by the host thread. */
3251                 mlx5_set_query_alarm(priv->sh);
3252         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3253         if (TAILQ_EMPTY(&pool->counters)) {
3254                 /* Move the pool to the end of the container pool list. */
3255                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3256                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3257         }
3258         return cnt_free;
3259 }
3260
3261 /**
3262  * Release a flow counter.
3263  *
3264  * @param[in] dev
3265  *   Pointer to the Ethernet device structure.
3266  * @param[in] counter
3267  *   Pointer to the counter handler.
3268  */
3269 static void
3270 flow_dv_counter_release(struct rte_eth_dev *dev,
3271                         struct mlx5_flow_counter *counter)
3272 {
3273         struct mlx5_priv *priv = dev->data->dev_private;
3274
3275         if (!counter)
3276                 return;
3277         if (priv->counter_fallback) {
3278                 flow_dv_counter_release_fallback(dev, counter);
3279                 return;
3280         }
3281         if (--counter->ref_cnt == 0) {
3282                 struct mlx5_flow_counter_pool *pool =
3283                                 flow_dv_counter_pool_get(counter);
3284
3285                 /* Put the counter at the end - it is the last updated one. */
3286                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3287                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3288         }
3289 }
3290
3291 /**
3292  * Verify the @p attributes will be correctly understood by the NIC and store
3293  * them in the @p flow if everything is correct.
3294  *
3295  * @param[in] dev
3296  *   Pointer to dev struct.
3297  * @param[in] attributes
3298  *   Pointer to flow attributes.
3299  * @param[in] external
3300  *   This flow rule is created by a request external to the PMD.
3301  * @param[out] error
3302  *   Pointer to error structure.
3303  *
3304  * @return
3305  *   0 on success, a negative errno value otherwise and rte_errno is set.
3306  */
3307 static int
3308 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3309                             const struct rte_flow_attr *attributes,
3310                             bool external __rte_unused,
3311                             struct rte_flow_error *error)
3312 {
3313         struct mlx5_priv *priv = dev->data->dev_private;
3314         uint32_t priority_max = priv->config.flow_prio - 1;
3315
3316 #ifndef HAVE_MLX5DV_DR
3317         if (attributes->group)
3318                 return rte_flow_error_set(error, ENOTSUP,
3319                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3320                                           NULL,
3321                                           "groups are not supported");
3322 #else
3323         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3324                                                     MLX5_MAX_TABLES;
3325         uint32_t table;
3326         int ret;
3327
3328         ret = mlx5_flow_group_to_table(attributes, external,
3329                                        attributes->group,
3330                                        &table, error);
3331         if (ret)
3332                 return ret;
3333         if (table >= max_group)
3334                 return rte_flow_error_set(error, EINVAL,
3335                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3336                                           "group index out of range");
3337 #endif
3338         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3339             attributes->priority >= priority_max)
3340                 return rte_flow_error_set(error, ENOTSUP,
3341                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3342                                           NULL,
3343                                           "priority out of range");
3344         if (attributes->transfer) {
3345                 if (!priv->config.dv_esw_en)
3346                         return rte_flow_error_set
3347                                 (error, ENOTSUP,
3348                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3349                                  "E-Switch dr is not supported");
3350                 if (!(priv->representor || priv->master))
3351                         return rte_flow_error_set
3352                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3353                                  NULL, "E-Switch configuration can only be"
3354                                  " done by a master or a representor device");
3355                 if (attributes->egress)
3356                         return rte_flow_error_set
3357                                 (error, ENOTSUP,
3358                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3359                                  "egress is not supported");
3360         }
3361         if (!(attributes->egress ^ attributes->ingress))
3362                 return rte_flow_error_set(error, ENOTSUP,
3363                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3364                                           "must specify exactly one of "
3365                                           "ingress or egress");
3366         return 0;
3367 }
3368
3369 /**
3370  * Internal validation function. For validating both actions and items.
3371  *
3372  * @param[in] dev
3373  *   Pointer to the rte_eth_dev structure.
3374  * @param[in] attr
3375  *   Pointer to the flow attributes.
3376  * @param[in] items
3377  *   Pointer to the list of items.
3378  * @param[in] actions
3379  *   Pointer to the list of actions.
3380  * @param[in] external
3381  *   This flow rule is created by a request external to the PMD.
3382  * @param[out] error
3383  *   Pointer to the error structure.
3384  *
3385  * @return
3386  *   0 on success, a negative errno value otherwise and rte_errno is set.
3387  */
3388 static int
3389 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3390                  const struct rte_flow_item items[],
3391                  const struct rte_flow_action actions[],
3392                  bool external, struct rte_flow_error *error)
3393 {
3394         int ret;
3395         uint64_t action_flags = 0;
3396         uint64_t item_flags = 0;
3397         uint64_t last_item = 0;
3398         uint8_t next_protocol = 0xff;
3399         int actions_n = 0;
3400         const struct rte_flow_item *gre_item = NULL;
3401         struct rte_flow_item_tcp nic_tcp_mask = {
3402                 .hdr = {
3403                         .tcp_flags = 0xFF,
3404                         .src_port = RTE_BE16(UINT16_MAX),
3405                         .dst_port = RTE_BE16(UINT16_MAX),
3406                 }
3407         };
3408
3409         if (items == NULL)
3410                 return -1;
3411         ret = flow_dv_validate_attributes(dev, attr, external, error);
3412         if (ret < 0)
3413                 return ret;
3414         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3415                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3416                 int type = items->type;
3417
3418                 switch (type) {
3419                 case RTE_FLOW_ITEM_TYPE_VOID:
3420                         break;
3421                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3422                         ret = flow_dv_validate_item_port_id
3423                                         (dev, items, attr, item_flags, error);
3424                         if (ret < 0)
3425                                 return ret;
3426                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3427                         break;
3428                 case RTE_FLOW_ITEM_TYPE_ETH:
3429                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3430                                                           error);
3431                         if (ret < 0)
3432                                 return ret;
3433                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3434                                              MLX5_FLOW_LAYER_OUTER_L2;
3435                         break;
3436                 case RTE_FLOW_ITEM_TYPE_VLAN:
3437                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3438                                                            dev, error);
3439                         if (ret < 0)
3440                                 return ret;
3441                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3442                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3443                         break;
3444                 case RTE_FLOW_ITEM_TYPE_IPV4:
3445                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3446                                                   &item_flags, &tunnel);
3447                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3448                                                            NULL, error);
3449                         if (ret < 0)
3450                                 return ret;
3451                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3452                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3453                         if (items->mask != NULL &&
3454                             ((const struct rte_flow_item_ipv4 *)
3455                              items->mask)->hdr.next_proto_id) {
3456                                 next_protocol =
3457                                         ((const struct rte_flow_item_ipv4 *)
3458                                          (items->spec))->hdr.next_proto_id;
3459                                 next_protocol &=
3460                                         ((const struct rte_flow_item_ipv4 *)
3461                                          (items->mask))->hdr.next_proto_id;
3462                         } else {
3463                                 /* Reset for inner layer. */
3464                                 next_protocol = 0xff;
3465                         }
3466                         break;
3467                 case RTE_FLOW_ITEM_TYPE_IPV6:
3468                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3469                                                   &item_flags, &tunnel);
3470                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3471                                                            NULL, error);
3472                         if (ret < 0)
3473                                 return ret;
3474                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3475                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3476                         if (items->mask != NULL &&
3477                             ((const struct rte_flow_item_ipv6 *)
3478                              items->mask)->hdr.proto) {
3479                                 next_protocol =
3480                                         ((const struct rte_flow_item_ipv6 *)
3481                                          items->spec)->hdr.proto;
3482                                 next_protocol &=
3483                                         ((const struct rte_flow_item_ipv6 *)
3484                                          items->mask)->hdr.proto;
3485                         } else {
3486                                 /* Reset for inner layer. */
3487                                 next_protocol = 0xff;
3488                         }
3489                         break;
3490                 case RTE_FLOW_ITEM_TYPE_TCP:
3491                         ret = mlx5_flow_validate_item_tcp
3492                                                 (items, item_flags,
3493                                                  next_protocol,
3494                                                  &nic_tcp_mask,
3495                                                  error);
3496                         if (ret < 0)
3497                                 return ret;
3498                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3499                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3500                         break;
3501                 case RTE_FLOW_ITEM_TYPE_UDP:
3502                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3503                                                           next_protocol,
3504                                                           error);
3505                         if (ret < 0)
3506                                 return ret;
3507                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3508                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3509                         break;
3510                 case RTE_FLOW_ITEM_TYPE_GRE:
3511                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3512                                                           next_protocol, error);
3513                         if (ret < 0)
3514                                 return ret;
3515                         gre_item = items;
3516                         last_item = MLX5_FLOW_LAYER_GRE;
3517                         break;
3518                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3519                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3520                                                             next_protocol,
3521                                                             error);
3522                         if (ret < 0)
3523                                 return ret;
3524                         last_item = MLX5_FLOW_LAYER_NVGRE;
3525                         break;
3526                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3527                         ret = mlx5_flow_validate_item_gre_key
3528                                 (items, item_flags, gre_item, error);
3529                         if (ret < 0)
3530                                 return ret;
3531                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3532                         break;
3533                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3534                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3535                                                             error);
3536                         if (ret < 0)
3537                                 return ret;
3538                         last_item = MLX5_FLOW_LAYER_VXLAN;
3539                         break;
3540                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3541                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3542                                                                 item_flags, dev,
3543                                                                 error);
3544                         if (ret < 0)
3545                                 return ret;
3546                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3547                         break;
3548                 case RTE_FLOW_ITEM_TYPE_GENEVE:
3549                         ret = mlx5_flow_validate_item_geneve(items,
3550                                                              item_flags, dev,
3551                                                              error);
3552                         if (ret < 0)
3553                                 return ret;
3554                         last_item = MLX5_FLOW_LAYER_GENEVE;
3555                         break;
3556                 case RTE_FLOW_ITEM_TYPE_MPLS:
3557                         ret = mlx5_flow_validate_item_mpls(dev, items,
3558                                                            item_flags,
3559                                                            last_item, error);
3560                         if (ret < 0)
3561                                 return ret;
3562                         last_item = MLX5_FLOW_LAYER_MPLS;
3563                         break;
3564                 case RTE_FLOW_ITEM_TYPE_META:
3565                         ret = flow_dv_validate_item_meta(dev, items, attr,
3566                                                          error);
3567                         if (ret < 0)
3568                                 return ret;
3569                         last_item = MLX5_FLOW_ITEM_METADATA;
3570                         break;
3571                 case RTE_FLOW_ITEM_TYPE_ICMP:
3572                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3573                                                            next_protocol,
3574                                                            error);
3575                         if (ret < 0)
3576                                 return ret;
3577                         last_item = MLX5_FLOW_LAYER_ICMP;
3578                         break;
3579                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3580                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3581                                                             next_protocol,
3582                                                             error);
3583                         if (ret < 0)
3584                                 return ret;
3585                         last_item = MLX5_FLOW_LAYER_ICMP6;
3586                         break;
3587                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3588                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3589                         break;
3590                 default:
3591                         return rte_flow_error_set(error, ENOTSUP,
3592                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3593                                                   NULL, "item not supported");
3594                 }
3595                 item_flags |= last_item;
3596         }
3597         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3598                 int type = actions->type;
3599                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3600                         return rte_flow_error_set(error, ENOTSUP,
3601                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3602                                                   actions, "too many actions");
3603                 switch (type) {
3604                 case RTE_FLOW_ACTION_TYPE_VOID:
3605                         break;
3606                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3607                         ret = flow_dv_validate_action_port_id(dev,
3608                                                               action_flags,
3609                                                               actions,
3610                                                               attr,
3611                                                               error);
3612                         if (ret)
3613                                 return ret;
3614                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3615                         ++actions_n;
3616                         break;
3617                 case RTE_FLOW_ACTION_TYPE_FLAG:
3618                         ret = mlx5_flow_validate_action_flag(action_flags,
3619                                                              attr, error);
3620                         if (ret < 0)
3621                                 return ret;
3622                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3623                         ++actions_n;
3624                         break;
3625                 case RTE_FLOW_ACTION_TYPE_MARK:
3626                         ret = mlx5_flow_validate_action_mark(actions,
3627                                                              action_flags,
3628                                                              attr, error);
3629                         if (ret < 0)
3630                                 return ret;
3631                         action_flags |= MLX5_FLOW_ACTION_MARK;
3632                         ++actions_n;
3633                         break;
3634                 case RTE_FLOW_ACTION_TYPE_DROP:
3635                         ret = mlx5_flow_validate_action_drop(action_flags,
3636                                                              attr, error);
3637                         if (ret < 0)
3638                                 return ret;
3639                         action_flags |= MLX5_FLOW_ACTION_DROP;
3640                         ++actions_n;
3641                         break;
3642                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3643                         ret = mlx5_flow_validate_action_queue(actions,
3644                                                               action_flags, dev,
3645                                                               attr, error);
3646                         if (ret < 0)
3647                                 return ret;
3648                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3649                         ++actions_n;
3650                         break;
3651                 case RTE_FLOW_ACTION_TYPE_RSS:
3652                         ret = mlx5_flow_validate_action_rss(actions,
3653                                                             action_flags, dev,
3654                                                             attr, item_flags,
3655                                                             error);
3656                         if (ret < 0)
3657                                 return ret;
3658                         action_flags |= MLX5_FLOW_ACTION_RSS;
3659                         ++actions_n;
3660                         break;
3661                 case RTE_FLOW_ACTION_TYPE_COUNT:
3662                         ret = flow_dv_validate_action_count(dev, error);
3663                         if (ret < 0)
3664                                 return ret;
3665                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3666                         ++actions_n;
3667                         break;
3668                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3669                         if (flow_dv_validate_action_pop_vlan(dev,
3670                                                              action_flags,
3671                                                              actions,
3672                                                              item_flags, attr,
3673                                                              error))
3674                                 return -rte_errno;
3675                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3676                         ++actions_n;
3677                         break;
3678                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3679                         ret = flow_dv_validate_action_push_vlan(action_flags,
3680                                                                 item_flags,
3681                                                                 actions, attr,
3682                                                                 error);
3683                         if (ret < 0)
3684                                 return ret;
3685                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3686                         ++actions_n;
3687                         break;
3688                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3689                         ret = flow_dv_validate_action_set_vlan_pcp
3690                                                 (action_flags, actions, error);
3691                         if (ret < 0)
3692                                 return ret;
3693                         /* Count PCP with push_vlan command. */
3694                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
3695                         break;
3696                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3697                         ret = flow_dv_validate_action_set_vlan_vid
3698                                                 (item_flags, action_flags,
3699                                                  actions, error);
3700                         if (ret < 0)
3701                                 return ret;
3702                         /* Count VID with push_vlan command. */
3703                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
3704                         break;
3705                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3706                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3707                         ret = flow_dv_validate_action_l2_encap(action_flags,
3708                                                                actions, attr,
3709                                                                error);
3710                         if (ret < 0)
3711                                 return ret;
3712                         action_flags |= actions->type ==
3713                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3714                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3715                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3716                         ++actions_n;
3717                         break;
3718                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3719                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3720                         ret = flow_dv_validate_action_l2_decap(action_flags,
3721                                                                attr, error);
3722                         if (ret < 0)
3723                                 return ret;
3724                         action_flags |= actions->type ==
3725                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3726                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3727                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3728                         ++actions_n;
3729                         break;
3730                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3731                         ret = flow_dv_validate_action_raw_encap(action_flags,
3732                                                                 actions, attr,
3733                                                                 error);
3734                         if (ret < 0)
3735                                 return ret;
3736                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3737                         ++actions_n;
3738                         break;
3739                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3740                         ret = flow_dv_validate_action_raw_decap(action_flags,
3741                                                                 actions, attr,
3742                                                                 error);
3743                         if (ret < 0)
3744                                 return ret;
3745                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3746                         ++actions_n;
3747                         break;
3748                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3749                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3750                         ret = flow_dv_validate_action_modify_mac(action_flags,
3751                                                                  actions,
3752                                                                  item_flags,
3753                                                                  error);
3754                         if (ret < 0)
3755                                 return ret;
3756                         /* Count all modify-header actions as one action. */
3757                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3758                                 ++actions_n;
3759                         action_flags |= actions->type ==
3760                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3761                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3762                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3763                         break;
3765                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3766                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3767                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3768                                                                   actions,
3769                                                                   item_flags,
3770                                                                   error);
3771                         if (ret < 0)
3772                                 return ret;
3773                         /* Count all modify-header actions as one action. */
3774                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3775                                 ++actions_n;
3776                         action_flags |= actions->type ==
3777                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3778                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3779                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3780                         break;
3781                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3782                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3783                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3784                                                                   actions,
3785                                                                   item_flags,
3786                                                                   error);
3787                         if (ret < 0)
3788                                 return ret;
3789                         /* Count all modify-header actions as one action. */
3790                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3791                                 ++actions_n;
3792                         action_flags |= actions->type ==
3793                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3794                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3795                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3796                         break;
3797                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3798                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3799                         ret = flow_dv_validate_action_modify_tp(action_flags,
3800                                                                 actions,
3801                                                                 item_flags,
3802                                                                 error);
3803                         if (ret < 0)
3804                                 return ret;
3805                         /* Count all modify-header actions as one action. */
3806                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3807                                 ++actions_n;
3808                         action_flags |= actions->type ==
3809                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3810                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3811                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3812                         break;
3813                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3814                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3815                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3816                                                                  actions,
3817                                                                  item_flags,
3818                                                                  error);
3819                         if (ret < 0)
3820                                 return ret;
3821                         /* Count all modify-header actions as one action. */
3822                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3823                                 ++actions_n;
3824                         action_flags |= actions->type ==
3825                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3826                                                 MLX5_FLOW_ACTION_SET_TTL :
3827                                                 MLX5_FLOW_ACTION_DEC_TTL;
3828                         break;
3829                 case RTE_FLOW_ACTION_TYPE_JUMP:
3830                         ret = flow_dv_validate_action_jump(actions,
3831                                                            action_flags,
3832                                                            attr, external,
3833                                                            error);
3834                         if (ret)
3835                                 return ret;
3836                         ++actions_n;
3837                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3838                         break;
3839                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3840                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3841                         ret = flow_dv_validate_action_modify_tcp_seq
3842                                                                 (action_flags,
3843                                                                  actions,
3844                                                                  item_flags,
3845                                                                  error);
3846                         if (ret < 0)
3847                                 return ret;
3848                         /* Count all modify-header actions as one action. */
3849                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3850                                 ++actions_n;
3851                         action_flags |= actions->type ==
3852                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3853                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3854                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3855                         break;
3856                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3857                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3858                         ret = flow_dv_validate_action_modify_tcp_ack
3859                                                                 (action_flags,
3860                                                                  actions,
3861                                                                  item_flags,
3862                                                                  error);
3863                         if (ret < 0)
3864                                 return ret;
3865                         /* Count all modify-header actions as one action. */
3866                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3867                                 ++actions_n;
3868                         action_flags |= actions->type ==
3869                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3870                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3871                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3872                         break;
3873                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
3874                         break;
3875                 default:
3876                         return rte_flow_error_set(error, ENOTSUP,
3877                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3878                                                   actions,
3879                                                   "action not supported");
3880                 }
3881         }
3882         if ((action_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3883             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
3884                 return rte_flow_error_set(error, ENOTSUP,
3885                                           RTE_FLOW_ERROR_TYPE_ACTION,
3886                                           actions,
3887                                           "can't have vxlan and vlan"
3888                                           " actions in the same rule");
3889         /* Eswitch has a few restrictions on using items and actions. */
3890         if (attr->transfer) {
3891                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3892                         return rte_flow_error_set(error, ENOTSUP,
3893                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3894                                                   NULL,
3895                                                   "unsupported action FLAG");
3896                 if (action_flags & MLX5_FLOW_ACTION_MARK)
3897                         return rte_flow_error_set(error, ENOTSUP,
3898                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3899                                                   NULL,
3900                                                   "unsupported action MARK");
3901                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3902                         return rte_flow_error_set(error, ENOTSUP,
3903                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3904                                                   NULL,
3905                                                   "unsupported action QUEUE");
3906                 if (action_flags & MLX5_FLOW_ACTION_RSS)
3907                         return rte_flow_error_set(error, ENOTSUP,
3908                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3909                                                   NULL,
3910                                                   "unsupported action RSS");
3911                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3912                         return rte_flow_error_set(error, EINVAL,
3913                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3914                                                   actions,
3915                                                   "no fate action is found");
3916         } else {
3917                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3918                         return rte_flow_error_set(error, EINVAL,
3919                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3920                                                   actions,
3921                                                   "no fate action is found");
3922         }
3923         return 0;
3924 }
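
/*
 * Illustrative note (not part of the driver): all modify-header actions
 * in a rule share a single device action, which is why the cases above
 * increment actions_n only when no MLX5_FLOW_MODIFY_HDR_ACTIONS bit is
 * set yet. Assuming a rule with SET_IPV4_SRC followed by SET_TTL:
 *
 *   action_flags = 0, actions_n = 0
 *   SET_IPV4_SRC: no modify-header flag yet      -> actions_n = 1
 *   SET_TTL:      modify-header flag already set -> actions_n stays 1
 */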
3925
3926 /**
3927  * Internal preparation function. Allocates the DV flow structure;
3928  * its size is constant.
3929  *
3930  * @param[in] attr
3931  *   Pointer to the flow attributes.
3932  * @param[in] items
3933  *   Pointer to the list of items.
3934  * @param[in] actions
3935  *   Pointer to the list of actions.
3936  * @param[out] error
3937  *   Pointer to the error structure.
3938  *
3939  * @return
3940  *   Pointer to mlx5_flow object on success,
3941  *   otherwise NULL and rte_errno is set.
3942  */
3943 static struct mlx5_flow *
3944 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3945                 const struct rte_flow_item items[] __rte_unused,
3946                 const struct rte_flow_action actions[] __rte_unused,
3947                 struct rte_flow_error *error)
3948 {
3949         uint32_t size = sizeof(struct mlx5_flow);
3950         struct mlx5_flow *flow;
3951
3952         flow = rte_calloc(__func__, 1, size, 0);
3953         if (!flow) {
3954                 rte_flow_error_set(error, ENOMEM,
3955                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3956                                    "not enough memory to create flow");
3957                 return NULL;
3958         }
3959         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3960         return flow;
3961 }
3962
3963 #ifndef NDEBUG
3964 /**
3965  * Sanity check for match mask and value. Similar to check_valid_spec() in
3966  * the kernel driver. If a value bit is set outside its mask, it fails.
3967  *
3968  * @param match_mask
3969  *   Pointer to the match mask buffer.
3970  * @param match_value
3971  *   Pointer to the match value buffer.
3972  *
3973  * @return
3974  *   0 if valid, -EINVAL otherwise.
3975  */
3976 static int
3977 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3978 {
3979         uint8_t *m = match_mask;
3980         uint8_t *v = match_value;
3981         unsigned int i;
3982
3983         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3984                 if (v[i] & ~m[i]) {
3985                         DRV_LOG(ERR,
3986                                 "match_value differs from match_criteria"
3987                                 " %p[%u] != %p[%u]",
3988                                 match_value, i, match_mask, i);
3989                         return -EINVAL;
3990                 }
3991         }
3992         return 0;
3993 }
3994 #endif
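
/*
 * Usage sketch (illustrative only, debug builds): a value byte with bits
 * outside its mask is rejected.
 *
 *   uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f, };
 *   uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f, };
 *
 *   flow_dv_check_valid_spec(mask, value) == -EINVAL  (bit 0x10 unmasked)
 *   value[0] = 0x0f;
 *   flow_dv_check_valid_spec(mask, value) == 0
 */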
3995
3996 /**
3997  * Add Ethernet item to matcher and to the value.
3998  *
3999  * @param[in, out] matcher
4000  *   Flow matcher.
4001  * @param[in, out] key
4002  *   Flow matcher value.
4003  * @param[in] item
4004  *   Flow pattern to translate.
4005  * @param[in] inner
4006  *   Item is inner pattern.
4007  */
4008 static void
4009 flow_dv_translate_item_eth(void *matcher, void *key,
4010                            const struct rte_flow_item *item, int inner)
4011 {
4012         const struct rte_flow_item_eth *eth_m = item->mask;
4013         const struct rte_flow_item_eth *eth_v = item->spec;
4014         const struct rte_flow_item_eth nic_mask = {
4015                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4016                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4017                 .type = RTE_BE16(0xffff),
4018         };
4019         void *headers_m;
4020         void *headers_v;
4021         char *l24_v;
4022         unsigned int i;
4023
4024         if (!eth_v)
4025                 return;
4026         if (!eth_m)
4027                 eth_m = &nic_mask;
4028         if (inner) {
4029                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4030                                          inner_headers);
4031                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4032         } else {
4033                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4034                                          outer_headers);
4035                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4036         }
4037         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
4038                &eth_m->dst, sizeof(eth_m->dst));
4039         /* The value must be in the range of the mask. */
4040         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
4041         for (i = 0; i < sizeof(eth_m->dst); ++i)
4042                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
4043         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
4044                &eth_m->src, sizeof(eth_m->src));
4045         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
4046         /* The value must be in the range of the mask. */
4047         for (i = 0; i < sizeof(eth_m->src); ++i)
4048                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
4049         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4050                  rte_be_to_cpu_16(eth_m->type));
4051         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
4052         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
4053 }
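
/*
 * Usage sketch (illustrative, not called as-is by the driver): matching
 * on a destination MAC only, with matcher and key pointing to buffers of
 * MLX5_ST_SZ_BYTES(fte_match_param) bytes as set up in flow_dv_prepare().
 *
 *   static const struct rte_flow_item_eth spec = {
 *           .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *   };
 *   static const struct rte_flow_item_eth mask = {
 *           .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 *
 *   flow_dv_translate_item_eth(matcher, key, &item, 0);
 */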
4054
4055 /**
4056  * Add VLAN item to matcher and to the value.
4057  *
4058  * @param[in, out] dev_flow
4059  *   Flow descriptor.
4060  * @param[in, out] matcher
4061  *   Flow matcher.
4062  * @param[in, out] key
4063  *   Flow matcher value.
4064  * @param[in] item
4065  *   Flow pattern to translate.
4066  * @param[in] inner
4067  *   Item is inner pattern.
4068  */
4069 static void
4070 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
4071                             void *matcher, void *key,
4072                             const struct rte_flow_item *item,
4073                             int inner)
4074 {
4075         const struct rte_flow_item_vlan *vlan_m = item->mask;
4076         const struct rte_flow_item_vlan *vlan_v = item->spec;
4077         void *headers_m;
4078         void *headers_v;
4079         uint16_t tci_m;
4080         uint16_t tci_v;
4081
4082         if (!vlan_v)
4083                 return;
4084         if (!vlan_m)
4085                 vlan_m = &rte_flow_item_vlan_mask;
4086         if (inner) {
4087                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4088                                          inner_headers);
4089                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4090         } else {
4091                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4092                                          outer_headers);
4093                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4094                 /*
4095                  * This is a workaround; masks are not supported
4096                  * here and have been pre-validated.
4097                  */
4098                 dev_flow->dv.vf_vlan.tag =
4099                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
4100         }
4101         tci_m = rte_be_to_cpu_16(vlan_m->tci);
4102         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
4103         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
4104         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
4105         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
4106         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
4107         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
4108         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
4109         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
4110         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
4111         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4112                  rte_be_to_cpu_16(vlan_m->inner_type));
4113         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
4114                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
4115 }
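
/*
 * Worked example (illustrative): TCI 0xa07b decomposes as
 *   first_prio = 0xa07b >> 13       = 5     (PCP)
 *   first_cfi  = (0xa07b >> 12) & 1 = 0     (CFI/DEI)
 *   first_vid  = 0xa07b & 0x0fff    = 0x07b (VID 123)
 * The plain shifts above are enough because MLX5_SET() truncates the
 * written value to the destination field width.
 */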
4116
4117 /**
4118  * Add IPV4 item to matcher and to the value.
4119  *
4120  * @param[in, out] matcher
4121  *   Flow matcher.
4122  * @param[in, out] key
4123  *   Flow matcher value.
4124  * @param[in] item
4125  *   Flow pattern to translate.
4126  * @param[in] inner
4127  *   Item is inner pattern.
4128  * @param[in] group
4129  *   The group to insert the rule.
4130  */
4131 static void
4132 flow_dv_translate_item_ipv4(void *matcher, void *key,
4133                             const struct rte_flow_item *item,
4134                             int inner, uint32_t group)
4135 {
4136         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4137         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4138         const struct rte_flow_item_ipv4 nic_mask = {
4139                 .hdr = {
4140                         .src_addr = RTE_BE32(0xffffffff),
4141                         .dst_addr = RTE_BE32(0xffffffff),
4142                         .type_of_service = 0xff,
4143                         .next_proto_id = 0xff,
4144                 },
4145         };
4146         void *headers_m;
4147         void *headers_v;
4148         char *l24_m;
4149         char *l24_v;
4150         uint8_t tos;
4151
4152         if (inner) {
4153                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4154                                          inner_headers);
4155                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4156         } else {
4157                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4158                                          outer_headers);
4159                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4160         }
4161         if (group == 0)
4162                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4163         else
4164                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4165         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4166         if (!ipv4_v)
4167                 return;
4168         if (!ipv4_m)
4169                 ipv4_m = &nic_mask;
4170         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4171                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4172         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4173                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4174         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4175         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4176         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4177                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
4178         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4179                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
4180         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4181         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4182         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4183         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4184                  ipv4_m->hdr.type_of_service);
4185         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4186         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4187                  ipv4_m->hdr.type_of_service >> 2);
4188         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4189         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4190                  ipv4_m->hdr.next_proto_id);
4191         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4192                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4193 }
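
/*
 * Worked example (illustrative): type_of_service 0xb8 (DSCP 46 "EF",
 * ECN 0) splits into the two device fields as
 *   ip_dscp = 0xb8 >> 2  = 46
 *   ip_ecn  = 0xb8 & 0x3 = 0 (MLX5_SET() keeps only the 2-bit ECN)
 */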
4194
4195 /**
4196  * Add IPV6 item to matcher and to the value.
4197  *
4198  * @param[in, out] matcher
4199  *   Flow matcher.
4200  * @param[in, out] key
4201  *   Flow matcher value.
4202  * @param[in] item
4203  *   Flow pattern to translate.
4204  * @param[in] inner
4205  *   Item is inner pattern.
4206  * @param[in] group
4207  *   The group to insert the rule.
4208  */
4209 static void
4210 flow_dv_translate_item_ipv6(void *matcher, void *key,
4211                             const struct rte_flow_item *item,
4212                             int inner, uint32_t group)
4213 {
4214         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4215         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4216         const struct rte_flow_item_ipv6 nic_mask = {
4217                 .hdr = {
4218                         .src_addr =
4219                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4220                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4221                         .dst_addr =
4222                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4223                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4224                         .vtc_flow = RTE_BE32(0xffffffff),
4225                         .proto = 0xff,
4226                         .hop_limits = 0xff,
4227                 },
4228         };
4229         void *headers_m;
4230         void *headers_v;
4231         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4232         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4233         char *l24_m;
4234         char *l24_v;
4235         uint32_t vtc_m;
4236         uint32_t vtc_v;
4237         int i;
4238         int size;
4239
4240         if (inner) {
4241                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4242                                          inner_headers);
4243                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4244         } else {
4245                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4246                                          outer_headers);
4247                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4248         }
4249         if (group == 0)
4250                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4251         else
4252                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4253         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4254         if (!ipv6_v)
4255                 return;
4256         if (!ipv6_m)
4257                 ipv6_m = &nic_mask;
4258         size = sizeof(ipv6_m->hdr.dst_addr);
4259         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4260                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4261         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4262                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4263         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4264         for (i = 0; i < size; ++i)
4265                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4266         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4267                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4268         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4269                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4270         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4271         for (i = 0; i < size; ++i)
4272                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4273         /* TOS. */
4274         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4275         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
4276         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4277         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4278         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4279         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4280         /* Label. */
4281         if (inner) {
4282                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4283                          vtc_m);
4284                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4285                          vtc_v);
4286         } else {
4287                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4288                          vtc_m);
4289                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4290                          vtc_v);
4291         }
4292         /* Protocol. */
4293         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4294                  ipv6_m->hdr.proto);
4295         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4296                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4297 }
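
/*
 * Worked example (illustrative): vtc_flow 0x6b800000 (CPU order)
 * carries version 6, traffic class 0xb8 and flow label 0:
 *   ip_dscp    = (0x6b800000 >> 22) & 0x3f = 46
 *   ip_ecn     = (0x6b800000 >> 20) & 0x3  = 0
 *   flow label = 0x6b800000 & 0xfffff      = 0
 * The flow-label MLX5_SET() calls above keep only the low 20 bits of
 * vtc_m/vtc_v for the same reason.
 */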
4298
4299 /**
4300  * Add TCP item to matcher and to the value.
4301  *
4302  * @param[in, out] matcher
4303  *   Flow matcher.
4304  * @param[in, out] key
4305  *   Flow matcher value.
4306  * @param[in] item
4307  *   Flow pattern to translate.
4308  * @param[in] inner
4309  *   Item is inner pattern.
4310  */
4311 static void
4312 flow_dv_translate_item_tcp(void *matcher, void *key,
4313                            const struct rte_flow_item *item,
4314                            int inner)
4315 {
4316         const struct rte_flow_item_tcp *tcp_m = item->mask;
4317         const struct rte_flow_item_tcp *tcp_v = item->spec;
4318         void *headers_m;
4319         void *headers_v;
4320
4321         if (inner) {
4322                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4323                                          inner_headers);
4324                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4325         } else {
4326                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4327                                          outer_headers);
4328                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4329         }
4330         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4331         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4332         if (!tcp_v)
4333                 return;
4334         if (!tcp_m)
4335                 tcp_m = &rte_flow_item_tcp_mask;
4336         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4337                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4338         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4339                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4340         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4341                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4342         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4343                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4344         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4345                  tcp_m->hdr.tcp_flags);
4346         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4347                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4348 }
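
/*
 * Usage sketch (illustrative): matching TCP segments with only the SYN
 * flag set (RTE_TCP_SYN_FLAG comes from rte_tcp.h).
 *
 *   static const struct rte_flow_item_tcp spec = {
 *           .hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *   };
 *   static const struct rte_flow_item_tcp mask = {
 *           .hdr.tcp_flags = 0xff,
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_TCP,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 *
 *   flow_dv_translate_item_tcp(matcher, key, &item, 0);
 */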
4349
4350 /**
4351  * Add UDP item to matcher and to the value.
4352  *
4353  * @param[in, out] matcher
4354  *   Flow matcher.
4355  * @param[in, out] key
4356  *   Flow matcher value.
4357  * @param[in] item
4358  *   Flow pattern to translate.
4359  * @param[in] inner
4360  *   Item is inner pattern.
4361  */
4362 static void
4363 flow_dv_translate_item_udp(void *matcher, void *key,
4364                            const struct rte_flow_item *item,
4365                            int inner)
4366 {
4367         const struct rte_flow_item_udp *udp_m = item->mask;
4368         const struct rte_flow_item_udp *udp_v = item->spec;
4369         void *headers_m;
4370         void *headers_v;
4371
4372         if (inner) {
4373                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4374                                          inner_headers);
4375                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4376         } else {
4377                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4378                                          outer_headers);
4379                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4380         }
4381         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4382         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4383         if (!udp_v)
4384                 return;
4385         if (!udp_m)
4386                 udp_m = &rte_flow_item_udp_mask;
4387         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4388                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4389         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4390                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4391         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4392                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4393         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4394                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4395 }
4396
4397 /**
4398  * Add GRE optional Key item to matcher and to the value.
4399  *
4400  * @param[in, out] matcher
4401  *   Flow matcher.
4402  * @param[in, out] key
4403  *   Flow matcher value.
4404  * @param[in] item
4405  *   Flow pattern to translate.
4408  */
4409 static void
4410 flow_dv_translate_item_gre_key(void *matcher, void *key,
4411                                const struct rte_flow_item *item)
4412 {
4413         const rte_be32_t *key_m = item->mask;
4414         const rte_be32_t *key_v = item->spec;
4415         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4416         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4417         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4418
4419         if (!key_v)
4420                 return;
4421         if (!key_m)
4422                 key_m = &gre_key_default_mask;
4423         /* GRE K bit must be on and should already be validated */
4424         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4425         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
4426         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4427                  rte_be_to_cpu_32(*key_m) >> 8);
4428         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4429                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4430         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4431                  rte_be_to_cpu_32(*key_m) & 0xFF);
4432         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4433                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4434 }
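
/*
 * Worked example (illustrative): GRE key 0xaabbccdd is split across the
 * two device fields as
 *   gre_key_h = 0xaabbccdd >> 8   = 0xaabbcc (upper 24 bits)
 *   gre_key_l = 0xaabbccdd & 0xff = 0xdd     (lower 8 bits)
 */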
4435
4436 /**
4437  * Add GRE item to matcher and to the value.
4438  *
4439  * @param[in, out] matcher
4440  *   Flow matcher.
4441  * @param[in, out] key
4442  *   Flow matcher value.
4443  * @param[in] item
4444  *   Flow pattern to translate.
4445  * @param[in] inner
4446  *   Item is inner pattern.
4447  */
4448 static void
4449 flow_dv_translate_item_gre(void *matcher, void *key,
4450                            const struct rte_flow_item *item,
4451                            int inner)
4452 {
4453         const struct rte_flow_item_gre *gre_m = item->mask;
4454         const struct rte_flow_item_gre *gre_v = item->spec;
4455         void *headers_m;
4456         void *headers_v;
4457         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4458         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4459         struct {
4460                 union {
4461                         __extension__
4462                         struct {
4463                                 uint16_t version:3;
4464                                 uint16_t rsvd0:9;
4465                                 uint16_t s_present:1;
4466                                 uint16_t k_present:1;
4467                                 uint16_t rsvd_bit1:1;
4468                                 uint16_t c_present:1;
4469                         };
4470                         uint16_t value;
4471                 };
4472         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4473
4474         if (inner) {
4475                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4476                                          inner_headers);
4477                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4478         } else {
4479                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4480                                          outer_headers);
4481                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4482         }
4483         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4484         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4485         if (!gre_v)
4486                 return;
4487         if (!gre_m)
4488                 gre_m = &rte_flow_item_gre_mask;
4489         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4490                  rte_be_to_cpu_16(gre_m->protocol));
4491         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4492                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4493         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4494         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4495         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4496                  gre_crks_rsvd0_ver_m.c_present);
4497         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4498                  gre_crks_rsvd0_ver_v.c_present &
4499                  gre_crks_rsvd0_ver_m.c_present);
4500         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4501                  gre_crks_rsvd0_ver_m.k_present);
4502         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4503                  gre_crks_rsvd0_ver_v.k_present &
4504                  gre_crks_rsvd0_ver_m.k_present);
4505         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4506                  gre_crks_rsvd0_ver_m.s_present);
4507         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4508                  gre_crks_rsvd0_ver_v.s_present &
4509                  gre_crks_rsvd0_ver_m.s_present);
4510 }
4511
4512 /**
4513  * Add NVGRE item to matcher and to the value.
4514  *
4515  * @param[in, out] matcher
4516  *   Flow matcher.
4517  * @param[in, out] key
4518  *   Flow matcher value.
4519  * @param[in] item
4520  *   Flow pattern to translate.
4521  * @param[in] inner
4522  *   Item is inner pattern.
4523  */
4524 static void
4525 flow_dv_translate_item_nvgre(void *matcher, void *key,
4526                              const struct rte_flow_item *item,
4527                              int inner)
4528 {
4529         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4530         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4531         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4532         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4533         const char *tni_flow_id_m;
4534         const char *tni_flow_id_v;
4535         char *gre_key_m;
4536         char *gre_key_v;
4537         int size;
4538         int i;
4539
4540         /* For NVGRE, GRE header fields must be set with defined values. */
4541         const struct rte_flow_item_gre gre_spec = {
4542                 .c_rsvd0_ver = RTE_BE16(0x2000),
4543                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4544         };
4545         const struct rte_flow_item_gre gre_mask = {
4546                 .c_rsvd0_ver = RTE_BE16(0xB000),
4547                 .protocol = RTE_BE16(UINT16_MAX),
4548         };
4549         const struct rte_flow_item gre_item = {
4550                 .spec = &gre_spec,
4551                 .mask = &gre_mask,
4552                 .last = NULL,
4553         };
4554         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4555         if (!nvgre_v)
4556                 return;
4557         if (!nvgre_m)
4558                 nvgre_m = &rte_flow_item_nvgre_mask;
4559         tni_flow_id_m = (const char *)nvgre_m->tni;
4560         tni_flow_id_v = (const char *)nvgre_v->tni;
4561         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4560         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4561         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4562         memcpy(gre_key_m, tni_flow_id_m, size);
4563         for (i = 0; i < size; ++i)
4564                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4565 }
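
/*
 * Illustrative note: with the bitfield layout used in
 * flow_dv_translate_item_gre(), the gre_spec value 0x2000 sets only
 * k_present (bit 13), i.e. "GRE with key", while the gre_mask value
 * 0xB000 covers c_present (bit 15), k_present (bit 13) and s_present
 * (bit 12), pinning C=0, K=1, S=0 as NVGRE requires.
 */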
4566
4567 /**
4568  * Add VXLAN item to matcher and to the value.
4569  *
4570  * @param[in, out] matcher
4571  *   Flow matcher.
4572  * @param[in, out] key
4573  *   Flow matcher value.
4574  * @param[in] item
4575  *   Flow pattern to translate.
4576  * @param[in] inner
4577  *   Item is inner pattern.
4578  */
4579 static void
4580 flow_dv_translate_item_vxlan(void *matcher, void *key,
4581                              const struct rte_flow_item *item,
4582                              int inner)
4583 {
4584         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4585         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4586         void *headers_m;
4587         void *headers_v;
4588         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4589         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4590         char *vni_m;
4591         char *vni_v;
4592         uint16_t dport;
4593         int size;
4594         int i;
4595
4596         if (inner) {
4597                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4598                                          inner_headers);
4599                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4600         } else {
4601                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4602                                          outer_headers);
4603                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4604         }
4605         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4606                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4607         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4608                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4609                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4610         }
4611         if (!vxlan_v)
4612                 return;
4613         if (!vxlan_m)
4614                 vxlan_m = &rte_flow_item_vxlan_mask;
4615         size = sizeof(vxlan_m->vni);
4616         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4617         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4618         memcpy(vni_m, vxlan_m->vni, size);
4619         for (i = 0; i < size; ++i)
4620                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4621 }
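
/*
 * Usage sketch (illustrative): matching VNI 0x123456. When the pattern
 * did not already match on udp_dport, the function pins it to
 * MLX5_UDP_PORT_VXLAN (4789).
 *
 *   static const struct rte_flow_item_vxlan spec = {
 *           .vni = "\x12\x34\x56",
 *   };
 *   const struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *           .spec = &spec,
 *           .mask = &rte_flow_item_vxlan_mask,
 *   };
 *
 *   flow_dv_translate_item_vxlan(matcher, key, &item, 0);
 */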
4622
4623 /**
4624  * Add Geneve item to matcher and to the value.
4625  *
4626  * @param[in, out] matcher
4627  *   Flow matcher.
4628  * @param[in, out] key
4629  *   Flow matcher value.
4630  * @param[in] item
4631  *   Flow pattern to translate.
4632  * @param[in] inner
4633  *   Item is inner pattern.
4634  */
4636 static void
4637 flow_dv_translate_item_geneve(void *matcher, void *key,
4638                               const struct rte_flow_item *item, int inner)
4639 {
4640         const struct rte_flow_item_geneve *geneve_m = item->mask;
4641         const struct rte_flow_item_geneve *geneve_v = item->spec;
4642         void *headers_m;
4643         void *headers_v;
4644         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4645         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4646         uint16_t dport;
4647         uint16_t gbhdr_m;
4648         uint16_t gbhdr_v;
4649         char *vni_m;
4650         char *vni_v;
4651         size_t size, i;
4652
4653         if (inner) {
4654                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4655                                          inner_headers);
4656                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4657         } else {
4658                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4659                                          outer_headers);
4660                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4661         }
4662         dport = MLX5_UDP_PORT_GENEVE;
4663         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4664                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4665                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4666         }
4667         if (!geneve_v)
4668                 return;
4669         if (!geneve_m)
4670                 geneve_m = &rte_flow_item_geneve_mask;
4671         size = sizeof(geneve_m->vni);
4672         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
4673         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
4674         memcpy(vni_m, geneve_m->vni, size);
4675         for (i = 0; i < size; ++i)
4676                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
4677         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
4678                  rte_be_to_cpu_16(geneve_m->protocol));
4679         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
4680                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
4681         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
4682         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
4683         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
4684                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4685         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
4686                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4687         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
4688                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4689         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
4690                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
4691                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4692 }
4693
4694 /**
4695  * Add MPLS item to matcher and to the value.
4696  *
4697  * @param[in, out] matcher
4698  *   Flow matcher.
4699  * @param[in, out] key
4700  *   Flow matcher value.
4701  * @param[in] item
4702  *   Flow pattern to translate.
4703  * @param[in] prev_layer
4704  *   The protocol layer indicated in previous item.
4705  * @param[in] inner
4706  *   Item is inner pattern.
4707  */
4708 static void
4709 flow_dv_translate_item_mpls(void *matcher, void *key,
4710                             const struct rte_flow_item *item,
4711                             uint64_t prev_layer,
4712                             int inner)
4713 {
4714         const uint32_t *in_mpls_m = item->mask;
4715         const uint32_t *in_mpls_v = item->spec;
4716         uint32_t *out_mpls_m = NULL;
4717         uint32_t *out_mpls_v = NULL;
4718         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4719         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4720         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4721                                      misc_parameters_2);
4722         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4723         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4724         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4725
4726         switch (prev_layer) {
4727         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4728                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4729                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4730                          MLX5_UDP_PORT_MPLS);
4731                 break;
4732         case MLX5_FLOW_LAYER_GRE:
4733                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4734                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4735                          RTE_ETHER_TYPE_MPLS);
4736                 break;
4737         default:
4738                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4739                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4740                          IPPROTO_MPLS);
4741                 break;
4742         }
4743         if (!in_mpls_v)
4744                 return;
4745         if (!in_mpls_m)
4746                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4747         switch (prev_layer) {
4748         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4749                 out_mpls_m =
4750                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4751                                                  outer_first_mpls_over_udp);
4752                 out_mpls_v =
4753                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4754                                                  outer_first_mpls_over_udp);
4755                 break;
4756         case MLX5_FLOW_LAYER_GRE:
4757                 out_mpls_m =
4758                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4759                                                  outer_first_mpls_over_gre);
4760                 out_mpls_v =
4761                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4762                                                  outer_first_mpls_over_gre);
4763                 break;
4764         default:
4765                 /* Inner MPLS not over GRE is not supported. */
4766                 if (!inner) {
4767                         out_mpls_m =
4768                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4769                                                          misc2_m,
4770                                                          outer_first_mpls);
4771                         out_mpls_v =
4772                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4773                                                          misc2_v,
4774                                                          outer_first_mpls);
4775                 }
4776                 break;
4777         }
4778         if (out_mpls_m && out_mpls_v) {
4779                 *out_mpls_m = *in_mpls_m;
4780                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4781         }
4782 }
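
/*
 * Illustrative note (not part of the driver): the same label is matched
 * in a different place depending on what precedes it. Assuming a pattern
 * of eth / ipv4 / udp / mpls, prev_layer is MLX5_FLOW_LAYER_OUTER_L4_UDP,
 * so the label is written to outer_first_mpls_over_udp in misc2 and the
 * UDP destination port is pinned to MLX5_UDP_PORT_MPLS (6635, the
 * MPLS-in-UDP port of RFC 7510).
 */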
4783
4784 /**
4785  * Add META item to matcher and to the value.
4786  *
4787  * @param[in, out] matcher
4788  *   Flow matcher.
4789  * @param[in, out] key
4790  *   Flow matcher value.
4791  * @param[in] item
4792  *   Flow pattern to translate.
4795  */
4796 static void
4797 flow_dv_translate_item_meta(void *matcher, void *key,
4798                             const struct rte_flow_item *item)
4799 {
4800         const struct rte_flow_item_meta *meta_m;
4801         const struct rte_flow_item_meta *meta_v;
4802         void *misc2_m =
4803                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4804         void *misc2_v =
4805                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4806
4807         meta_m = (const void *)item->mask;
4808         if (!meta_m)
4809                 meta_m = &rte_flow_item_meta_mask;
4810         meta_v = (const void *)item->spec;
4811         if (meta_v) {
4812                 MLX5_SET(fte_match_set_misc2, misc2_m,
4813                          metadata_reg_a, meta_m->data);
4814                 MLX5_SET(fte_match_set_misc2, misc2_v,
4815                          metadata_reg_a, meta_v->data & meta_m->data);
4816         }
4817 }
4818
4819 /**
4820  * Add vport metadata Reg C0 item to matcher and to the value.
4821  *
4822  * @param[in, out] matcher
4823  *   Flow matcher.
4824  * @param[in, out] key
4825  *   Flow matcher value.
4826  * @param[in] value
4827  *   Register value to match.
4828  * @param[in] mask
4829  *   Mask of the value bits to match on.
4828  */
4829 static void
4830 flow_dv_translate_item_meta_vport(void *matcher, void *key,
4831                                   uint32_t value, uint32_t mask)
4832 {
4833         void *misc2_m =
4834                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4835         void *misc2_v =
4836                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4837
4838         MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
4839         MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
4840 }
4841
4842 /**
4843  * Add tag item to matcher
4844  *
4845  * @param[in, out] matcher
4846  *   Flow matcher.
4847  * @param[in, out] key
4848  *   Flow matcher value.
4849  * @param[in] item
4850  *   Flow pattern to translate.
4851  */
4852 static void
4853 flow_dv_translate_item_tag(void *matcher, void *key,
4854                            const struct rte_flow_item *item)
4855 {
4856         void *misc2_m =
4857                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4858         void *misc2_v =
4859                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4860         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
4861         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
4862         enum modify_reg reg = tag_v->id;
4863         rte_be32_t value = tag_v->data;
4864         rte_be32_t mask = tag_m->data;
4865
4866         switch (reg) {
4867         case REG_A:
4868                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4869                                 rte_be_to_cpu_32(mask));
4870                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4871                                 rte_be_to_cpu_32(value));
4872                 break;
4873         case REG_B:
4874                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b,
4875                                 rte_be_to_cpu_32(mask));
4876                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b,
4877                                 rte_be_to_cpu_32(value));
4878                 break;
4879         case REG_C_0:
4880                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0,
4881                                 rte_be_to_cpu_32(mask));
4882                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0,
4883                                 rte_be_to_cpu_32(value));
4884                 break;
4885         case REG_C_1:
4886                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1,
4887                                 rte_be_to_cpu_32(mask));
4888                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1,
4889                                 rte_be_to_cpu_32(value));
4890                 break;
4891         case REG_C_2:
4892                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2,
4893                                 rte_be_to_cpu_32(mask));
4894                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2,
4895                                 rte_be_to_cpu_32(value));
4896                 break;
4897         case REG_C_3:
4898                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3,
4899                                 rte_be_to_cpu_32(mask));
4900                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3,
4901                                 rte_be_to_cpu_32(value));
4902                 break;
4903         case REG_C_4:
4904                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4,
4905                                 rte_be_to_cpu_32(mask));
4906                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4,
4907                                 rte_be_to_cpu_32(value));
4908                 break;
4909         case REG_C_5:
4910                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5,
4911                                 rte_be_to_cpu_32(mask));
4912                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5,
4913                                 rte_be_to_cpu_32(value));
4914                 break;
4915         case REG_C_6:
4916                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6,
4917                                 rte_be_to_cpu_32(mask));
4918                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6,
4919                                 rte_be_to_cpu_32(value));
4920                 break;
4921         case REG_C_7:
4922                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7,
4923                                 rte_be_to_cpu_32(mask));
4924                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7,
4925                                 rte_be_to_cpu_32(value));
4926                 break;
4927         }
4928 }
4929
4930 /**
4931  * Add source vport match to the specified matcher.
4932  *
4933  * @param[in, out] matcher
4934  *   Flow matcher.
4935  * @param[in, out] key
4936  *   Flow matcher value.
4937  * @param[in] port
4938  *   Source vport value to match.
4939  * @param[in] mask
4940  *   Mask of the matched bits.
4941  */
4942 static void
4943 flow_dv_translate_item_source_vport(void *matcher, void *key,
4944                                     int16_t port, uint16_t mask)
4945 {
4946         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4947         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4948
4949         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4950         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4951 }
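
/*
 * Usage sketch (illustrative, compiled out): exact match on a source vport.
 * The wrapper name and the MLX5_FLOW_DV_EXAMPLES guard are hypothetical and
 * not part of the driver.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
flow_dv_example_source_vport(void *matcher, void *key)
{
        /* Match packets coming from vport 1, full 16-bit mask. */
        flow_dv_translate_item_source_vport(matcher, key, 1, 0xffff);
}
#endif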
4952
4953 /**
4954  * Translate port-id item to eswitch match on port-id.
4955  *
4956  * @param[in] dev
4957  *   The device to configure through.
4958  * @param[in, out] matcher
4959  *   Flow matcher.
4960  * @param[in, out] key
4961  *   Flow matcher value.
4962  * @param[in] item
4963  *   Flow pattern to translate.
4964  *
4965  * @return
4966  *   0 on success, a negative errno value otherwise.
4967  */
4968 static int
4969 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4970                                void *key, const struct rte_flow_item *item)
4971 {
4972         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4973         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4974         struct mlx5_priv *priv;
4975         uint16_t mask, id;
4976
4977         mask = pid_m ? pid_m->id : 0xffff;
4978         id = pid_v ? pid_v->id : dev->data->port_id;
4979         priv = mlx5_port_to_eswitch_info(id);
4980         if (!priv)
4981                 return -rte_errno;
4982         /* Translate to vport field or to metadata, depending on mode. */
4983         if (priv->vport_meta_mask)
4984                 flow_dv_translate_item_meta_vport(matcher, key,
4985                                                   priv->vport_meta_tag,
4986                                                   priv->vport_meta_mask);
4987         else
4988                 flow_dv_translate_item_source_vport(matcher, key,
4989                                                     priv->vport_id, mask);
4990         return 0;
4991 }
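
/*
 * Usage sketch (illustrative, compiled out): translate a PORT_ID item for
 * DPDK port 0. A NULL mask falls back to the default 0xffff above. The
 * MLX5_FLOW_DV_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
flow_dv_example_port_id(struct rte_eth_dev *dev, void *matcher, void *key)
{
        struct rte_flow_item_port_id spec = { .id = 0 };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_PORT_ID,
                .spec = &spec,
        };

        return flow_dv_translate_item_port_id(dev, matcher, key, &item);
}
#endif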
4992
4993 /**
4994  * Add ICMP6 item to matcher and to the value.
4995  *
4996  * @param[in, out] matcher
4997  *   Flow matcher.
4998  * @param[in, out] key
4999  *   Flow matcher value.
5000  * @param[in] item
5001  *   Flow pattern to translate.
5002  * @param[in] inner
5003  *   Item is inner pattern.
5004  */
5005 static void
5006 flow_dv_translate_item_icmp6(void *matcher, void *key,
5007                               const struct rte_flow_item *item,
5008                               int inner)
5009 {
5010         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
5011         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
5012         void *headers_m;
5013         void *headers_v;
5014         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5015                                      misc_parameters_3);
5016         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5017         if (inner) {
5018                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5019                                          inner_headers);
5020                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5021         } else {
5022                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5023                                          outer_headers);
5024                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5025         }
5026         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5027         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
5028         if (!icmp6_v)
5029                 return;
5030         if (!icmp6_m)
5031                 icmp6_m = &rte_flow_item_icmp6_mask;
5032         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
5033         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
5034                  icmp6_v->type & icmp6_m->type);
5035         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
5036         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
5037                  icmp6_v->code & icmp6_m->code);
5038 }
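
/*
 * Usage sketch (illustrative, compiled out): match outer ICMPv6 Neighbor
 * Solicitation (type 135, code 0). The MLX5_FLOW_DV_EXAMPLES guard is
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
flow_dv_example_icmp6(void *matcher, void *key)
{
        struct rte_flow_item_icmp6 spec = { .type = 135, .code = 0 };
        struct rte_flow_item_icmp6 mask = { .type = 0xff, .code = 0xff };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ICMP6,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_icmp6(matcher, key, &item, 0);
}
#endif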
5039
5040 /**
5041  * Add ICMP item to matcher and to the value.
5042  *
5043  * @param[in, out] matcher
5044  *   Flow matcher.
5045  * @param[in, out] key
5046  *   Flow matcher value.
5047  * @param[in] item
5048  *   Flow pattern to translate.
5049  * @param[in] inner
5050  *   Item is inner pattern.
5051  */
5052 static void
5053 flow_dv_translate_item_icmp(void *matcher, void *key,
5054                             const struct rte_flow_item *item,
5055                             int inner)
5056 {
5057         const struct rte_flow_item_icmp *icmp_m = item->mask;
5058         const struct rte_flow_item_icmp *icmp_v = item->spec;
5059         void *headers_m;
5060         void *headers_v;
5061         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5062                                      misc_parameters_3);
5063         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5064         if (inner) {
5065                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5066                                          inner_headers);
5067                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5068         } else {
5069                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5070                                          outer_headers);
5071                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5072         }
5073         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5074         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
5075         if (!icmp_v)
5076                 return;
5077         if (!icmp_m)
5078                 icmp_m = &rte_flow_item_icmp_mask;
5079         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
5080                  icmp_m->hdr.icmp_type);
5081         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
5082                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
5083         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
5084                  icmp_m->hdr.icmp_code);
5085         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
5086                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
5087 }
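
/*
 * Usage sketch (illustrative, compiled out): match outer ICMP echo request
 * (type 8), any code. The MLX5_FLOW_DV_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
flow_dv_example_icmp(void *matcher, void *key)
{
        struct rte_flow_item_icmp spec = { .hdr = { .icmp_type = 8 } };
        struct rte_flow_item_icmp mask = { .hdr = { .icmp_type = 0xff } };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ICMP,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_icmp(matcher, key, &item, 0);
}
#endif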
5088
5089 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
5090
5091 #define HEADER_IS_ZERO(match_criteria, headers)                              \
5092         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
5093                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
5094
5095 /**
5096  * Calculate flow matcher enable bitmap.
5097  *
5098  * @param match_criteria
5099  *   Pointer to flow matcher criteria.
5100  *
5101  * @return
5102  *   Bitmap of enabled fields.
5103  */
5104 static uint8_t
5105 flow_dv_matcher_enable(uint32_t *match_criteria)
5106 {
5107         uint8_t match_criteria_enable;
5108
5109         match_criteria_enable =
5110                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
5111                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
5112         match_criteria_enable |=
5113                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
5114                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
5115         match_criteria_enable |=
5116                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
5117                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
5118         match_criteria_enable |=
5119                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
5120                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
5121         match_criteria_enable |=
5122                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
5123                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
5124         return match_criteria_enable;
5125 }
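
/*
 * Worked example (illustrative): a mask that touches only the outer headers
 * and misc_parameters (e.g. an outer 5-tuple plus a VXLAN VNI) yields
 * match_criteria_enable ==
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *     (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT).
 */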
5126
5128 /**
5129  * Get a flow table.
5130  *
5131  * @param[in, out] dev
5132  *   Pointer to rte_eth_dev structure.
5133  * @param[in] table_id
5134  *   Table id to use.
5135  * @param[in] egress
5136  *   Direction of the table.
5137  * @param[in] transfer
5138  *   E-Switch or NIC flow.
5139  * @param[out] error
5140  *   Pointer to error structure.
5141  *
5142  * @return
5143  *   Returns the table resource based on the index, NULL in case of failure.
5144  */
5145 static struct mlx5_flow_tbl_resource *
5146 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
5147                          uint32_t table_id, uint8_t egress,
5148                          uint8_t transfer,
5149                          struct rte_flow_error *error)
5150 {
5151         struct mlx5_priv *priv = dev->data->dev_private;
5152         struct mlx5_ibv_shared *sh = priv->sh;
5153         struct mlx5_flow_tbl_resource *tbl;
5154
5155 #ifdef HAVE_MLX5DV_DR
5156         if (transfer) {
5157                 tbl = &sh->fdb_tbl[table_id];
5158                 if (!tbl->obj)
5159                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5160                                 (sh->fdb_domain, table_id);
5161         } else if (egress) {
5162                 tbl = &sh->tx_tbl[table_id];
5163                 if (!tbl->obj)
5164                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5165                                 (sh->tx_domain, table_id);
5166         } else {
5167                 tbl = &sh->rx_tbl[table_id];
5168                 if (!tbl->obj)
5169                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5170                                 (sh->rx_domain, table_id);
5171         }
5172         if (!tbl->obj) {
5173                 rte_flow_error_set(error, ENOMEM,
5174                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5175                                    NULL, "cannot create table");
5176                 return NULL;
5177         }
5178         rte_atomic32_inc(&tbl->refcnt);
5179         return tbl;
5180 #else
5181         (void)error;
5182         (void)tbl;
5183         if (transfer)
5184                 return &sh->fdb_tbl[table_id];
5185         else if (egress)
5186                 return &sh->tx_tbl[table_id];
5187         else
5188                 return &sh->rx_tbl[table_id];
5189 #endif
5190 }
5191
5192 /**
5193  * Release a flow table.
5194  *
5195  * @param[in] tbl
5196  *   Table resource to be released.
5197  *
5198  * @return
5199  *   Returns 0 if the table was released, 1 otherwise.
5200  */
5201 static int
5202 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
5203 {
5204         if (!tbl)
5205                 return 0;
5206         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
5207                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
5208                 tbl->obj = NULL;
5209                 return 0;
5210         }
5211         return 1;
5212 }
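
/*
 * Usage sketch (illustrative, compiled out): acquire ingress NIC table 1,
 * then drop the reference again. The MLX5_FLOW_DV_EXAMPLES guard is
 * hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
flow_dv_example_tbl(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct mlx5_flow_tbl_resource *tbl =
                flow_dv_tbl_resource_get(dev, 1, 0, 0, error);

        if (!tbl)
                return -rte_errno;
        return flow_dv_tbl_resource_release(tbl);
}
#endif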
5213
5214 /**
5215  * Register the flow matcher.
5216  *
5217  * @param[in, out] dev
5218  *   Pointer to rte_eth_dev structure.
5219  * @param[in, out] matcher
5220  *   Pointer to flow matcher.
5221  * @param[in, out] dev_flow
5222  *   Pointer to the dev_flow.
5223  * @param[out] error
5224  *   Pointer to error structure.
5225  *
5226  * @return
5227  *   0 on success, otherwise -errno and errno is set.
5228  */
5229 static int
5230 flow_dv_matcher_register(struct rte_eth_dev *dev,
5231                          struct mlx5_flow_dv_matcher *matcher,
5232                          struct mlx5_flow *dev_flow,
5233                          struct rte_flow_error *error)
5234 {
5235         struct mlx5_priv *priv = dev->data->dev_private;
5236         struct mlx5_ibv_shared *sh = priv->sh;
5237         struct mlx5_flow_dv_matcher *cache_matcher;
5238         struct mlx5dv_flow_matcher_attr dv_attr = {
5239                 .type = IBV_FLOW_ATTR_NORMAL,
5240                 .match_mask = (void *)&matcher->mask,
5241         };
5242         struct mlx5_flow_tbl_resource *tbl = NULL;
5243
5244         /* Lookup from cache. */
5245         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
5246                 if (matcher->crc == cache_matcher->crc &&
5247                     matcher->priority == cache_matcher->priority &&
5248                     matcher->egress == cache_matcher->egress &&
5249                     matcher->group == cache_matcher->group &&
5250                     matcher->transfer == cache_matcher->transfer &&
5251                     !memcmp((const void *)matcher->mask.buf,
5252                             (const void *)cache_matcher->mask.buf,
5253                             cache_matcher->mask.size)) {
5254                         DRV_LOG(DEBUG,
5255                                 "priority %hd use %s matcher %p: refcnt %d++",
5256                                 cache_matcher->priority,
5257                                 cache_matcher->egress ? "tx" : "rx",
5258                                 (void *)cache_matcher,
5259                                 rte_atomic32_read(&cache_matcher->refcnt));
5260                         rte_atomic32_inc(&cache_matcher->refcnt);
5261                         dev_flow->dv.matcher = cache_matcher;
5262                         return 0;
5263                 }
5264         }
5265         /* Register new matcher. */
5266         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
5267         if (!cache_matcher)
5268                 return rte_flow_error_set(error, ENOMEM,
5269                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5270                                           "cannot allocate matcher memory");
5271         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
5272                                        matcher->egress, matcher->transfer,
5273                                        error);
5274         if (!tbl) {
5275                 rte_free(cache_matcher);
5276                 return rte_flow_error_set(error, ENOMEM,
5277                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5278                                           NULL, "cannot create table");
5279         }
5280         *cache_matcher = *matcher;
5281         dv_attr.match_criteria_enable =
5282                 flow_dv_matcher_enable(cache_matcher->mask.buf);
5283         dv_attr.priority = matcher->priority;
5284         if (matcher->egress)
5285                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
5286         cache_matcher->matcher_object =
5287                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
5288         if (!cache_matcher->matcher_object) {
5289                 rte_free(cache_matcher);
5290 #ifdef HAVE_MLX5DV_DR
5291                 flow_dv_tbl_resource_release(tbl);
5292 #endif
5293                 return rte_flow_error_set(error, ENOMEM,
5294                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5295                                           NULL, "cannot create matcher");
5296         }
5297         rte_atomic32_inc(&cache_matcher->refcnt);
5298         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
5299         dev_flow->dv.matcher = cache_matcher;
5300         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
5301                 cache_matcher->priority,
5302                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
5303                 rte_atomic32_read(&cache_matcher->refcnt));
5304         rte_atomic32_inc(&tbl->refcnt);
5305         return 0;
5306 }
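
/*
 * Note (illustrative): matchers are cached on sh->matchers keyed on the
 * (crc, priority, egress, group, transfer, mask) tuple; a cache hit reuses
 * the existing matcher object and only bumps its refcnt.
 */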
5307
5308 /**
5309  * Find existing tag resource or create and register a new one.
5310  *
5311  * @param[in, out] dev
5312  *   Pointer to rte_eth_dev structure.
5313  * @param[in, out] resource
5314  *   Pointer to tag resource.
5315  * @param[in, out] dev_flow
5316  *   Pointer to the dev_flow.
5317  * @param[out] error
5318  *   Pointer to error structure.
5319  *
5320  * @return
5321  *   0 on success, otherwise -errno and errno is set.
5322  */
5323 static int
5324 flow_dv_tag_resource_register
5325                         (struct rte_eth_dev *dev,
5326                          struct mlx5_flow_dv_tag_resource *resource,
5327                          struct mlx5_flow *dev_flow,
5328                          struct rte_flow_error *error)
5329 {
5330         struct mlx5_priv *priv = dev->data->dev_private;
5331         struct mlx5_ibv_shared *sh = priv->sh;
5332         struct mlx5_flow_dv_tag_resource *cache_resource;
5333
5334         /* Lookup a matching resource from cache. */
5335         LIST_FOREACH(cache_resource, &sh->tags, next) {
5336                 if (resource->tag == cache_resource->tag) {
5337                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5338                                 (void *)cache_resource,
5339                                 rte_atomic32_read(&cache_resource->refcnt));
5340                         rte_atomic32_inc(&cache_resource->refcnt);
5341                         dev_flow->flow->tag_resource = cache_resource;
5342                         return 0;
5343                 }
5344         }
5345         /* Register new resource. */
5346         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5347         if (!cache_resource)
5348                 return rte_flow_error_set(error, ENOMEM,
5349                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5350                                           "cannot allocate resource memory");
5351         *cache_resource = *resource;
5352         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5353                 (resource->tag);
5354         if (!cache_resource->action) {
5355                 rte_free(cache_resource);
5356                 return rte_flow_error_set(error, ENOMEM,
5357                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5358                                           NULL, "cannot create action");
5359         }
5360         rte_atomic32_init(&cache_resource->refcnt);
5361         rte_atomic32_inc(&cache_resource->refcnt);
5362         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5363         dev_flow->flow->tag_resource = cache_resource;
5364         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5365                 (void *)cache_resource,
5366                 rte_atomic32_read(&cache_resource->refcnt));
5367         return 0;
5368 }
5369
5370 /**
5371  * Release the tag.
5372  *
5373  * @param dev
5374  *   Pointer to Ethernet device.
5375  * @param tag
5376  *   Pointer to the tag resource.
5377  *
5378  * @return
5379  *   1 while a reference on it exists, 0 when freed.
5380  */
5381 static int
5382 flow_dv_tag_release(struct rte_eth_dev *dev,
5383                     struct mlx5_flow_dv_tag_resource *tag)
5384 {
5385         assert(tag);
5386         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5387                 dev->data->port_id, (void *)tag,
5388                 rte_atomic32_read(&tag->refcnt));
5389         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5390                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5391                 LIST_REMOVE(tag, next);
5392                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5393                         dev->data->port_id, (void *)tag);
5394                 rte_free(tag);
5395                 return 0;
5396         }
5397         return 1;
5398 }
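
/*
 * Note (illustrative): tag resources are cached per tag value. Registering
 * an already-known tag returns the cached action and bumps its refcnt;
 * each flow_dv_tag_release() drops one reference and destroys the action
 * only when the count reaches zero.
 */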
5399
5400 /**
5401  * Translate port ID action to vport.
5402  *
5403  * @param[in] dev
5404  *   Pointer to rte_eth_dev structure.
5405  * @param[in] action
5406  *   Pointer to the port ID action.
5407  * @param[out] dst_port_id
5408  *   The target port ID.
5409  * @param[out] error
5410  *   Pointer to the error structure.
5411  *
5412  * @return
5413  *   0 on success, a negative errno value otherwise and rte_errno is set.
5414  */
5415 static int
5416 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5417                                  const struct rte_flow_action *action,
5418                                  uint32_t *dst_port_id,
5419                                  struct rte_flow_error *error)
5420 {
5421         uint32_t port;
5422         struct mlx5_priv *priv;
5423         const struct rte_flow_action_port_id *conf =
5424                         (const struct rte_flow_action_port_id *)action->conf;
5425
5426         port = conf->original ? dev->data->port_id : conf->id;
5427         priv = mlx5_port_to_eswitch_info(port);
5428         if (!priv)
5429                 return rte_flow_error_set(error, -rte_errno,
5430                                           RTE_FLOW_ERROR_TYPE_ACTION,
5431                                           NULL,
5432                                           "No eswitch info was found for port");
5433         if (priv->vport_meta_mask)
5434                 *dst_port_id = priv->vport_meta_tag;
5435         else
5436                 *dst_port_id = priv->vport_id;
5437         return 0;
5438 }
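
/*
 * Usage sketch (illustrative, compiled out): resolve a PORT_ID action
 * targeting DPDK port 0 into the vport (or vport metadata tag) to forward
 * to. The MLX5_FLOW_DV_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
flow_dv_example_action_port_id(struct rte_eth_dev *dev,
                               struct rte_flow_error *error)
{
        struct rte_flow_action_port_id conf = { .original = 0, .id = 0 };
        struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = &conf,
        };
        uint32_t dst_port_id = 0;

        return flow_dv_translate_action_port_id(dev, &action, &dst_port_id,
                                                error);
}
#endif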
5439
5440 /**
5441  * Add Tx queue matcher
5442  *
5443  * @param[in] dev
5444  *   Pointer to the dev struct.
5445  * @param[in, out] matcher
5446  *   Flow matcher.
5447  * @param[in, out] key
5448  *   Flow matcher value.
5449  * @param[in] item
5450  *   Flow pattern to translate.
5453  */
5454 static void
5455 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
5456                                 void *matcher, void *key,
5457                                 const struct rte_flow_item *item)
5458 {
5459         const struct mlx5_rte_flow_item_tx_queue *queue_m;
5460         const struct mlx5_rte_flow_item_tx_queue *queue_v;
5461         void *misc_m =
5462                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5463         void *misc_v =
5464                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5465         struct mlx5_txq_ctrl *txq;
5466         uint32_t queue;
5467
5469         queue_m = (const void *)item->mask;
5470         if (!queue_m)
5471                 return;
5472         queue_v = (const void *)item->spec;
5473         if (!queue_v)
5474                 return;
5475         txq = mlx5_txq_get(dev, queue_v->queue);
5476         if (!txq)
5477                 return;
5478         queue = txq->obj->sq->id;
5479         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
5480         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
5481                  queue & queue_m->queue);
5482         mlx5_txq_release(dev, queue_v->queue);
5483 }
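
/*
 * Usage sketch (illustrative, compiled out): match traffic sent through Tx
 * queue 0, which is translated to the queue's SQ number. The
 * MLX5_FLOW_DV_EXAMPLES guard is hypothetical.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
flow_dv_example_tx_queue(struct rte_eth_dev *dev, void *matcher, void *key)
{
        struct mlx5_rte_flow_item_tx_queue spec = { .queue = 0 };
        struct mlx5_rte_flow_item_tx_queue mask = { .queue = UINT32_MAX };
        struct rte_flow_item item = {
                .type = (enum rte_flow_item_type)
                        MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_tx_queue(dev, matcher, key, &item);
}
#endif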
5484
5485 /**
5486  * Fill the flow with DV spec.
5487  *
5488  * @param[in] dev
5489  *   Pointer to rte_eth_dev structure.
5490  * @param[in, out] dev_flow
5491  *   Pointer to the sub flow.
5492  * @param[in] attr
5493  *   Pointer to the flow attributes.
5494  * @param[in] items
5495  *   Pointer to the list of items.
5496  * @param[in] actions
5497  *   Pointer to the list of actions.
5498  * @param[out] error
5499  *   Pointer to the error structure.
5500  *
5501  * @return
5502  *   0 on success, a negative errno value otherwise and rte_errno is set.
5503  */
5504 static int
5505 flow_dv_translate(struct rte_eth_dev *dev,
5506                   struct mlx5_flow *dev_flow,
5507                   const struct rte_flow_attr *attr,
5508                   const struct rte_flow_item items[],
5509                   const struct rte_flow_action actions[],
5510                   struct rte_flow_error *error)
5511 {
5512         struct mlx5_priv *priv = dev->data->dev_private;
5513         struct rte_flow *flow = dev_flow->flow;
5514         uint64_t item_flags = 0;
5515         uint64_t last_item = 0;
5516         uint64_t action_flags = 0;
5517         uint64_t priority = attr->priority;
5518         struct mlx5_flow_dv_matcher matcher = {
5519                 .mask = {
5520                         .size = sizeof(matcher.mask.buf),
5521                 },
5522         };
5523         int actions_n = 0;
5524         bool actions_end = false;
5525         struct mlx5_flow_dv_modify_hdr_resource res = {
5526                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5527                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5528         };
5529         union flow_dv_attr flow_attr = { .attr = 0 };
5530         struct mlx5_flow_dv_tag_resource tag_resource;
5531         uint32_t modify_action_position = UINT32_MAX;
5532         void *match_mask = matcher.mask.buf;
5533         void *match_value = dev_flow->dv.value.buf;
5534         uint8_t next_protocol = 0xff;
5535         struct rte_vlan_hdr vlan = { 0 };
5536         uint32_t table;
5537         int ret = 0;
5538
5539         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5540                                        &table, error);
5541         if (ret)
5542                 return ret;
5543         flow->group = table;
5544         if (attr->transfer)
5545                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5546         if (priority == MLX5_FLOW_PRIO_RSVD)
5547                 priority = priv->config.flow_prio - 1;
5548         for (; !actions_end ; actions++) {
5549                 const struct rte_flow_action_queue *queue;
5550                 const struct rte_flow_action_rss *rss;
5551                 const struct rte_flow_action *action = actions;
5552                 const struct rte_flow_action_count *count = action->conf;
5553                 const uint8_t *rss_key;
5554                 const struct rte_flow_action_jump *jump_data;
5555                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5556                 struct mlx5_flow_tbl_resource *tbl;
5557                 uint32_t port_id = 0;
5558                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5559                 int action_type = actions->type;
5560                 const struct rte_flow_action *found_action = NULL;
5561
5562                 switch (action_type) {
5563                 case RTE_FLOW_ACTION_TYPE_VOID:
5564                         break;
5565                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5566                         if (flow_dv_translate_action_port_id(dev, action,
5567                                                              &port_id, error))
5568                                 return -rte_errno;
5569                         port_id_resource.port_id = port_id;
5570                         if (flow_dv_port_id_action_resource_register
5571                             (dev, &port_id_resource, dev_flow, error))
5572                                 return -rte_errno;
5573                         dev_flow->dv.actions[actions_n++] =
5574                                 dev_flow->dv.port_id_action->action;
5575                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5576                         break;
5577                 case RTE_FLOW_ACTION_TYPE_FLAG:
5578                         tag_resource.tag =
5579                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5580                         if (!flow->tag_resource)
5581                                 if (flow_dv_tag_resource_register
5582                                     (dev, &tag_resource, dev_flow, error))
5583                                         return -rte_errno;
5584                         dev_flow->dv.actions[actions_n++] =
5585                                 flow->tag_resource->action;
5586                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5587                         break;
5588                 case RTE_FLOW_ACTION_TYPE_MARK:
5589                         tag_resource.tag = mlx5_flow_mark_set
5590                               (((const struct rte_flow_action_mark *)
5591                                (actions->conf))->id);
5592                         if (!flow->tag_resource)
5593                                 if (flow_dv_tag_resource_register
5594                                     (dev, &tag_resource, dev_flow, error))
5595                                         return -rte_errno;
5596                         dev_flow->dv.actions[actions_n++] =
5597                                 flow->tag_resource->action;
5598                         action_flags |= MLX5_FLOW_ACTION_MARK;
5599                         break;
5600                 case RTE_FLOW_ACTION_TYPE_DROP:
5601                         action_flags |= MLX5_FLOW_ACTION_DROP;
5602                         break;
5603                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5604                         queue = actions->conf;
5605                         flow->rss.queue_num = 1;
5606                         (*flow->queue)[0] = queue->index;
5607                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5608                         break;
5609                 case RTE_FLOW_ACTION_TYPE_RSS:
5610                         rss = actions->conf;
5611                         if (flow->queue)
5612                                 memcpy((*flow->queue), rss->queue,
5613                                        rss->queue_num * sizeof(uint16_t));
5614                         flow->rss.queue_num = rss->queue_num;
5615                         /* NULL RSS key indicates default RSS key. */
5616                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5617                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5618                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5619                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5620                         flow->rss.level = rss->level;
5621                         action_flags |= MLX5_FLOW_ACTION_RSS;
5622                         break;
5623                 case RTE_FLOW_ACTION_TYPE_COUNT:
5624                         if (!priv->config.devx) {
5625                                 rte_errno = ENOTSUP;
5626                                 goto cnt_err;
5627                         }
5628                         flow->counter = flow_dv_counter_alloc(dev,
5629                                                               count->shared,
5630                                                               count->id,
5631                                                               flow->group);
5632                         if (flow->counter == NULL)
5633                                 goto cnt_err;
5634                         dev_flow->dv.actions[actions_n++] =
5635                                 flow->counter->action;
5636                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5637                         break;
5638 cnt_err:
5639                         if (rte_errno == ENOTSUP)
5640                                 return rte_flow_error_set
5641                                               (error, ENOTSUP,
5642                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5643                                                NULL,
5644                                                "count action not supported");
5645                         else
5646                                 return rte_flow_error_set
5647                                                 (error, rte_errno,
5648                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5649                                                  action,
5650                                                  "cannot create counter"
5651                                                   " object.");
5652                         break;
5653                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5654                         dev_flow->dv.actions[actions_n++] =
5655                                                 priv->sh->pop_vlan_action;
5656                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5657                         break;
5658                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5659                         flow_dev_get_vlan_info_from_items(items, &vlan);
5660                         vlan.eth_proto = rte_be_to_cpu_16
5661                              ((((const struct rte_flow_action_of_push_vlan *)
5662                                                    actions->conf)->ethertype));
5663                         found_action = mlx5_flow_find_action
5664                                         (actions + 1,
5665                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
5666                         if (found_action)
5667                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5668                         found_action = mlx5_flow_find_action
5669                                         (actions + 1,
5670                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
5671                         if (found_action)
5672                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5673                         if (flow_dv_create_action_push_vlan
5674                                             (dev, attr, &vlan, dev_flow, error))
5675                                 return -rte_errno;
5676                         dev_flow->dv.actions[actions_n++] =
5677                                            dev_flow->dv.push_vlan_res->action;
5678                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5679                         break;
5680                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5681                         /* The of_push_vlan action handled this action. */
5682                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
5683                         break;
5684                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5685                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5686                                 break;
5687                         flow_dev_get_vlan_info_from_items(items, &vlan);
5688                         mlx5_update_vlan_vid_pcp(actions, &vlan);
5689                         /* If no VLAN push - this is a modify header action */
5690                         if (flow_dv_convert_action_modify_vlan_vid
5691                                                         (&res, actions, error))
5692                                 return -rte_errno;
5693                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5694                         break;
5695                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5696                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5697                         if (flow_dv_create_action_l2_encap(dev, actions,
5698                                                            dev_flow,
5699                                                            attr->transfer,
5700                                                            error))
5701                                 return -rte_errno;
5702                         dev_flow->dv.actions[actions_n++] =
5703                                 dev_flow->dv.encap_decap->verbs_action;
5704                         action_flags |= actions->type ==
5705                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5706                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5707                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5708                         break;
5709                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5710                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5711                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5712                                                            attr->transfer,
5713                                                            error))
5714                                 return -rte_errno;
5715                         dev_flow->dv.actions[actions_n++] =
5716                                 dev_flow->dv.encap_decap->verbs_action;
5717                         action_flags |= actions->type ==
5718                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5719                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5720                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5721                         break;
5722                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5723                         /* Handle encap with preceding decap. */
5724                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5725                                 if (flow_dv_create_action_raw_encap
5726                                         (dev, actions, dev_flow, attr, error))
5727                                         return -rte_errno;
5728                                 dev_flow->dv.actions[actions_n++] =
5729                                         dev_flow->dv.encap_decap->verbs_action;
5730                         } else {
5731                                 /* Handle encap without preceding decap. */
5732                                 if (flow_dv_create_action_l2_encap
5733                                     (dev, actions, dev_flow, attr->transfer,
5734                                      error))
5735                                         return -rte_errno;
5736                                 dev_flow->dv.actions[actions_n++] =
5737                                         dev_flow->dv.encap_decap->verbs_action;
5738                         }
5739                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5740                         break;
5741                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5742                         /* Check if this decap is followed by encap. */
5743                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5744                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5745                                action++) {
5746                         }
5747                         /* Handle decap only if it isn't followed by encap. */
5748                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5749                                 if (flow_dv_create_action_l2_decap
5750                                     (dev, dev_flow, attr->transfer, error))
5751                                         return -rte_errno;
5752                                 dev_flow->dv.actions[actions_n++] =
5753                                         dev_flow->dv.encap_decap->verbs_action;
5754                         }
5755                         /* If decap is followed by encap, handle it at encap. */
5756                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5757                         break;
5758                 case RTE_FLOW_ACTION_TYPE_JUMP:
5759                         jump_data = action->conf;
5760                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5761                                                        jump_data->group, &table,
5762                                                        error);
5763                         if (ret)
5764                                 return ret;
5765                         tbl = flow_dv_tbl_resource_get(dev, table,
5766                                                        attr->egress,
5767                                                        attr->transfer, error);
5768                         if (!tbl)
5769                                 return rte_flow_error_set
5770                                                 (error, errno,
5771                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5772                                                  NULL,
5773                                                  "cannot create jump action.");
5774                         jump_tbl_resource.tbl = tbl;
5775                         if (flow_dv_jump_tbl_resource_register
5776                             (dev, &jump_tbl_resource, dev_flow, error)) {
5777                                 flow_dv_tbl_resource_release(tbl);
5778                                 return rte_flow_error_set
5779                                                 (error, errno,
5780                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5781                                                  NULL,
5782                                                  "cannot create jump action.");
5783                         }
5784                         dev_flow->dv.actions[actions_n++] =
5785                                 dev_flow->dv.jump->action;
5786                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5787                         break;
5788                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5789                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5790                         if (flow_dv_convert_action_modify_mac(&res, actions,
5791                                                               error))
5792                                 return -rte_errno;
5793                         action_flags |= actions->type ==
5794                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5795                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5796                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5797                         break;
5798                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5799                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5800                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5801                                                                error))
5802                                 return -rte_errno;
5803                         action_flags |= actions->type ==
5804                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5805                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5806                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5807                         break;
5808                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5809                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5810                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5811                                                                error))
5812                                 return -rte_errno;
5813                         action_flags |= actions->type ==
5814                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5815                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5816                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5817                         break;
5818                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5819                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5820                         if (flow_dv_convert_action_modify_tp(&res, actions,
5821                                                              items, &flow_attr,
5822                                                              error))
5823                                 return -rte_errno;
5824                         action_flags |= actions->type ==
5825                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5826                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5827                                         MLX5_FLOW_ACTION_SET_TP_DST;
5828                         break;
5829                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5830                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5831                                                                   &flow_attr,
5832                                                                   error))
5833                                 return -rte_errno;
5834                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5835                         break;
5836                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5837                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5838                                                              items, &flow_attr,
5839                                                              error))
5840                                 return -rte_errno;
5841                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5842                         break;
5843                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5844                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5845                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5846                                                                   error))
5847                                 return -rte_errno;
5848                         action_flags |= actions->type ==
5849                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5850                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
5851                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5852                         break;
5853
5854                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5855                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5856                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
5857                                                                   error))
5858                                 return -rte_errno;
5859                         action_flags |= actions->type ==
5860                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5861                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
5862                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
5863                         break;
5864                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5865                         if (flow_dv_convert_action_set_reg(&res, actions,
5866                                                            error))
5867                                 return -rte_errno;
5868                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5869                         break;
5870                 case RTE_FLOW_ACTION_TYPE_END:
5871                         actions_end = true;
5872                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
5873                                 /* create modify action if needed. */
5874                                 if (flow_dv_modify_hdr_resource_register
5875                                                                 (dev, &res,
5876                                                                  dev_flow,
5877                                                                  error))
5878                                         return -rte_errno;
5879                                 dev_flow->dv.actions[modify_action_position] =
5880                                         dev_flow->dv.modify_hdr->verbs_action;
5881                         }
5882                         break;
5883                 default:
5884                         break;
5885                 }
5886                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
5887                     modify_action_position == UINT32_MAX)
5888                         modify_action_position = actions_n++;
5889         }
5890         dev_flow->dv.actions_n = actions_n;
5891         dev_flow->actions = action_flags;
5892         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5893                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5894                 int item_type = items->type;
5895
5896                 switch (item_type) {
5897                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5898                         flow_dv_translate_item_port_id(dev, match_mask,
5899                                                        match_value, items);
5900                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5901                         break;
5902                 case RTE_FLOW_ITEM_TYPE_ETH:
5903                         flow_dv_translate_item_eth(match_mask, match_value,
5904                                                    items, tunnel);
5905                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5906                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5907                                              MLX5_FLOW_LAYER_OUTER_L2;
5908                         break;
5909                 case RTE_FLOW_ITEM_TYPE_VLAN:
5910                         flow_dv_translate_item_vlan(dev_flow,
5911                                                     match_mask, match_value,
5912                                                     items, tunnel);
5913                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5914                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5915                                               MLX5_FLOW_LAYER_INNER_VLAN) :
5916                                              (MLX5_FLOW_LAYER_OUTER_L2 |
5917                                               MLX5_FLOW_LAYER_OUTER_VLAN);
5918                         break;
5919                 case RTE_FLOW_ITEM_TYPE_IPV4:
5920                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5921                                                   &item_flags, &tunnel);
5922                         flow_dv_translate_item_ipv4(match_mask, match_value,
5923                                                     items, tunnel, flow->group);
5924                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5925                         dev_flow->dv.hash_fields |=
5926                                 mlx5_flow_hashfields_adjust
5927                                         (dev_flow, tunnel,
5928                                          MLX5_IPV4_LAYER_TYPES,
5929                                          MLX5_IPV4_IBV_RX_HASH);
5930                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5931                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5932                         if (items->mask != NULL &&
5933                             ((const struct rte_flow_item_ipv4 *)
5934                              items->mask)->hdr.next_proto_id) {
5935                                 next_protocol =
5936                                         ((const struct rte_flow_item_ipv4 *)
5937                                          (items->spec))->hdr.next_proto_id;
5938                                 next_protocol &=
5939                                         ((const struct rte_flow_item_ipv4 *)
5940                                          (items->mask))->hdr.next_proto_id;
5941                         } else {
5942                                 /* Reset for inner layer. */
5943                                 next_protocol = 0xff;
5944                         }
5945                         break;
5946                 case RTE_FLOW_ITEM_TYPE_IPV6:
5947                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5948                                                   &item_flags, &tunnel);
5949                         flow_dv_translate_item_ipv6(match_mask, match_value,
5950                                                     items, tunnel, flow->group);
5951                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5952                         dev_flow->dv.hash_fields |=
5953                                 mlx5_flow_hashfields_adjust
5954                                         (dev_flow, tunnel,
5955                                          MLX5_IPV6_LAYER_TYPES,
5956                                          MLX5_IPV6_IBV_RX_HASH);
5957                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5958                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5959                         if (items->mask != NULL &&
5960                             ((const struct rte_flow_item_ipv6 *)
5961                              items->mask)->hdr.proto) {
5962                                 next_protocol =
5963                                         ((const struct rte_flow_item_ipv6 *)
5964                                          items->spec)->hdr.proto;
5965                                 next_protocol &=
5966                                         ((const struct rte_flow_item_ipv6 *)
5967                                          items->mask)->hdr.proto;
5968                         } else {
5969                                 /* Reset for inner layer. */
5970                                 next_protocol = 0xff;
5971                         }
5972                         break;
5973                 case RTE_FLOW_ITEM_TYPE_TCP:
5974                         flow_dv_translate_item_tcp(match_mask, match_value,
5975                                                    items, tunnel);
5976                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5977                         dev_flow->dv.hash_fields |=
5978                                 mlx5_flow_hashfields_adjust
5979                                         (dev_flow, tunnel, ETH_RSS_TCP,
5980                                          IBV_RX_HASH_SRC_PORT_TCP |
5981                                          IBV_RX_HASH_DST_PORT_TCP);
5982                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5983                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5984                         break;
5985                 case RTE_FLOW_ITEM_TYPE_UDP:
5986                         flow_dv_translate_item_udp(match_mask, match_value,
5987                                                    items, tunnel);
5988                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5989                         dev_flow->dv.hash_fields |=
5990                                 mlx5_flow_hashfields_adjust
5991                                         (dev_flow, tunnel, ETH_RSS_UDP,
5992                                          IBV_RX_HASH_SRC_PORT_UDP |
5993                                          IBV_RX_HASH_DST_PORT_UDP);
5994                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5995                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5996                         break;
5997                 case RTE_FLOW_ITEM_TYPE_GRE:
5998                         flow_dv_translate_item_gre(match_mask, match_value,
5999                                                    items, tunnel);
6000                         last_item = MLX5_FLOW_LAYER_GRE;
6001                         break;
6002                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6003                         flow_dv_translate_item_gre_key(match_mask,
6004                                                        match_value, items);
6005                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
6006                         break;
6007                 case RTE_FLOW_ITEM_TYPE_NVGRE:
6008                         flow_dv_translate_item_nvgre(match_mask, match_value,
6009                                                      items, tunnel);
6010                         last_item = MLX5_FLOW_LAYER_GRE;
6011                         break;
6012                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6013                         flow_dv_translate_item_vxlan(match_mask, match_value,
6014                                                      items, tunnel);
6015                         last_item = MLX5_FLOW_LAYER_VXLAN;
6016                         break;
6017                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6018                         flow_dv_translate_item_vxlan(match_mask, match_value,
6019                                                      items, tunnel);
6020                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6021                         break;
6022                 case RTE_FLOW_ITEM_TYPE_GENEVE:
6023                         flow_dv_translate_item_geneve(match_mask, match_value,
6024                                                       items, tunnel);
6025                         last_item = MLX5_FLOW_LAYER_GENEVE;
6026                         break;
6027                 case RTE_FLOW_ITEM_TYPE_MPLS:
6028                         flow_dv_translate_item_mpls(match_mask, match_value,
6029                                                     items, last_item, tunnel);
6030                         last_item = MLX5_FLOW_LAYER_MPLS;
6031                         break;
6032                 case RTE_FLOW_ITEM_TYPE_META:
6033                         flow_dv_translate_item_meta(match_mask, match_value,
6034                                                     items);
6035                         last_item = MLX5_FLOW_ITEM_METADATA;
6036                         break;
6037                 case RTE_FLOW_ITEM_TYPE_ICMP:
6038                         flow_dv_translate_item_icmp(match_mask, match_value,
6039                                                     items, tunnel);
6040                         last_item = MLX5_FLOW_LAYER_ICMP;
6041                         break;
6042                 case RTE_FLOW_ITEM_TYPE_ICMP6:
6043                         flow_dv_translate_item_icmp6(match_mask, match_value,
6044                                                       items, tunnel);
6045                         last_item = MLX5_FLOW_LAYER_ICMP6;
6046                         break;
6047                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
6048                         flow_dv_translate_item_tag(match_mask, match_value,
6049                                                    items);
6050                         last_item = MLX5_FLOW_ITEM_TAG;
6051                         break;
6052                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
6053                         flow_dv_translate_item_tx_queue(dev, match_mask,
6054                                                         match_value,
6055                                                         items);
6056                         last_item = MLX5_FLOW_ITEM_TX_QUEUE;
6057                         break;
6058                 default:
6059                         break;
6060                 }
6061                 item_flags |= last_item;
6062         }
6063         /*
6064          * In case of ingress traffic when E-Switch mode is enabled,
6065          * we have two cases where we need to set the source port manually.
6066          * The first one is the case of a NIC steering rule, and the second is
6067          * an E-Switch rule where no port_id item was found. In both cases
6068          * the source port is set according to the current port in use.
6069          */
6070         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
6071             (priv->representor || priv->master)) {
6072                 if (flow_dv_translate_item_port_id(dev, match_mask,
6073                                                    match_value, NULL))
6074                         return -rte_errno;
6075         }
6076         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
6077                                          dev_flow->dv.value.buf));
6078         dev_flow->layers = item_flags;
6079         /* Register matcher. */
6080         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
6081                                     matcher.mask.size);
6082         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
6083                                                      matcher.priority);
6084         matcher.egress = attr->egress;
6085         matcher.group = flow->group;
6086         matcher.transfer = attr->transfer;
6087         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
6088                 return -rte_errno;
6089         return 0;
6090 }
6091
6092 /**
6093  * Apply the flow to the NIC.
6094  *
6095  * @param[in] dev
6096  *   Pointer to the Ethernet device structure.
6097  * @param[in, out] flow
6098  *   Pointer to flow structure.
6099  * @param[out] error
6100  *   Pointer to error structure.
6101  *
6102  * @return
6103  *   0 on success, a negative errno value otherwise and rte_errno is set.
6104  */
6105 static int
6106 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
6107               struct rte_flow_error *error)
6108 {
6109         struct mlx5_flow_dv *dv;
6110         struct mlx5_flow *dev_flow;
6111         struct mlx5_priv *priv = dev->data->dev_private;
6112         int n;
6113         int err;
6114
6115         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6116                 dv = &dev_flow->dv;
6117                 n = dv->actions_n;
6118                 if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
6119                         if (flow->transfer) {
6120                                 dv->actions[n++] = priv->sh->esw_drop_action;
6121                         } else {
6122                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
6123                                 if (!dv->hrxq) {
6124                                         rte_flow_error_set
6125                                                 (error, errno,
6126                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6127                                                  NULL,
6128                                                  "cannot get drop hash queue");
6129                                         goto error;
6130                                 }
6131                                 dv->actions[n++] = dv->hrxq->action;
6132                         }
6133                 } else if (dev_flow->actions &
6134                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
6135                         struct mlx5_hrxq *hrxq;
6136
6137                         hrxq = mlx5_hrxq_get(dev, flow->key,
6138                                              MLX5_RSS_HASH_KEY_LEN,
6139                                              dv->hash_fields,
6140                                              (*flow->queue),
6141                                              flow->rss.queue_num);
6142                         if (!hrxq) {
6143                                 hrxq = mlx5_hrxq_new
6144                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
6145                                          dv->hash_fields, (*flow->queue),
6146                                          flow->rss.queue_num,
6147                                          !!(dev_flow->layers &
6148                                             MLX5_FLOW_LAYER_TUNNEL));
6149                         }
6150                         if (!hrxq) {
6151                                 rte_flow_error_set
6152                                         (error, rte_errno,
6153                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
6154                                          "cannot get hash queue");
6155                                 goto error;
6156                         }
6157                         dv->hrxq = hrxq;
6158                         dv->actions[n++] = dv->hrxq->action;
6159                 }
6160                 dv->flow =
6161                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
6162                                                   (void *)&dv->value, n,
6163                                                   dv->actions);
6164                 if (!dv->flow) {
6165                         rte_flow_error_set(error, errno,
6166                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6167                                            NULL,
6168                                            "hardware refuses to create flow");
6169                         goto error;
6170                 }
6171                 if (priv->vmwa_context &&
6172                     dev_flow->dv.vf_vlan.tag &&
6173                     !dev_flow->dv.vf_vlan.created) {
6174                         /*
6175                          * The rule contains the VLAN pattern.
6176                          * For a VF we are going to create a VLAN
6177                          * interface to make the hypervisor set the
6178                          * correct e-Switch vport context.
6179                          */
6180                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
6181                 }
6182         }
6183         return 0;
6184 error:
6185         err = rte_errno; /* Save rte_errno before cleanup. */
6186         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6187                 struct mlx5_flow_dv *dv = &dev_flow->dv;
6188                 if (dv->hrxq) {
6189                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6190                                 mlx5_hrxq_drop_release(dev);
6191                         else
6192                                 mlx5_hrxq_release(dev, dv->hrxq);
6193                         dv->hrxq = NULL;
6194                 }
6195                 if (dev_flow->dv.vf_vlan.tag &&
6196                     dev_flow->dv.vf_vlan.created)
6197                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6198         }
6199         rte_errno = err; /* Restore rte_errno. */
6200         return -rte_errno;
6201 }
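
/*
 * A minimal sketch of how the translate/apply pair is expected to be
 * driven from the generic flow creation path (illustrative only; the
 * real caller lives in mlx5_flow.c and takes the shared lock through
 * the flow_d_* thunks below):
 *
 *	if (flow_dv_translate(dev, dev_flow, attr, items, actions, error))
 *		goto error;
 *	if (flow_dv_apply(dev, flow, error))
 *		goto error; // apply already unwound its hrxq/VLAN state
 */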
6202
6203 /**
6204  * Release the flow matcher.
6205  *
6206  * @param dev
6207  *   Pointer to Ethernet device.
6208  * @param flow
6209  *   Pointer to mlx5_flow.
6210  *
6211  * @return
6212  *   1 while a reference on it exists, 0 when freed.
6213  */
6214 static int
6215 flow_dv_matcher_release(struct rte_eth_dev *dev,
6216                         struct mlx5_flow *flow)
6217 {
6218         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
6219         struct mlx5_priv *priv = dev->data->dev_private;
6220         struct mlx5_ibv_shared *sh = priv->sh;
6221         struct mlx5_flow_tbl_resource *tbl;
6222
6223         assert(matcher->matcher_object);
6224         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
6225                 dev->data->port_id, (void *)matcher,
6226                 rte_atomic32_read(&matcher->refcnt));
6227         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
6228                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
6229                            (matcher->matcher_object));
6230                 LIST_REMOVE(matcher, next);
6231                 if (matcher->egress)
6232                         tbl = &sh->tx_tbl[matcher->group];
6233                 else
6234                         tbl = &sh->rx_tbl[matcher->group];
6235                 flow_dv_tbl_resource_release(tbl);
6236                 rte_free(matcher);
6237                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
6238                         dev->data->port_id, (void *)matcher);
6239                 return 0;
6240         }
6241         return 1;
6242 }
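
/*
 * The resource release helpers below all follow the same atomic
 * reference-counting idiom as the matcher release above; a minimal
 * sketch of the pattern ("res" and "destroy" are placeholders, not
 * driver symbols):
 *
 *	if (rte_atomic32_dec_and_test(&res->refcnt)) {
 *		claim_zero(destroy(res->object)); // last owner frees HW object
 *		LIST_REMOVE(res, next);           // unlink from the cache list
 *		rte_free(res);
 *		return 0; // freed
 *	}
 *	return 1; // still referenced by other flows
 */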
6243
6244 /**
6245  * Release an encap/decap resource.
6246  *
6247  * @param flow
6248  *   Pointer to mlx5_flow.
6249  *
6250  * @return
6251  *   1 while a reference on it exists, 0 when freed.
6252  */
6253 static int
6254 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
6255 {
6256         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
6257                                                 flow->dv.encap_decap;
6258
6259         assert(cache_resource->verbs_action);
6260         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
6261                 (void *)cache_resource,
6262                 rte_atomic32_read(&cache_resource->refcnt));
6263         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6264                 claim_zero(mlx5_glue->destroy_flow_action
6265                                 (cache_resource->verbs_action));
6266                 LIST_REMOVE(cache_resource, next);
6267                 rte_free(cache_resource);
6268                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
6269                         (void *)cache_resource);
6270                 return 0;
6271         }
6272         return 1;
6273 }
6274
6275 /**
6276  * Release a jump to table action resource.
6277  *
6278  * @param flow
6279  *   Pointer to mlx5_flow.
6280  *
6281  * @return
6282  *   1 while a reference on it exists, 0 when freed.
6283  */
6284 static int
6285 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
6286 {
6287         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
6288                                                 flow->dv.jump;
6289
6290         assert(cache_resource->action);
6291         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
6292                 (void *)cache_resource,
6293                 rte_atomic32_read(&cache_resource->refcnt));
6294         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6295                 claim_zero(mlx5_glue->destroy_flow_action
6296                                 (cache_resource->action));
6297                 LIST_REMOVE(cache_resource, next);
6298                 flow_dv_tbl_resource_release(cache_resource->tbl);
6299                 rte_free(cache_resource);
6300                 DRV_LOG(DEBUG, "jump table resource %p: removed",
6301                         (void *)cache_resource);
6302                 return 0;
6303         }
6304         return 1;
6305 }
6306
6307 /**
6308  * Release a modify-header resource.
6309  *
6310  * @param flow
6311  *   Pointer to mlx5_flow.
6312  *
6313  * @return
6314  *   1 while a reference on it exists, 0 when freed.
6315  */
6316 static int
6317 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
6318 {
6319         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
6320                                                 flow->dv.modify_hdr;
6321
6322         assert(cache_resource->verbs_action);
6323         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
6324                 (void *)cache_resource,
6325                 rte_atomic32_read(&cache_resource->refcnt));
6326         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6327                 claim_zero(mlx5_glue->destroy_flow_action
6328                                 (cache_resource->verbs_action));
6329                 LIST_REMOVE(cache_resource, next);
6330                 rte_free(cache_resource);
6331                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
6332                         (void *)cache_resource);
6333                 return 0;
6334         }
6335         return 1;
6336 }
6337
6338 /**
6339  * Release port ID action resource.
6340  *
6341  * @param flow
6342  *   Pointer to mlx5_flow.
6343  *
6344  * @return
6345  *   1 while a reference on it exists, 0 when freed.
6346  */
6347 static int
6348 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
6349 {
6350         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
6351                 flow->dv.port_id_action;
6352
6353         assert(cache_resource->action);
6354         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
6355                 (void *)cache_resource,
6356                 rte_atomic32_read(&cache_resource->refcnt));
6357         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6358                 claim_zero(mlx5_glue->destroy_flow_action
6359                                 (cache_resource->action));
6360                 LIST_REMOVE(cache_resource, next);
6361                 rte_free(cache_resource);
6362                 DRV_LOG(DEBUG, "port ID action resource %p: removed",
6363                         (void *)cache_resource);
6364                 return 0;
6365         }
6366         return 1;
6367 }
6368
6369 /**
6370  * Release push VLAN action resource.
6371  *
6372  * @param flow
6373  *   Pointer to mlx5_flow.
6374  *
6375  * @return
6376  *   1 while a reference on it exists, 0 when freed.
6377  */
6378 static int
6379 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6380 {
6381         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6382                 flow->dv.push_vlan_res;
6383
6384         assert(cache_resource->action);
6385         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6386                 (void *)cache_resource,
6387                 rte_atomic32_read(&cache_resource->refcnt));
6388         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6389                 claim_zero(mlx5_glue->destroy_flow_action
6390                                 (cache_resource->action));
6391                 LIST_REMOVE(cache_resource, next);
6392                 rte_free(cache_resource);
6393                 DRV_LOG(DEBUG, "push VLAN action resource %p: removed",
6394                         (void *)cache_resource);
6395                 return 0;
6396         }
6397         return 1;
6398 }
6399
6400 /**
6401  * Remove the flow from the NIC but keep it in memory.
6402  *
6403  * @param[in] dev
6404  *   Pointer to Ethernet device.
6405  * @param[in, out] flow
6406  *   Pointer to flow structure.
6407  */
6408 static void
6409 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6410 {
6411         struct mlx5_flow_dv *dv;
6412         struct mlx5_flow *dev_flow;
6413
6414         if (!flow)
6415                 return;
6416         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6417                 dv = &dev_flow->dv;
6418                 if (dv->flow) {
6419                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6420                         dv->flow = NULL;
6421                 }
6422                 if (dv->hrxq) {
6423                         if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
6424                                 mlx5_hrxq_drop_release(dev);
6425                         else
6426                                 mlx5_hrxq_release(dev, dv->hrxq);
6427                         dv->hrxq = NULL;
6428                 }
6429                 if (dev_flow->dv.vf_vlan.tag &&
6430                     dev_flow->dv.vf_vlan.created)
6431                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6432         }
6433 }
6434
6435 /**
6436  * Remove the flow from the NIC and the memory.
6437  *
6438  * @param[in] dev
6439  *   Pointer to the Ethernet device structure.
6440  * @param[in, out] flow
6441  *   Pointer to flow structure.
6442  */
6443 static void
6444 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6445 {
6446         struct mlx5_flow *dev_flow;
6447
6448         if (!flow)
6449                 return;
6450         flow_dv_remove(dev, flow);
6451         if (flow->counter) {
6452                 flow_dv_counter_release(dev, flow->counter);
6453                 flow->counter = NULL;
6454         }
6455         if (flow->tag_resource) {
6456                 flow_dv_tag_release(dev, flow->tag_resource);
6457                 flow->tag_resource = NULL;
6458         }
6459         while (!LIST_EMPTY(&flow->dev_flows)) {
6460                 dev_flow = LIST_FIRST(&flow->dev_flows);
6461                 LIST_REMOVE(dev_flow, next);
6462                 if (dev_flow->dv.matcher)
6463                         flow_dv_matcher_release(dev, dev_flow);
6464                 if (dev_flow->dv.encap_decap)
6465                         flow_dv_encap_decap_resource_release(dev_flow);
6466                 if (dev_flow->dv.modify_hdr)
6467                         flow_dv_modify_hdr_resource_release(dev_flow);
6468                 if (dev_flow->dv.jump)
6469                         flow_dv_jump_tbl_resource_release(dev_flow);
6470                 if (dev_flow->dv.port_id_action)
6471                         flow_dv_port_id_action_resource_release(dev_flow);
6472                 if (dev_flow->dv.push_vlan_res)
6473                         flow_dv_push_vlan_action_resource_release(dev_flow);
6474                 rte_free(dev_flow);
6475         }
6476 }
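
/*
 * Sketch of the intended remove/destroy lifecycle (illustrative
 * only): flow_dv_remove() drops only the hardware rules and RX queue
 * references, so a subsequent flow_dv_apply() can re-install the
 * very same flow, while flow_dv_destroy() additionally releases all
 * cached resources and frees the device sub-flows:
 *
 *	flow_dv_remove(dev, flow);        // e.g. on device stop
 *	flow_dv_apply(dev, flow, &error); // re-install on start
 *	flow_dv_destroy(dev, flow);       // final teardown
 */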
6477
6478 /**
6479  * Query a DV flow rule for its statistics via DevX.
6480  *
6481  * @param[in] dev
6482  *   Pointer to Ethernet device.
6483  * @param[in] flow
6484  *   Pointer to the flow structure.
6485  * @param[out] data
6486  *   Data retrieved by the query.
6487  * @param[out] error
6488  *   Perform verbose error reporting if not NULL.
6489  *
6490  * @return
6491  *   0 on success, a negative errno value otherwise and rte_errno is set.
6492  */
6493 static int
6494 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
6495                     void *data, struct rte_flow_error *error)
6496 {
6497         struct mlx5_priv *priv = dev->data->dev_private;
6498         struct rte_flow_query_count *qc = data;
6499
6500         if (!priv->config.devx)
6501                 return rte_flow_error_set(error, ENOTSUP,
6502                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6503                                           NULL,
6504                                           "counters are not supported");
6505         if (flow->counter) {
6506                 uint64_t pkts, bytes;
6507                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
6508                                                &bytes);
6509
6510                 if (err)
6511                         return rte_flow_error_set(error, -err,
6512                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6513                                         NULL, "cannot read counters");
6514                 qc->hits_set = 1;
6515                 qc->bytes_set = 1;
6516                 qc->hits = pkts - flow->counter->hits;
6517                 qc->bytes = bytes - flow->counter->bytes;
6518                 if (qc->reset) {
6519                         flow->counter->hits = pkts;
6520                         flow->counter->bytes = bytes;
6521                 }
6522                 return 0;
6523         }
6524         return rte_flow_error_set(error, EINVAL,
6525                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6526                                   NULL,
6527                                   "counters are not available");
6528 }
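
/*
 * Worked example of the counter semantics above (numbers are
 * illustrative): if hardware reports pkts == 1000 while the saved
 * baseline flow->counter->hits == 600, the query returns
 * qc->hits == 400, i.e. the hits seen since the last reset. When
 * qc->reset is set, 1000 is latched as the new baseline for the
 * following queries.
 */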
6529
6530 /**
6531  * Query a flow.
6532  *
6533  * @see rte_flow_query()
6534  * @see rte_flow_ops
6535  */
6536 static int
6537 flow_dv_query(struct rte_eth_dev *dev,
6538               struct rte_flow *flow,
6539               const struct rte_flow_action *actions,
6540               void *data,
6541               struct rte_flow_error *error)
6542 {
6543         int ret = -EINVAL;
6544
6545         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
6546                 switch (actions->type) {
6547                 case RTE_FLOW_ACTION_TYPE_VOID:
6548                         break;
6549                 case RTE_FLOW_ACTION_TYPE_COUNT:
6550                         ret = flow_dv_query_count(dev, flow, data, error);
6551                         break;
6552                 default:
6553                         return rte_flow_error_set(error, ENOTSUP,
6554                                                   RTE_FLOW_ERROR_TYPE_ACTION,
6555                                                   actions,
6556                                                   "action not supported");
6557                 }
6558         }
6559         return ret;
6560 }
6561
6562 /*
6563  * Mutex-protected thunk to flow_dv_translate().
6564  */
6565 static int
6566 flow_d_translate(struct rte_eth_dev *dev,
6567                  struct mlx5_flow *dev_flow,
6568                  const struct rte_flow_attr *attr,
6569                  const struct rte_flow_item items[],
6570                  const struct rte_flow_action actions[],
6571                  struct rte_flow_error *error)
6572 {
6573         int ret;
6574
6575         flow_d_shared_lock(dev);
6576         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
6577         flow_d_shared_unlock(dev);
6578         return ret;
6579 }
6580
6581 /*
6582  * Mutex-protected thunk to flow_dv_apply().
6583  */
6584 static int
6585 flow_d_apply(struct rte_eth_dev *dev,
6586              struct rte_flow *flow,
6587              struct rte_flow_error *error)
6588 {
6589         int ret;
6590
6591         flow_d_shared_lock(dev);
6592         ret = flow_dv_apply(dev, flow, error);
6593         flow_d_shared_unlock(dev);
6594         return ret;
6595 }
6596
6597 /*
6598  * Mutex-protected thunk to flow_dv_remove().
6599  */
6600 static void
6601 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6602 {
6603         flow_d_shared_lock(dev);
6604         flow_dv_remove(dev, flow);
6605         flow_d_shared_unlock(dev);
6606 }
6607
6608 /*
6609  * Mutex-protected thunk to flow_dv_destroy().
6610  */
6611 static void
6612 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
6613 {
6614         flow_d_shared_lock(dev);
6615         flow_dv_destroy(dev, flow);
6616         flow_d_shared_unlock(dev);
6617 }
6618
6619 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
6620         .validate = flow_dv_validate,
6621         .prepare = flow_dv_prepare,
6622         .translate = flow_d_translate,
6623         .apply = flow_d_apply,
6624         .remove = flow_d_remove,
6625         .destroy = flow_d_destroy,
6626         .query = flow_dv_query,
6627 };
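
/*
 * A minimal sketch of how the generic mlx5 flow layer is expected to
 * dispatch through this ops table (hypothetical caller code, for
 * illustration only):
 *
 *	const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;
 *
 *	if (fops->translate(dev, dev_flow, attr, items, actions, error) ||
 *	    fops->apply(dev, flow, error))
 *		fops->destroy(dev, flow); // roll back on any failure
 */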
6628
6629 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */