net/mlx5: fix set VLAN ID/PCP in new header
[dpdk.git] drivers/net/mlx5/mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9 #include <unistd.h>
10
11 /* Verbs header. */
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #ifdef PEDANTIC
14 #pragma GCC diagnostic ignored "-Wpedantic"
15 #endif
16 #include <infiniband/verbs.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
20
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28 #include <rte_gre.h>
29 #include <rte_vxlan.h>
30
31 #include "mlx5.h"
32 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35 #include "mlx5_prm.h"
36 #include "mlx5_rxtx.h"
37
38 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
39
40 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
41 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
42 #endif
43
44 #ifndef HAVE_MLX5DV_DR_ESWITCH
45 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
46 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
47 #endif
48 #endif
49
50 #ifndef HAVE_MLX5DV_DR
51 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
52 #endif
53
54 /* VLAN header definitions */
55 #define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
56 #define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
57 #define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
58 #define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
59 #define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)
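
/*
 * Worked example of the TCI layout above (PCP:3 | DEI:1 | VID:12):
 * with PCP = 5 and VID = 100 (0x064),
 *     tci = (5 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 0x064 = 0xa064,
 * where MLX5DV_FLOW_VLAN_PCP_MASK = 0xe000 selects the PCP bits and
 * MLX5DV_FLOW_VLAN_VID_MASK = 0x0fff selects the VID bits.
 */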
60
61 union flow_dv_attr {
62         struct {
63                 uint32_t valid:1;
64                 uint32_t ipv4:1;
65                 uint32_t ipv6:1;
66                 uint32_t tcp:1;
67                 uint32_t udp:1;
68                 uint32_t reserved:27;
69         };
70         uint32_t attr;
71 };
72
73 /**
74  * Initialize flow attributes structure according to flow items' types.
75  *
76  * @param[in] item
77  *   Pointer to item specification.
78  * @param[out] attr
79  *   Pointer to flow attributes structure.
80  */
81 static void
82 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
83 {
84         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
85                 switch (item->type) {
86                 case RTE_FLOW_ITEM_TYPE_IPV4:
87                         attr->ipv4 = 1;
88                         break;
89                 case RTE_FLOW_ITEM_TYPE_IPV6:
90                         attr->ipv6 = 1;
91                         break;
92                 case RTE_FLOW_ITEM_TYPE_UDP:
93                         attr->udp = 1;
94                         break;
95                 case RTE_FLOW_ITEM_TYPE_TCP:
96                         attr->tcp = 1;
97                         break;
98                 default:
99                         break;
100                 }
101         }
102         attr->valid = 1;
103 }
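
/*
 * For example, the pattern eth / ipv4 / udp / end yields attr with
 * ipv4 = 1, udp = 1 and valid = 1, which the modify-header converters
 * below use to select the matching field tables (modify_ipv4/modify_udp).
 */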
104
105 struct field_modify_info {
106         uint32_t size; /* Size of field in protocol header, in bytes. */
107         uint32_t offset; /* Offset of field in protocol header, in bytes. */
108         enum mlx5_modification_field id;
109 };
110
111 struct field_modify_info modify_eth[] = {
112         {4,  0, MLX5_MODI_OUT_DMAC_47_16},
113         {2,  4, MLX5_MODI_OUT_DMAC_15_0},
114         {4,  6, MLX5_MODI_OUT_SMAC_47_16},
115         {2, 10, MLX5_MODI_OUT_SMAC_15_0},
116         {0, 0, 0},
117 };
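
/*
 * The destination MAC occupies bytes 0-5 of the Ethernet header and is
 * modified as a 4-byte high part (DMAC_47_16 at offset 0) plus a 2-byte
 * low part (DMAC_15_0 at offset 4): e.g. aa:bb:cc:dd:ee:ff splits into
 * 0xaabbccdd and 0xeeff. The source MAC follows at offsets 6 and 10.
 */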
118
119 struct field_modify_info modify_vlan_out_first_vid[] = {
120         /* Size in bits, not bytes: the VID is a 12-bit field. */
121         {12, 0, MLX5_MODI_OUT_FIRST_VID},
122         {0, 0, 0},
123 };
124
125 struct field_modify_info modify_ipv4[] = {
126         {1,  8, MLX5_MODI_OUT_IPV4_TTL},
127         {4, 12, MLX5_MODI_OUT_SIPV4},
128         {4, 16, MLX5_MODI_OUT_DIPV4},
129         {0, 0, 0},
130 };
131
132 struct field_modify_info modify_ipv6[] = {
133         {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
134         {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
135         {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
136         {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
137         {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
138         {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
139         {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
140         {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
141         {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
142         {0, 0, 0},
143 };
144
145 struct field_modify_info modify_udp[] = {
146         {2, 0, MLX5_MODI_OUT_UDP_SPORT},
147         {2, 2, MLX5_MODI_OUT_UDP_DPORT},
148         {0, 0, 0},
149 };
150
151 struct field_modify_info modify_tcp[] = {
152         {2, 0, MLX5_MODI_OUT_TCP_SPORT},
153         {2, 2, MLX5_MODI_OUT_TCP_DPORT},
154         {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
155         {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
156         {0, 0, 0},
157 };
158
159 static void
160 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
161                           uint8_t next_protocol, uint64_t *item_flags,
162                           int *tunnel)
163 {
164         assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
165                item->type == RTE_FLOW_ITEM_TYPE_IPV6);
166         if (next_protocol == IPPROTO_IPIP) {
167                 *item_flags |= MLX5_FLOW_LAYER_IPIP;
168                 *tunnel = 1;
169         }
170         if (next_protocol == IPPROTO_IPV6) {
171                 *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
172                 *tunnel = 1;
173         }
174 }
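
/*
 * For example, an outer IPv4 item whose next protocol is IPPROTO_IPIP
 * marks the rule as an IP-in-IP tunnel (MLX5_FLOW_LAYER_IPIP), and
 * IPPROTO_IPV6 marks it as IPv6 encapsulation; in both cases *tunnel is
 * set so the remaining items are parsed as inner headers.
 */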
175
176 /**
177  * Acquire the synchronizing object to protect multithreaded access
178  * to shared dv context. Lock occurs only if context is actually
179  * shared, i.e. we have multiport IB device and representors are
180  * created.
181  *
182  * @param[in] dev
183  *   Pointer to the rte_eth_dev structure.
184  */
185 static void
186 flow_d_shared_lock(struct rte_eth_dev *dev)
187 {
188         struct mlx5_priv *priv = dev->data->dev_private;
189         struct mlx5_ibv_shared *sh = priv->sh;
190
191         if (sh->dv_refcnt > 1) {
192                 int ret;
193
194                 ret = pthread_mutex_lock(&sh->dv_mutex);
195                 assert(!ret);
196                 (void)ret;
197         }
198 }
199
200 static void
201 flow_d_shared_unlock(struct rte_eth_dev *dev)
202 {
203         struct mlx5_priv *priv = dev->data->dev_private;
204         struct mlx5_ibv_shared *sh = priv->sh;
205
206         if (sh->dv_refcnt > 1) {
207                 int ret;
208
209                 ret = pthread_mutex_unlock(&sh->dv_mutex);
210                 assert(!ret);
211                 (void)ret;
212         }
213 }
214
215 /**
 * Update VLAN's VID/PCP based on input rte_flow_action.
216  *
217  * @param[in] action
218  *   Pointer to struct rte_flow_action.
219  * @param[out] vlan
220  *   Pointer to struct rte_vlan_hdr.
221  */
222 static void
223 mlx5_update_vlan_vid_pcp(const struct rte_flow_action *action,
224                          struct rte_vlan_hdr *vlan)
225 {
226         uint16_t vlan_tci;
227         if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP) {
228                 vlan_tci =
229                     ((const struct rte_flow_action_of_set_vlan_pcp *)
230                                                action->conf)->vlan_pcp;
231                 vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
232                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
233                 vlan->vlan_tci |= vlan_tci;
234         } else if (action->type == RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) {
235                 vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
236                 vlan->vlan_tci |= rte_be_to_cpu_16
237                     (((const struct rte_flow_action_of_set_vlan_vid *)
238                                              action->conf)->vlan_vid);
239         }
240 }
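
/*
 * Worked example (hypothetical values): starting from vlan_tci = 0x0001,
 * an OF_SET_VLAN_PCP action with vlan_pcp = 3 gives
 *     vlan_tci = (0x0001 & ~0xe000) | (3 << 13) = 0x6001,
 * and a following OF_SET_VLAN_VID action with vlan_vid = RTE_BE16(100)
 * gives
 *     vlan_tci = (0x6001 & ~0x0fff) | 100 = 0x6064.
 */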
241
242 /**
243  * Convert modify-header action to DV specification.
244  *
245  * @param[in] item
246  *   Pointer to item specification.
247  * @param[in] field
248  *   Pointer to field modification information.
249  * @param[in,out] resource
250  *   Pointer to the modify-header resource.
251  * @param[in] type
252  *   Type of modification.
253  * @param[out] error
254  *   Pointer to the error structure.
255  *
256  * @return
257  *   0 on success, a negative errno value otherwise and rte_errno is set.
258  */
259 static int
260 flow_dv_convert_modify_action(struct rte_flow_item *item,
261                               struct field_modify_info *field,
262                               struct mlx5_flow_dv_modify_hdr_resource *resource,
263                               uint32_t type,
264                               struct rte_flow_error *error)
265 {
266         uint32_t i = resource->actions_num;
267         struct mlx5_modification_cmd *actions = resource->actions;
268         const uint8_t *spec = item->spec;
269         const uint8_t *mask = item->mask;
270         uint32_t set;
271
272         while (field->size) {
273                 set = 0;
274                 /* Generate modify command for each mask segment. */
275                 memcpy(&set, &mask[field->offset], field->size);
276                 if (set) {
277                         if (i >= MLX5_MODIFY_NUM)
278                                 return rte_flow_error_set(error, EINVAL,
279                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
280                                          "too many items to modify");
281                         actions[i].action_type = type;
282                         actions[i].field = field->id;
283                         actions[i].length = field->size ==
284                                         4 ? 0 : field->size * 8;
285                         rte_memcpy(&actions[i].data[4 - field->size],
286                                    &spec[field->offset], field->size);
287                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
288                         ++i;
289                 }
290                 if (resource->actions_num != i)
291                         resource->actions_num = i;
292                 field++;
293         }
294         if (!resource->actions_num)
295                 return rte_flow_error_set(error, EINVAL,
296                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
297                                           "invalid modification flow item");
298         return 0;
299 }
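
/*
 * Worked example: a set-TTL action converted through the modify_ipv4
 * table walks {1, 8, TTL}, {4, 12, SIPV4}, {4, 16, DIPV4}. Only the
 * one-byte TTL segment of the mask is non-zero, so a single command is
 * emitted with field = MLX5_MODI_OUT_IPV4_TTL, length = 8 (size 1 != 4,
 * hence size * 8 bits) and the TTL value right-aligned at data[3].
 */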
300
301 /**
302  * Convert modify-header set IPv4 address action to DV specification.
303  *
304  * @param[in,out] resource
305  *   Pointer to the modify-header resource.
306  * @param[in] action
307  *   Pointer to action specification.
308  * @param[out] error
309  *   Pointer to the error structure.
310  *
311  * @return
312  *   0 on success, a negative errno value otherwise and rte_errno is set.
313  */
314 static int
315 flow_dv_convert_action_modify_ipv4
316                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
317                          const struct rte_flow_action *action,
318                          struct rte_flow_error *error)
319 {
320         const struct rte_flow_action_set_ipv4 *conf =
321                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
322         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
323         struct rte_flow_item_ipv4 ipv4;
324         struct rte_flow_item_ipv4 ipv4_mask;
325
326         memset(&ipv4, 0, sizeof(ipv4));
327         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
328         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
329                 ipv4.hdr.src_addr = conf->ipv4_addr;
330                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
331         } else {
332                 ipv4.hdr.dst_addr = conf->ipv4_addr;
333                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
334         }
335         item.spec = &ipv4;
336         item.mask = &ipv4_mask;
337         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
338                                              MLX5_MODIFICATION_TYPE_SET, error);
339 }
340
341 /**
342  * Convert modify-header set IPv6 address action to DV specification.
343  *
344  * @param[in,out] resource
345  *   Pointer to the modify-header resource.
346  * @param[in] action
347  *   Pointer to action specification.
348  * @param[out] error
349  *   Pointer to the error structure.
350  *
351  * @return
352  *   0 on success, a negative errno value otherwise and rte_errno is set.
353  */
354 static int
355 flow_dv_convert_action_modify_ipv6
356                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
357                          const struct rte_flow_action *action,
358                          struct rte_flow_error *error)
359 {
360         const struct rte_flow_action_set_ipv6 *conf =
361                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
362         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
363         struct rte_flow_item_ipv6 ipv6;
364         struct rte_flow_item_ipv6 ipv6_mask;
365
366         memset(&ipv6, 0, sizeof(ipv6));
367         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
368         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
369                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
370                        sizeof(ipv6.hdr.src_addr));
371                 memcpy(&ipv6_mask.hdr.src_addr,
372                        &rte_flow_item_ipv6_mask.hdr.src_addr,
373                        sizeof(ipv6.hdr.src_addr));
374         } else {
375                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
376                        sizeof(ipv6.hdr.dst_addr));
377                 memcpy(&ipv6_mask.hdr.dst_addr,
378                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
379                        sizeof(ipv6.hdr.dst_addr));
380         }
381         item.spec = &ipv6;
382         item.mask = &ipv6_mask;
383         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
384                                              MLX5_MODIFICATION_TYPE_SET, error);
385 }
386
387 /**
388  * Convert modify-header set MAC address action to DV specification.
389  *
390  * @param[in,out] resource
391  *   Pointer to the modify-header resource.
392  * @param[in] action
393  *   Pointer to action specification.
394  * @param[out] error
395  *   Pointer to the error structure.
396  *
397  * @return
398  *   0 on success, a negative errno value otherwise and rte_errno is set.
399  */
400 static int
401 flow_dv_convert_action_modify_mac
402                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
403                          const struct rte_flow_action *action,
404                          struct rte_flow_error *error)
405 {
406         const struct rte_flow_action_set_mac *conf =
407                 (const struct rte_flow_action_set_mac *)(action->conf);
408         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
409         struct rte_flow_item_eth eth;
410         struct rte_flow_item_eth eth_mask;
411
412         memset(&eth, 0, sizeof(eth));
413         memset(&eth_mask, 0, sizeof(eth_mask));
414         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
415                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
416                        sizeof(eth.src.addr_bytes));
417                 memcpy(&eth_mask.src.addr_bytes,
418                        &rte_flow_item_eth_mask.src.addr_bytes,
419                        sizeof(eth_mask.src.addr_bytes));
420         } else {
421                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
422                        sizeof(eth.dst.addr_bytes));
423                 memcpy(&eth_mask.dst.addr_bytes,
424                        &rte_flow_item_eth_mask.dst.addr_bytes,
425                        sizeof(eth_mask.dst.addr_bytes));
426         }
427         item.spec = &eth;
428         item.mask = &eth_mask;
429         return flow_dv_convert_modify_action(&item, modify_eth, resource,
430                                              MLX5_MODIFICATION_TYPE_SET, error);
431 }
432
433 /**
434  * Convert modify-header set VLAN VID action to DV specification.
435  *
436  * @param[in,out] resource
437  *   Pointer to the modify-header resource.
438  * @param[in] action
439  *   Pointer to action specification.
440  * @param[out] error
441  *   Pointer to the error structure.
442  *
443  * @return
444  *   0 on success, a negative errno value otherwise and rte_errno is set.
445  */
446 static int
447 flow_dv_convert_action_modify_vlan_vid
448                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
449                          const struct rte_flow_action *action,
450                          struct rte_flow_error *error)
451 {
452         const struct rte_flow_action_of_set_vlan_vid *conf =
453                 (const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
454         int i = resource->actions_num;
455         struct mlx5_modification_cmd *actions = resource->actions;
456         struct field_modify_info *field = modify_vlan_out_first_vid;
457
458         if (i >= MLX5_MODIFY_NUM)
459                 return rte_flow_error_set(error, EINVAL,
460                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
461                          "too many items to modify");
462         actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
463         actions[i].field = field->id;
464         actions[i].length = field->size;
465         actions[i].offset = field->offset;
466         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
467         actions[i].data1 = conf->vlan_vid;
468         actions[i].data1 = actions[i].data1 << 16;
469         resource->actions_num = ++i;
470         return 0;
471 }
472
473 /**
474  * Convert modify-header set TP action to DV specification.
475  *
476  * @param[in,out] resource
477  *   Pointer to the modify-header resource.
478  * @param[in] action
479  *   Pointer to action specification.
480  * @param[in] items
481  *   Pointer to rte_flow_item objects list.
482  * @param[in] attr
483  *   Pointer to flow attributes structure.
484  * @param[out] error
485  *   Pointer to the error structure.
486  *
487  * @return
488  *   0 on success, a negative errno value otherwise and rte_errno is set.
489  */
490 static int
491 flow_dv_convert_action_modify_tp
492                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
493                          const struct rte_flow_action *action,
494                          const struct rte_flow_item *items,
495                          union flow_dv_attr *attr,
496                          struct rte_flow_error *error)
497 {
498         const struct rte_flow_action_set_tp *conf =
499                 (const struct rte_flow_action_set_tp *)(action->conf);
500         struct rte_flow_item item;
501         struct rte_flow_item_udp udp;
502         struct rte_flow_item_udp udp_mask;
503         struct rte_flow_item_tcp tcp;
504         struct rte_flow_item_tcp tcp_mask;
505         struct field_modify_info *field;
506
507         if (!attr->valid)
508                 flow_dv_attr_init(items, attr);
509         if (attr->udp) {
510                 memset(&udp, 0, sizeof(udp));
511                 memset(&udp_mask, 0, sizeof(udp_mask));
512                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
513                         udp.hdr.src_port = conf->port;
514                         udp_mask.hdr.src_port =
515                                         rte_flow_item_udp_mask.hdr.src_port;
516                 } else {
517                         udp.hdr.dst_port = conf->port;
518                         udp_mask.hdr.dst_port =
519                                         rte_flow_item_udp_mask.hdr.dst_port;
520                 }
521                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
522                 item.spec = &udp;
523                 item.mask = &udp_mask;
524                 field = modify_udp;
525         }
526         if (attr->tcp) {
527                 memset(&tcp, 0, sizeof(tcp));
528                 memset(&tcp_mask, 0, sizeof(tcp_mask));
529                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
530                         tcp.hdr.src_port = conf->port;
531                         tcp_mask.hdr.src_port =
532                                         rte_flow_item_tcp_mask.hdr.src_port;
533                 } else {
534                         tcp.hdr.dst_port = conf->port;
535                         tcp_mask.hdr.dst_port =
536                                         rte_flow_item_tcp_mask.hdr.dst_port;
537                 }
538                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
539                 item.spec = &tcp;
540                 item.mask = &tcp_mask;
541                 field = modify_tcp;
542         }
543         return flow_dv_convert_modify_action(&item, field, resource,
544                                              MLX5_MODIFICATION_TYPE_SET, error);
545 }
546
547 /**
548  * Convert modify-header set TTL action to DV specification.
549  *
550  * @param[in,out] resource
551  *   Pointer to the modify-header resource.
552  * @param[in] action
553  *   Pointer to action specification.
554  * @param[in] items
555  *   Pointer to rte_flow_item objects list.
556  * @param[in] attr
557  *   Pointer to flow attributes structure.
558  * @param[out] error
559  *   Pointer to the error structure.
560  *
561  * @return
562  *   0 on success, a negative errno value otherwise and rte_errno is set.
563  */
564 static int
565 flow_dv_convert_action_modify_ttl
566                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
567                          const struct rte_flow_action *action,
568                          const struct rte_flow_item *items,
569                          union flow_dv_attr *attr,
570                          struct rte_flow_error *error)
571 {
572         const struct rte_flow_action_set_ttl *conf =
573                 (const struct rte_flow_action_set_ttl *)(action->conf);
574         struct rte_flow_item item;
575         struct rte_flow_item_ipv4 ipv4;
576         struct rte_flow_item_ipv4 ipv4_mask;
577         struct rte_flow_item_ipv6 ipv6;
578         struct rte_flow_item_ipv6 ipv6_mask;
579         struct field_modify_info *field;
580
581         if (!attr->valid)
582                 flow_dv_attr_init(items, attr);
583         if (attr->ipv4) {
584                 memset(&ipv4, 0, sizeof(ipv4));
585                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
586                 ipv4.hdr.time_to_live = conf->ttl_value;
587                 ipv4_mask.hdr.time_to_live = 0xFF;
588                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
589                 item.spec = &ipv4;
590                 item.mask = &ipv4_mask;
591                 field = modify_ipv4;
592         }
593         if (attr->ipv6) {
594                 memset(&ipv6, 0, sizeof(ipv6));
595                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
596                 ipv6.hdr.hop_limits = conf->ttl_value;
597                 ipv6_mask.hdr.hop_limits = 0xFF;
598                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
599                 item.spec = &ipv6;
600                 item.mask = &ipv6_mask;
601                 field = modify_ipv6;
602         }
603         return flow_dv_convert_modify_action(&item, field, resource,
604                                              MLX5_MODIFICATION_TYPE_SET, error);
605 }
606
607 /**
608  * Convert modify-header decrement TTL action to DV specification.
609  *
610  * @param[in,out] resource
611  *   Pointer to the modify-header resource.
614  * @param[in] items
615  *   Pointer to rte_flow_item objects list.
616  * @param[in] attr
617  *   Pointer to flow attributes structure.
618  * @param[out] error
619  *   Pointer to the error structure.
620  *
621  * @return
622  *   0 on success, a negative errno value otherwise and rte_errno is set.
623  */
624 static int
625 flow_dv_convert_action_modify_dec_ttl
626                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
627                          const struct rte_flow_item *items,
628                          union flow_dv_attr *attr,
629                          struct rte_flow_error *error)
630 {
631         struct rte_flow_item item;
632         struct rte_flow_item_ipv4 ipv4;
633         struct rte_flow_item_ipv4 ipv4_mask;
634         struct rte_flow_item_ipv6 ipv6;
635         struct rte_flow_item_ipv6 ipv6_mask;
636         struct field_modify_info *field;
637
638         if (!attr->valid)
639                 flow_dv_attr_init(items, attr);
640         if (attr->ipv4) {
641                 memset(&ipv4, 0, sizeof(ipv4));
642                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
643                 ipv4.hdr.time_to_live = 0xFF;
644                 ipv4_mask.hdr.time_to_live = 0xFF;
645                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
646                 item.spec = &ipv4;
647                 item.mask = &ipv4_mask;
648                 field = modify_ipv4;
649         }
650         if (attr->ipv6) {
651                 memset(&ipv6, 0, sizeof(ipv6));
652                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
653                 ipv6.hdr.hop_limits = 0xFF;
654                 ipv6_mask.hdr.hop_limits = 0xFF;
655                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
656                 item.spec = &ipv6;
657                 item.mask = &ipv6_mask;
658                 field = modify_ipv6;
659         }
660         return flow_dv_convert_modify_action(&item, field, resource,
661                                              MLX5_MODIFICATION_TYPE_ADD, error);
662 }
663
664 /**
665  * Convert modify-header increment/decrement TCP Sequence number
666  * to DV specification.
667  *
668  * @param[in,out] resource
669  *   Pointer to the modify-header resource.
670  * @param[in] action
671  *   Pointer to action specification.
672  * @param[out] error
673  *   Pointer to the error structure.
674  *
675  * @return
676  *   0 on success, a negative errno value otherwise and rte_errno is set.
677  */
678 static int
679 flow_dv_convert_action_modify_tcp_seq
680                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
681                          const struct rte_flow_action *action,
682                          struct rte_flow_error *error)
683 {
684         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
685         uint64_t value = rte_be_to_cpu_32(*conf);
686         struct rte_flow_item item;
687         struct rte_flow_item_tcp tcp;
688         struct rte_flow_item_tcp tcp_mask;
689
690         memset(&tcp, 0, sizeof(tcp));
691         memset(&tcp_mask, 0, sizeof(tcp_mask));
692         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
693                 /*
694                  * The HW has no decrement operation, only increment operation.
695                  * To simulate decrement X from Y using increment operation
696                  * we need to add UINT32_MAX X times to Y.
697                  * Each adding of UINT32_MAX decrements Y by 1.
698                  */
699                 value *= UINT32_MAX;
700         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
701         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
702         item.type = RTE_FLOW_ITEM_TYPE_TCP;
703         item.spec = &tcp;
704         item.mask = &tcp_mask;
705         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
706                                              MLX5_MODIFICATION_TYPE_ADD, error);
707 }
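
/*
 * Worked example of the decrement emulation above: UINT32_MAX equals -1
 * modulo 2^32, so adding value * UINT32_MAX subtracts value. E.g. to
 * decrement the sequence number by 2:
 *     2 * 0xffffffff = 0x1fffffffe -> (uint32_t)0xfffffffe == -2.
 */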
708
709 /**
710  * Convert modify-header increment/decrement TCP Acknowledgment number
711  * to DV specification.
712  *
713  * @param[in,out] resource
714  *   Pointer to the modify-header resource.
715  * @param[in] action
716  *   Pointer to action specification.
717  * @param[out] error
718  *   Pointer to the error structure.
719  *
720  * @return
721  *   0 on success, a negative errno value otherwise and rte_errno is set.
722  */
723 static int
724 flow_dv_convert_action_modify_tcp_ack
725                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
726                          const struct rte_flow_action *action,
727                          struct rte_flow_error *error)
728 {
729         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
730         uint64_t value = rte_be_to_cpu_32(*conf);
731         struct rte_flow_item item;
732         struct rte_flow_item_tcp tcp;
733         struct rte_flow_item_tcp tcp_mask;
734
735         memset(&tcp, 0, sizeof(tcp));
736         memset(&tcp_mask, 0, sizeof(tcp_mask));
737         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
738                 /*
739                  * The HW has no decrement operation, only increment operation.
740                  * To simulate decrement X from Y using increment operation
741                  * we need to add UINT32_MAX X times to Y.
742                  * Each adding of UINT32_MAX decrements Y by 1.
743                  */
744                 value *= UINT32_MAX;
745         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
746         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
747         item.type = RTE_FLOW_ITEM_TYPE_TCP;
748         item.spec = &tcp;
749         item.mask = &tcp_mask;
750         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
751                                              MLX5_MODIFICATION_TYPE_ADD, error);
752 }
753
754 static enum mlx5_modification_field reg_to_field[] = {
755         [REG_A] = MLX5_MODI_META_DATA_REG_A,
756         [REG_B] = MLX5_MODI_META_DATA_REG_B,
757         [REG_C_0] = MLX5_MODI_META_REG_C_0,
758         [REG_C_1] = MLX5_MODI_META_REG_C_1,
759         [REG_C_2] = MLX5_MODI_META_REG_C_2,
760         [REG_C_3] = MLX5_MODI_META_REG_C_3,
761         [REG_C_4] = MLX5_MODI_META_REG_C_4,
762         [REG_C_5] = MLX5_MODI_META_REG_C_5,
763         [REG_C_6] = MLX5_MODI_META_REG_C_6,
764         [REG_C_7] = MLX5_MODI_META_REG_C_7,
765 };
766
767 /**
768  * Convert register set to DV specification.
769  *
770  * @param[in,out] resource
771  *   Pointer to the modify-header resource.
772  * @param[in] action
773  *   Pointer to action specification.
774  * @param[out] error
775  *   Pointer to the error structure.
776  *
777  * @return
778  *   0 on success, a negative errno value otherwise and rte_errno is set.
779  */
780 static int
781 flow_dv_convert_action_set_reg
782                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
783                          const struct rte_flow_action *action,
784                          struct rte_flow_error *error)
785 {
786         const struct mlx5_rte_flow_action_set_tag *conf = (action->conf);
787         struct mlx5_modification_cmd *actions = resource->actions;
788         uint32_t i = resource->actions_num;
789
790         if (i >= MLX5_MODIFY_NUM)
791                 return rte_flow_error_set(error, EINVAL,
792                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
793                                           "too many items to modify");
794         actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
795         actions[i].field = reg_to_field[conf->id];
796         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
797         actions[i].data1 = conf->data;
798         ++i;
799         resource->actions_num = i;
800         if (!resource->actions_num)
801                 return rte_flow_error_set(error, EINVAL,
802                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
803                                           "invalid modification flow item");
804         return 0;
805 }
806
807 /**
808  * Validate META item.
809  *
810  * @param[in] dev
811  *   Pointer to the rte_eth_dev structure.
812  * @param[in] item
813  *   Item specification.
814  * @param[in] attr
815  *   Attributes of flow that includes this item.
816  * @param[out] error
817  *   Pointer to error structure.
818  *
819  * @return
820  *   0 on success, a negative errno value otherwise and rte_errno is set.
821  */
822 static int
823 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
824                            const struct rte_flow_item *item,
825                            const struct rte_flow_attr *attr,
826                            struct rte_flow_error *error)
827 {
828         const struct rte_flow_item_meta *spec = item->spec;
829         const struct rte_flow_item_meta *mask = item->mask;
830         const struct rte_flow_item_meta nic_mask = {
831                 .data = RTE_BE32(UINT32_MAX)
832         };
833         int ret;
834         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
835
836         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
837                 return rte_flow_error_set(error, EPERM,
838                                           RTE_FLOW_ERROR_TYPE_ITEM,
839                                           NULL,
840                                           "match on metadata offload "
841                                           "configuration is off for this port");
842         if (!spec)
843                 return rte_flow_error_set(error, EINVAL,
844                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
845                                           item->spec,
846                                           "data cannot be empty");
847         if (!spec->data)
848                 return rte_flow_error_set(error, EINVAL,
849                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
850                                           NULL,
851                                           "data cannot be zero");
852         if (!mask)
853                 mask = &rte_flow_item_meta_mask;
854         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
855                                         (const uint8_t *)&nic_mask,
856                                         sizeof(struct rte_flow_item_meta),
857                                         error);
858         if (ret < 0)
859                 return ret;
860         if (attr->ingress)
861                 return rte_flow_error_set(error, ENOTSUP,
862                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
863                                           NULL,
864                                           "pattern not supported for ingress");
865         return 0;
866 }
867
868 /**
869  * Validate vport (port ID) item.
870  *
871  * @param[in] dev
872  *   Pointer to the rte_eth_dev structure.
873  * @param[in] item
874  *   Item specification.
875  * @param[in] attr
876  *   Attributes of flow that includes this item.
877  * @param[in] item_flags
878  *   Bit-fields that holds the items detected until now.
879  * @param[out] error
880  *   Pointer to error structure.
881  *
882  * @return
883  *   0 on success, a negative errno value otherwise and rte_errno is set.
884  */
885 static int
886 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
887                               const struct rte_flow_item *item,
888                               const struct rte_flow_attr *attr,
889                               uint64_t item_flags,
890                               struct rte_flow_error *error)
891 {
892         const struct rte_flow_item_port_id *spec = item->spec;
893         const struct rte_flow_item_port_id *mask = item->mask;
894         const struct rte_flow_item_port_id switch_mask = {
895                         .id = 0xffffffff,
896         };
897         struct mlx5_priv *esw_priv;
898         struct mlx5_priv *dev_priv;
899         int ret;
900
901         if (!attr->transfer)
902                 return rte_flow_error_set(error, EINVAL,
903                                           RTE_FLOW_ERROR_TYPE_ITEM,
904                                           NULL,
905                                           "match on port id is valid only"
906                                           " when transfer flag is enabled");
907         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
908                 return rte_flow_error_set(error, ENOTSUP,
909                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
910                                           "multiple source ports are not"
911                                           " supported");
912         if (!mask)
913                 mask = &switch_mask;
914         if (mask->id != 0xffffffff)
915                 return rte_flow_error_set(error, ENOTSUP,
916                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
917                                            mask,
918                                            "no support for partial mask on"
919                                            " \"id\" field");
920         ret = mlx5_flow_item_acceptable
921                                 (item, (const uint8_t *)mask,
922                                  (const uint8_t *)&rte_flow_item_port_id_mask,
923                                  sizeof(struct rte_flow_item_port_id),
924                                  error);
925         if (ret)
926                 return ret;
927         if (!spec)
928                 return 0;
929         esw_priv = mlx5_port_to_eswitch_info(spec->id);
930         if (!esw_priv)
931                 return rte_flow_error_set(error, rte_errno,
932                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
933                                           "failed to obtain E-Switch info for"
934                                           " port");
935         dev_priv = mlx5_dev_to_eswitch_info(dev);
936         if (!dev_priv)
937                 return rte_flow_error_set(error, rte_errno,
938                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
939                                           NULL,
940                                           "failed to obtain E-Switch info");
941         if (esw_priv->domain_id != dev_priv->domain_id)
942                 return rte_flow_error_set(error, EINVAL,
943                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
944                                           "cannot match on a port from a"
945                                           " different E-Switch");
946         return 0;
947 }
948
949 /**
950  * Validate the pop VLAN action.
951  *
952  * @param[in] dev
953  *   Pointer to the rte_eth_dev structure.
954  * @param[in] action_flags
955  *   Holds the actions detected until now.
956  * @param[in] action
957  *   Pointer to the pop vlan action.
958  * @param[in] item_flags
959  *   The items found in this flow rule.
960  * @param[in] attr
961  *   Pointer to flow attributes.
962  * @param[out] error
963  *   Pointer to error structure.
964  *
965  * @return
966  *   0 on success, a negative errno value otherwise and rte_errno is set.
967  */
968 static int
969 flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
970                                  uint64_t action_flags,
971                                  const struct rte_flow_action *action,
972                                  uint64_t item_flags,
973                                  const struct rte_flow_attr *attr,
974                                  struct rte_flow_error *error)
975 {
976         struct mlx5_priv *priv = dev->data->dev_private;
977
978         (void)action;
979         (void)attr;
980         if (!priv->sh->pop_vlan_action)
981                 return rte_flow_error_set(error, ENOTSUP,
982                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
983                                           NULL,
984                                           "pop vlan action is not supported");
985         /*
986          * Check for inconsistencies:
987          *  fail strip_vlan in a flow that matches packets without VLAN tags;
988          *  fail strip_vlan in a flow that does not explicitly match on a
989          *  VLAN tag.
990          */
991         if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
992                 return rte_flow_error_set(error, ENOTSUP,
993                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
994                                           NULL,
995                                           "no support for multiple vlan pop "
996                                           "actions");
997         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
998                 return rte_flow_error_set(error, ENOTSUP,
999                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1000                                           NULL,
1001                                           "cannot pop vlan without a "
1002                                           "match on (outer) vlan in the flow");
1003         return 0;
1004 }
1005
1006 /**
1007  * Get VLAN default info from vlan match info.
1008  *
1009  * @param[in] items
1010  *   The list of item specifications.
1011  * @param[out] vlan
1012  *   Pointer to the VLAN info to fill.
1013  */
1021 static void
1022 flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
1023                                   struct rte_vlan_hdr *vlan)
1024 {
1025         const struct rte_flow_item_vlan nic_mask = {
1026                 .tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
1027                                 MLX5DV_FLOW_VLAN_VID_MASK),
1028                 .inner_type = RTE_BE16(0xffff),
1029         };
1030
1031         if (items == NULL)
1032                 return;
1033         for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
1034                items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
1035                 ;
1036         if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1037                 const struct rte_flow_item_vlan *vlan_m = items->mask;
1038                 const struct rte_flow_item_vlan *vlan_v = items->spec;
1039
1040                 if (!vlan_m)
1041                         vlan_m = &nic_mask;
1042                 /* Only full match values are accepted */
1043                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
1044                      MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
1045                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
1046                         vlan->vlan_tci |=
1047                                 rte_be_to_cpu_16(vlan_v->tci &
1048                                                  MLX5DV_FLOW_VLAN_PCP_MASK_BE);
1049                 }
1050                 if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
1051                      MLX5DV_FLOW_VLAN_VID_MASK_BE) {
1052                         vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
1053                         vlan->vlan_tci |=
1054                                 rte_be_to_cpu_16(vlan_v->tci &
1055                                                  MLX5DV_FLOW_VLAN_VID_MASK_BE);
1056                 }
1057                 if (vlan_m->inner_type == nic_mask.inner_type)
1058                         vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
1059                                                            vlan_m->inner_type);
1060         }
1061 }
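
/*
 * Example (hypothetical values): a VLAN item with spec tci =
 * RTE_BE16(0x2064) and mask tci = RTE_BE16(0xefff) fully covers both
 * field masks, so vlan->vlan_tci receives PCP = 1 and VID = 0x064. A
 * partial mask such as RTE_BE16(0x00ff) covers neither full-field mask
 * and leaves vlan->vlan_tci untouched.
 */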
1062
1063 /**
1064  * Validate the push VLAN action.
1065  *
1066  * @param[in] action_flags
1067  *   Holds the actions detected until now.
1068  * @param[in] item_flags
1069  *   The items found in this flow rule.
1070  * @param[in] action
1071  *   Pointer to the push VLAN action.
1070  * @param[in] attr
1071  *   Pointer to flow attributes
1072  * @param[out] error
1073  *   Pointer to error structure.
1074  *
1075  * @return
1076  *   0 on success, a negative errno value otherwise and rte_errno is set.
1077  */
1078 static int
1079 flow_dv_validate_action_push_vlan(uint64_t action_flags,
1080                                   uint64_t item_flags,
1081                                   const struct rte_flow_action *action,
1082                                   const struct rte_flow_attr *attr,
1083                                   struct rte_flow_error *error)
1084 {
1085         const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
1086
1087         if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
1088             push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
1089                 return rte_flow_error_set(error, EINVAL,
1090                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1091                                           "invalid vlan ethertype");
1092         if (action_flags &
1093                 (MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1094                 return rte_flow_error_set(error, ENOTSUP,
1095                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1096                                           "no support for multiple VLAN "
1097                                           "actions");
1098         if (!mlx5_flow_find_action
1099                         (action + 1, RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID) &&
1100             !(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1101                 return rte_flow_error_set(error, ENOTSUP,
1102                                 RTE_FLOW_ERROR_TYPE_ACTION, action,
1103                                 "push VLAN needs to match on VLAN in order to "
1104                                 "get VLAN VID information because there is "
1105                                 "no subsequent set VLAN VID action");
1106         (void)attr;
1107         return 0;
1108 }
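
/*
 * For example, the sequence OF_PUSH_VLAN -> OF_SET_VLAN_VID is accepted
 * even without a VLAN item, since the VID of the new header comes from
 * the set VLAN VID action, while a bare OF_PUSH_VLAN on a flow that does
 * not match on (outer) VLAN is rejected above.
 */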
1109
1110 /**
1111  * Validate the set VLAN PCP.
1112  *
1113  * @param[in] action_flags
1114  *   Holds the actions detected until now.
1115  * @param[in] actions
1116  *   Pointer to the list of actions remaining in the flow rule.
1119  * @param[out] error
1120  *   Pointer to error structure.
1121  *
1122  * @return
1123  *   0 on success, a negative errno value otherwise and rte_errno is set.
1124  */
1125 static int
1126 flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
1127                                      const struct rte_flow_action actions[],
1128                                      struct rte_flow_error *error)
1129 {
1130         const struct rte_flow_action *action = actions;
1131         const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;
1132
1133         if (conf->vlan_pcp > 7)
1134                 return rte_flow_error_set(error, EINVAL,
1135                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1136                                           "VLAN PCP value is too big");
1137         if (!(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN))
1138                 return rte_flow_error_set(error, ENOTSUP,
1139                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1140                                           "set VLAN PCP action must follow "
1141                                           "the push VLAN action");
1142         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_PCP)
1143                 return rte_flow_error_set(error, ENOTSUP,
1144                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1145                                           "Multiple VLAN PCP modifications are "
1146                                           "not supported");
1147         return 0;
1148 }
1149
1150 /**
1151  * Validate the set VLAN VID.
1152  *
1153  * @param[in] item_flags
1154  *   Holds the items detected in this rule.
1155  * @param[in] action_flags
1156  *   Holds the actions detected until now.
1157  * @param[in] actions
1158  *   Pointer to the list of actions remaining in the flow rule.
1159  * @param[out] error
1160  *   Pointer to error structure.
1161  *
1162  * @return
1163  *   0 on success, a negative errno value otherwise and rte_errno is set.
1164  */
1165 static int
1166 flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
1167                                      uint64_t action_flags,
1168                                      const struct rte_flow_action actions[],
1169                                      struct rte_flow_error *error)
1170 {
1171         const struct rte_flow_action *action = actions;
1172         const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
1173
1174         if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
1175                 return rte_flow_error_set(error, EINVAL,
1176                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1177                                           "VLAN VID value is too big");
1178         /* there is an of_push_vlan action before us */
1179         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
1180                 if (mlx5_flow_find_action(actions + 1,
1181                                           RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
1182                         return rte_flow_error_set(error, ENOTSUP,
1183                                         RTE_FLOW_ERROR_TYPE_ACTION, action,
1184                                         "Multiple VLAN VID modifications are "
1185                                         "not supported");
1186                 else
1187                         return 0;
1188         }
1189
1190         /*
1191          * Action is on an existing VLAN header:
1192          *    Need to verify this is a single modify VID action.
1193          *    The rule must include a match on the outer VLAN.
1194          */
1195         if (action_flags & MLX5_FLOW_ACTION_OF_SET_VLAN_VID)
1196                 return rte_flow_error_set(error, ENOTSUP,
1197                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1198                                           "Multiple VLAN VID modifications are "
1199                                           "not supported");
1200         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
1201                 return rte_flow_error_set(error, EINVAL,
1202                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1203                                           "match on VLAN is required in order "
1204                                           "to set VLAN VID");
1205         return 0;
1206 }
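
/*
 * Ordering examples for the check above: after OF_PUSH_VLAN, a single
 * OF_SET_VLAN_VID programs the VID of the newly pushed header; without
 * a preceding push, the action rewrites an existing header and thus
 * requires a match on (outer) VLAN. Two OF_SET_VLAN_VID actions are
 * rejected in either case.
 */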
1207
1208 /**
1209  * Validate count action.
1210  *
1211  * @param[in] dev
1212  *   Pointer to the rte_eth_dev structure.
1213  * @param[out] error
1214  *   Pointer to error structure.
1215  *
1216  * @return
1217  *   0 on success, a negative errno value otherwise and rte_errno is set.
1218  */
1219 static int
1220 flow_dv_validate_action_count(struct rte_eth_dev *dev,
1221                               struct rte_flow_error *error)
1222 {
1223         struct mlx5_priv *priv = dev->data->dev_private;
1224
1225         if (!priv->config.devx)
1226                 goto notsup_err;
1227 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
1228         return 0;
1229 #endif
1230 notsup_err:
1231         return rte_flow_error_set
1232                       (error, ENOTSUP,
1233                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1234                        NULL,
1235                        "count action not supported");
1236 }
1237
1238 /**
1239  * Validate the L2 encap action.
1240  *
1241  * @param[in] action_flags
1242  *   Holds the actions detected until now.
1243  * @param[in] action
1244  *   Pointer to the encap action.
1245  * @param[in] attr
1246  *   Pointer to flow attributes
1247  * @param[out] error
1248  *   Pointer to error structure.
1249  *
1250  * @return
1251  *   0 on success, a negative errno value otherwise and rte_errno is set.
1252  */
1253 static int
1254 flow_dv_validate_action_l2_encap(uint64_t action_flags,
1255                                  const struct rte_flow_action *action,
1256                                  const struct rte_flow_attr *attr,
1257                                  struct rte_flow_error *error)
1258 {
1259         if (!(action->conf))
1260                 return rte_flow_error_set(error, EINVAL,
1261                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1262                                           "configuration cannot be null");
1263         if (action_flags & MLX5_FLOW_ACTION_DROP)
1264                 return rte_flow_error_set(error, EINVAL,
1265                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1266                                           "can't drop and encap in same flow");
1267         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1268                 return rte_flow_error_set(error, EINVAL,
1269                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1270                                           "can only have a single encap or"
1271                                           " decap action in a flow");
1272         if (!attr->transfer && attr->ingress)
1273                 return rte_flow_error_set(error, ENOTSUP,
1274                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1275                                           NULL,
1276                                           "encap action not supported for "
1277                                           "ingress");
1278         return 0;
1279 }
1280
1281 /**
1282  * Validate the L2 decap action.
1283  *
1284  * @param[in] action_flags
1285  *   Holds the actions detected until now.
1286  * @param[in] attr
1287  *   Pointer to flow attributes
1288  * @param[out] error
1289  *   Pointer to error structure.
1290  *
1291  * @return
1292  *   0 on success, a negative errno value otherwise and rte_errno is set.
1293  */
1294 static int
1295 flow_dv_validate_action_l2_decap(uint64_t action_flags,
1296                                  const struct rte_flow_attr *attr,
1297                                  struct rte_flow_error *error)
1298 {
1299         if (action_flags & MLX5_FLOW_ACTION_DROP)
1300                 return rte_flow_error_set(error, EINVAL,
1301                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1302                                           "can't drop and decap in same flow");
1303         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
1304                 return rte_flow_error_set(error, EINVAL,
1305                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1306                                           "can only have a single encap or"
1307                                           " decap action in a flow");
1308         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1309                 return rte_flow_error_set(error, EINVAL,
1310                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1311                                           "can't have decap action after"
1312                                           " modify action");
1313         if (attr->egress)
1314                 return rte_flow_error_set(error, ENOTSUP,
1315                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1316                                           NULL,
1317                                           "decap action not supported for "
1318                                           "egress");
1319         return 0;
1320 }
1321
1322 /**
1323  * Validate the raw encap action.
1324  *
1325  * @param[in] action_flags
1326  *   Holds the actions detected until now.
1327  * @param[in] action
1328  *   Pointer to the encap action.
1329  * @param[in] attr
1330  *   Pointer to flow attributes
1331  * @param[out] error
1332  *   Pointer to error structure.
1333  *
1334  * @return
1335  *   0 on success, a negative errno value otherwise and rte_errno is set.
1336  */
1337 static int
1338 flow_dv_validate_action_raw_encap(uint64_t action_flags,
1339                                   const struct rte_flow_action *action,
1340                                   const struct rte_flow_attr *attr,
1341                                   struct rte_flow_error *error)
1342 {
1343         const struct rte_flow_action_raw_encap *raw_encap =
1344                 (const struct rte_flow_action_raw_encap *)action->conf;
1345         if (!(action->conf))
1346                 return rte_flow_error_set(error, EINVAL,
1347                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1348                                           "configuration cannot be null");
1349         if (action_flags & MLX5_FLOW_ACTION_DROP)
1350                 return rte_flow_error_set(error, EINVAL,
1351                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1352                                           "can't drop and encap in same flow");
1353         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1354                 return rte_flow_error_set(error, EINVAL,
1355                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1356                                           "can only have a single encap"
1357                                           " action in a flow");
1358         /* encap without preceding decap is not supported for ingress */
1359         if (!attr->transfer && attr->ingress &&
1360             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
1361                 return rte_flow_error_set(error, ENOTSUP,
1362                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1363                                           NULL,
1364                                           "encap action not supported for "
1365                                           "ingress");
1366         if (!raw_encap->size || !raw_encap->data)
1367                 return rte_flow_error_set(error, EINVAL,
1368                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
1369                                           "raw encap data cannot be empty");
1370         return 0;
1371 }
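/*
 * Illustrative sketch (not part of the driver): a minimal RAW_ENCAP
 * action configuration that satisfies the checks above. The buffer
 * name, length variable and contents are hypothetical; the application
 * pre-builds the outer headers. On ingress (non-transfer) this action
 * must be preceded by RAW_DECAP in the same action list.
 *
 *     uint8_t encap_hdr[MLX5_ENCAP_MAX_LEN]; // pre-built outer headers
 *     size_t encap_len;                      // actual header length, > 0
 *     struct rte_flow_action_raw_encap raw = {
 *             .data = encap_hdr,
 *             .size = encap_len,
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &raw },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */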
1372
1373 /**
1374  * Validate the raw decap action.
1375  *
1376  * @param[in] action_flags
1377  *   Holds the actions detected until now.
1378  * @param[in] action
1379  *   Pointer to the decap action.
1380  * @param[in] attr
1381  *   Pointer to flow attributes
1382  * @param[out] error
1383  *   Pointer to error structure.
1384  *
1385  * @return
1386  *   0 on success, a negative errno value otherwise and rte_errno is set.
1387  */
1388 static int
1389 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1390                                   const struct rte_flow_action *action,
1391                                   const struct rte_flow_attr *attr,
1392                                   struct rte_flow_error *error)
1393 {
1394         if (action_flags & MLX5_FLOW_ACTION_DROP)
1395                 return rte_flow_error_set(error, EINVAL,
1396                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1397                                           "can't drop and decap in same flow");
1398         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1399                 return rte_flow_error_set(error, EINVAL,
1400                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1401                                           "can't have encap action before"
1402                                           " decap action");
1403         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1404                 return rte_flow_error_set(error, EINVAL,
1405                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1406                                           "can only have a single decap"
1407                                           " action in a flow");
1408         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1409                 return rte_flow_error_set(error, EINVAL,
1410                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1411                                           "can't have decap action after"
1412                                           " modify action");
1413         /* decap action is valid on egress only if it is followed by encap */
1414         if (attr->egress) {
1415                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1416                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1417                        action++) {
1418                 }
1419                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1420                         return rte_flow_error_set
1421                                         (error, ENOTSUP,
1422                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1423                                          NULL, "decap action not supported"
1424                                          " for egress");
1425         }
1426         return 0;
1427 }
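/*
 * Illustrative sketch (not part of the driver): on egress a raw decap
 * is only accepted when a raw encap follows it in the same action
 * list, which is what the loop above verifies. "raw_decap" and
 * "raw_encap" stand for application-filled configurations.
 *
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &raw_decap },
 *             { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &raw_encap },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */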
1428
1429 /**
1430  * Find existing encap/decap resource or create and register a new one.
1431  *
1432  * @param[in, out] dev
1433  *   Pointer to rte_eth_dev structure.
1434  * @param[in, out] resource
1435  *   Pointer to encap/decap resource.
1436  * @param[in, out] dev_flow
1437  *   Pointer to the dev_flow.
1438  * @param[out] error
1439  *   Pointer to error structure.
1440  *
1441  * @return
1442  *   0 on success, a negative errno value otherwise and rte_errno is set.
1443  */
1444 static int
1445 flow_dv_encap_decap_resource_register
1446                         (struct rte_eth_dev *dev,
1447                          struct mlx5_flow_dv_encap_decap_resource *resource,
1448                          struct mlx5_flow *dev_flow,
1449                          struct rte_flow_error *error)
1450 {
1451         struct mlx5_priv *priv = dev->data->dev_private;
1452         struct mlx5_ibv_shared *sh = priv->sh;
1453         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1454         struct rte_flow *flow = dev_flow->flow;
1455         struct mlx5dv_dr_domain *domain;
1456
1457         resource->flags = flow->group ? 0 : 1;
1458         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1459                 domain = sh->fdb_domain;
1460         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1461                 domain = sh->rx_domain;
1462         else
1463                 domain = sh->tx_domain;
1464
1465         /* Lookup a matching resource from cache. */
1466         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1467                 if (resource->reformat_type == cache_resource->reformat_type &&
1468                     resource->ft_type == cache_resource->ft_type &&
1469                     resource->flags == cache_resource->flags &&
1470                     resource->size == cache_resource->size &&
1471                     !memcmp((const void *)resource->buf,
1472                             (const void *)cache_resource->buf,
1473                             resource->size)) {
1474                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1475                                 (void *)cache_resource,
1476                                 rte_atomic32_read(&cache_resource->refcnt));
1477                         rte_atomic32_inc(&cache_resource->refcnt);
1478                         dev_flow->dv.encap_decap = cache_resource;
1479                         return 0;
1480                 }
1481         }
1482         /* Register new encap/decap resource. */
1483         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1484         if (!cache_resource)
1485                 return rte_flow_error_set(error, ENOMEM,
1486                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1487                                           "cannot allocate resource memory");
1488         *cache_resource = *resource;
1489         cache_resource->verbs_action =
1490                 mlx5_glue->dv_create_flow_action_packet_reformat
1491                         (sh->ctx, cache_resource->reformat_type,
1492                          cache_resource->ft_type, domain, cache_resource->flags,
1493                          cache_resource->size,
1494                          (cache_resource->size ? cache_resource->buf : NULL));
1495         if (!cache_resource->verbs_action) {
1496                 rte_free(cache_resource);
1497                 return rte_flow_error_set(error, ENOMEM,
1498                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1499                                           NULL, "cannot create action");
1500         }
1501         rte_atomic32_init(&cache_resource->refcnt);
1502         rte_atomic32_inc(&cache_resource->refcnt);
1503         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1504         dev_flow->dv.encap_decap = cache_resource;
1505         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1506                 (void *)cache_resource,
1507                 rte_atomic32_read(&cache_resource->refcnt));
1508         return 0;
1509 }
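/*
 * The function above follows the find-or-create idiom shared by every
 * resource cache in this file (the jump table, port ID and push VLAN
 * registrations below): scan the shared list for an entry with the
 * same key, take a reference if found, otherwise allocate, create the
 * HW object, initialize the refcount to one and insert at the head.
 * A condensed sketch of the shape (types and error handling elided;
 * "match" and "create_hw_object" are placeholders):
 *
 *     LIST_FOREACH(entry, &list, next)
 *             if (match(entry, key)) {
 *                     rte_atomic32_inc(&entry->refcnt);
 *                     return entry;
 *             }
 *     entry = rte_calloc(__func__, 1, sizeof(*entry), 0);
 *     entry->obj = create_hw_object(key); // driver-specific glue call
 *     rte_atomic32_init(&entry->refcnt);
 *     rte_atomic32_inc(&entry->refcnt);
 *     LIST_INSERT_HEAD(&list, entry, next);
 *     return entry;
 */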
1510
1511 /**
1512  * Find existing jump table resource or create and register a new one.
1513  *
1514  * @param[in, out] dev
1515  *   Pointer to rte_eth_dev structure.
1516  * @param[in, out] resource
1517  *   Pointer to jump table resource.
1518  * @param[in, out] dev_flow
1519  *   Pointer to the dev_flow.
1520  * @param[out] error
1521  *   Pointer to error structure.
1522  *
1523  * @return
1524  *   0 on success, a negative errno value otherwise and rte_errno is set.
1525  */
1526 static int
1527 flow_dv_jump_tbl_resource_register
1528                         (struct rte_eth_dev *dev,
1529                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1530                          struct mlx5_flow *dev_flow,
1531                          struct rte_flow_error *error)
1532 {
1533         struct mlx5_priv *priv = dev->data->dev_private;
1534         struct mlx5_ibv_shared *sh = priv->sh;
1535         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1536
1537         /* Lookup a matching resource from cache. */
1538         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1539                 if (resource->tbl == cache_resource->tbl) {
1540                         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1541                                 (void *)cache_resource,
1542                                 rte_atomic32_read(&cache_resource->refcnt));
1543                         rte_atomic32_inc(&cache_resource->refcnt);
1544                         dev_flow->dv.jump = cache_resource;
1545                         return 0;
1546                 }
1547         }
1548         /* Register new jump table resource. */
1549         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1550         if (!cache_resource)
1551                 return rte_flow_error_set(error, ENOMEM,
1552                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1553                                           "cannot allocate resource memory");
1554         *cache_resource = *resource;
1555         cache_resource->action =
1556                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1557                 (resource->tbl->obj);
1558         if (!cache_resource->action) {
1559                 rte_free(cache_resource);
1560                 return rte_flow_error_set(error, ENOMEM,
1561                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1562                                           NULL, "cannot create action");
1563         }
1564         rte_atomic32_init(&cache_resource->refcnt);
1565         rte_atomic32_inc(&cache_resource->refcnt);
1566         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1567         dev_flow->dv.jump = cache_resource;
1568         DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1569                 (void *)cache_resource,
1570                 rte_atomic32_read(&cache_resource->refcnt));
1571         return 0;
1572 }
1573
1574 /**
1575  * Find existing port ID action resource or create and register a new one.
1576  *
1577  * @param[in, out] dev
1578  *   Pointer to rte_eth_dev structure.
1579  * @param[in, out] resource
1580  *   Pointer to port ID action resource.
1581  * @param[in, out] dev_flow
1582  *   Pointer to the dev_flow.
1583  * @param[out] error
1584  *   Pointer to error structure.
1585  *
1586  * @return
1587  *   0 on success, a negative errno value otherwise and rte_errno is set.
1588  */
1589 static int
1590 flow_dv_port_id_action_resource_register
1591                         (struct rte_eth_dev *dev,
1592                          struct mlx5_flow_dv_port_id_action_resource *resource,
1593                          struct mlx5_flow *dev_flow,
1594                          struct rte_flow_error *error)
1595 {
1596         struct mlx5_priv *priv = dev->data->dev_private;
1597         struct mlx5_ibv_shared *sh = priv->sh;
1598         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1599
1600         /* Lookup a matching resource from cache. */
1601         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1602                 if (resource->port_id == cache_resource->port_id) {
1603                         DRV_LOG(DEBUG, "port id action resource %p: "
1604                                 "refcnt %d++",
1605                                 (void *)cache_resource,
1606                                 rte_atomic32_read(&cache_resource->refcnt));
1607                         rte_atomic32_inc(&cache_resource->refcnt);
1608                         dev_flow->dv.port_id_action = cache_resource;
1609                         return 0;
1610                 }
1611         }
1612         /* Register new port id action resource. */
1613         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1614         if (!cache_resource)
1615                 return rte_flow_error_set(error, ENOMEM,
1616                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1617                                           "cannot allocate resource memory");
1618         *cache_resource = *resource;
1619         cache_resource->action =
1620                 mlx5_glue->dr_create_flow_action_dest_vport
1621                         (priv->sh->fdb_domain, resource->port_id);
1622         if (!cache_resource->action) {
1623                 rte_free(cache_resource);
1624                 return rte_flow_error_set(error, ENOMEM,
1625                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1626                                           NULL, "cannot create action");
1627         }
1628         rte_atomic32_init(&cache_resource->refcnt);
1629         rte_atomic32_inc(&cache_resource->refcnt);
1630         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1631         dev_flow->dv.port_id_action = cache_resource;
1632         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1633                 (void *)cache_resource,
1634                 rte_atomic32_read(&cache_resource->refcnt));
1635         return 0;
1636 }
1637
1638 /**
1639  * Find existing push vlan resource or create and register a new one.
1640  *
1641  * @param[in, out] dev
1642  *   Pointer to rte_eth_dev structure.
1643  * @param[in, out] resource
1644  *   Pointer to push VLAN action resource.
1645  * @param[in, out] dev_flow
1646  *   Pointer to the dev_flow.
1647  * @param[out] error
1648  *   Pointer to error structure.
1649  *
1650  * @return
1651  *   0 on success, a negative errno value otherwise and rte_errno is set.
1652  */
1653 static int
1654 flow_dv_push_vlan_action_resource_register
1655                        (struct rte_eth_dev *dev,
1656                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1657                         struct mlx5_flow *dev_flow,
1658                         struct rte_flow_error *error)
1659 {
1660         struct mlx5_priv *priv = dev->data->dev_private;
1661         struct mlx5_ibv_shared *sh = priv->sh;
1662         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1663         struct mlx5dv_dr_domain *domain;
1664
1665         /* Lookup a matching resource from cache. */
1666         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1667                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1668                     resource->ft_type == cache_resource->ft_type) {
1669                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
1670                                 "refcnt %d++",
1671                                 (void *)cache_resource,
1672                                 rte_atomic32_read(&cache_resource->refcnt));
1673                         rte_atomic32_inc(&cache_resource->refcnt);
1674                         dev_flow->dv.push_vlan_res = cache_resource;
1675                         return 0;
1676                 }
1677         }
1678         /* Register new push_vlan action resource. */
1679         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1680         if (!cache_resource)
1681                 return rte_flow_error_set(error, ENOMEM,
1682                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1683                                           "cannot allocate resource memory");
1684         *cache_resource = *resource;
1685         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1686                 domain = sh->fdb_domain;
1687         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1688                 domain = sh->rx_domain;
1689         else
1690                 domain = sh->tx_domain;
1691         cache_resource->action =
1692                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1693                                                            resource->vlan_tag);
1694         if (!cache_resource->action) {
1695                 rte_free(cache_resource);
1696                 return rte_flow_error_set(error, ENOMEM,
1697                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1698                                           NULL, "cannot create action");
1699         }
1700         rte_atomic32_init(&cache_resource->refcnt);
1701         rte_atomic32_inc(&cache_resource->refcnt);
1702         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1703         dev_flow->dv.push_vlan_res = cache_resource;
1704         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1705                 (void *)cache_resource,
1706                 rte_atomic32_read(&cache_resource->refcnt));
1707         return 0;
1708 }

1709 /**
1710  * Get the size of a specific rte_flow_item_type.
1711  *
1712  * @param[in] item_type
1713  *   Tested rte_flow_item_type.
1714  *
1715  * @return
1716  *   Size of the item-type structure, 0 if void or irrelevant.
1717  */
1718 static size_t
1719 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1720 {
1721         size_t retval;
1722
1723         switch (item_type) {
1724         case RTE_FLOW_ITEM_TYPE_ETH:
1725                 retval = sizeof(struct rte_flow_item_eth);
1726                 break;
1727         case RTE_FLOW_ITEM_TYPE_VLAN:
1728                 retval = sizeof(struct rte_flow_item_vlan);
1729                 break;
1730         case RTE_FLOW_ITEM_TYPE_IPV4:
1731                 retval = sizeof(struct rte_flow_item_ipv4);
1732                 break;
1733         case RTE_FLOW_ITEM_TYPE_IPV6:
1734                 retval = sizeof(struct rte_flow_item_ipv6);
1735                 break;
1736         case RTE_FLOW_ITEM_TYPE_UDP:
1737                 retval = sizeof(struct rte_flow_item_udp);
1738                 break;
1739         case RTE_FLOW_ITEM_TYPE_TCP:
1740                 retval = sizeof(struct rte_flow_item_tcp);
1741                 break;
1742         case RTE_FLOW_ITEM_TYPE_VXLAN:
1743                 retval = sizeof(struct rte_flow_item_vxlan);
1744                 break;
1745         case RTE_FLOW_ITEM_TYPE_GRE:
1746                 retval = sizeof(struct rte_flow_item_gre);
1747                 break;
1748         case RTE_FLOW_ITEM_TYPE_NVGRE:
1749                 retval = sizeof(struct rte_flow_item_nvgre);
1750                 break;
1751         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1752                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1753                 break;
1754         case RTE_FLOW_ITEM_TYPE_MPLS:
1755                 retval = sizeof(struct rte_flow_item_mpls);
1756                 break;
1757         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1758         default:
1759                 retval = 0;
1760                 break;
1761         }
1762         return retval;
1763 }
1764
1765 #define MLX5_ENCAP_IPV4_VERSION         0x40
1766 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1767 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1768 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1769 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1770 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1771 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1772
1773 /**
1774  * Convert encap action data from a list of rte_flow_item to a raw buffer.
1775  *
1776  * @param[in] items
1777  *   Pointer to rte_flow_item objects list.
1778  * @param[out] buf
1779  *   Pointer to the output buffer.
1780  * @param[out] size
1781  *   Pointer to the output buffer size.
1782  * @param[out] error
1783  *   Pointer to the error structure.
1784  *
1785  * @return
1786  *   0 on success, a negative errno value otherwise and rte_errno is set.
1787  */
1788 static int
1789 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1790                            size_t *size, struct rte_flow_error *error)
1791 {
1792         struct rte_ether_hdr *eth = NULL;
1793         struct rte_vlan_hdr *vlan = NULL;
1794         struct rte_ipv4_hdr *ipv4 = NULL;
1795         struct rte_ipv6_hdr *ipv6 = NULL;
1796         struct rte_udp_hdr *udp = NULL;
1797         struct rte_vxlan_hdr *vxlan = NULL;
1798         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1799         struct rte_gre_hdr *gre = NULL;
1800         size_t len;
1801         size_t temp_size = 0;
1802
1803         if (!items)
1804                 return rte_flow_error_set(error, EINVAL,
1805                                           RTE_FLOW_ERROR_TYPE_ACTION,
1806                                           NULL, "invalid empty data");
1807         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1808                 len = flow_dv_get_item_len(items->type);
1809                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1810                         return rte_flow_error_set(error, EINVAL,
1811                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1812                                                   (void *)items->type,
1813                                                   "items total size is too big"
1814                                                   " for encap action");
1815                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1816                 switch (items->type) {
1817                 case RTE_FLOW_ITEM_TYPE_ETH:
1818                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1819                         break;
1820                 case RTE_FLOW_ITEM_TYPE_VLAN:
1821                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1822                         if (!eth)
1823                                 return rte_flow_error_set(error, EINVAL,
1824                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1825                                                 (void *)items->type,
1826                                                 "eth header not found");
1827                         if (!eth->ether_type)
1828                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1829                         break;
1830                 case RTE_FLOW_ITEM_TYPE_IPV4:
1831                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1832                         if (!vlan && !eth)
1833                                 return rte_flow_error_set(error, EINVAL,
1834                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1835                                                 (void *)items->type,
1836                                                 "neither eth nor vlan"
1837                                                 " header found");
1838                         if (vlan && !vlan->eth_proto)
1839                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1840                         else if (eth && !eth->ether_type)
1841                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1842                         if (!ipv4->version_ihl)
1843                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1844                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1845                         if (!ipv4->time_to_live)
1846                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1847                         break;
1848                 case RTE_FLOW_ITEM_TYPE_IPV6:
1849                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1850                         if (!vlan && !eth)
1851                                 return rte_flow_error_set(error, EINVAL,
1852                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1853                                                 (void *)items->type,
1854                                                 "neither eth nor vlan"
1855                                                 " header found");
1856                         if (vlan && !vlan->eth_proto)
1857                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1858                         else if (eth && !eth->ether_type)
1859                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1860                         if (!ipv6->vtc_flow)
1861                                 ipv6->vtc_flow =
1862                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1863                         if (!ipv6->hop_limits)
1864                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1865                         break;
1866                 case RTE_FLOW_ITEM_TYPE_UDP:
1867                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1868                         if (!ipv4 && !ipv6)
1869                                 return rte_flow_error_set(error, EINVAL,
1870                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1871                                                 (void *)items->type,
1872                                                 "ip header not found");
1873                         if (ipv4 && !ipv4->next_proto_id)
1874                                 ipv4->next_proto_id = IPPROTO_UDP;
1875                         else if (ipv6 && !ipv6->proto)
1876                                 ipv6->proto = IPPROTO_UDP;
1877                         break;
1878                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1879                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1880                         if (!udp)
1881                                 return rte_flow_error_set(error, EINVAL,
1882                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1883                                                 (void *)items->type,
1884                                                 "udp header not found");
1885                         if (!udp->dst_port)
1886                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1887                         if (!vxlan->vx_flags)
1888                                 vxlan->vx_flags =
1889                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1890                         break;
1891                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1892                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1893                         if (!udp)
1894                                 return rte_flow_error_set(error, EINVAL,
1895                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1896                                                 (void *)items->type,
1897                                                 "udp header not found");
1898                         if (!vxlan_gpe->proto)
1899                                 return rte_flow_error_set(error, EINVAL,
1900                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1901                                                 (void *)items->type,
1902                                                 "next protocol not found");
1903                         if (!udp->dst_port)
1904                                 udp->dst_port =
1905                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1906                         if (!vxlan_gpe->vx_flags)
1907                                 vxlan_gpe->vx_flags =
1908                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1909                         break;
1910                 case RTE_FLOW_ITEM_TYPE_GRE:
1911                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1912                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1913                         if (!gre->proto)
1914                                 return rte_flow_error_set(error, EINVAL,
1915                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1916                                                 (void *)items->type,
1917                                                 "next protocol not found");
1918                         if (!ipv4 && !ipv6)
1919                                 return rte_flow_error_set(error, EINVAL,
1920                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1921                                                 (void *)items->type,
1922                                                 "ip header not found");
1923                         if (ipv4 && !ipv4->next_proto_id)
1924                                 ipv4->next_proto_id = IPPROTO_GRE;
1925                         else if (ipv6 && !ipv6->proto)
1926                                 ipv6->proto = IPPROTO_GRE;
1927                         break;
1928                 case RTE_FLOW_ITEM_TYPE_VOID:
1929                         break;
1930                 default:
1931                         return rte_flow_error_set(error, EINVAL,
1932                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1933                                                   (void *)items->type,
1934                                                   "unsupported item type");
1935                         break;
1936                 }
1937                 temp_size += len;
1938         }
1939         *size = temp_size;
1940         return 0;
1941 }
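/*
 * Illustrative sketch (not part of the driver): a VXLAN encapsulation
 * description relying on the defaults filled in above. Fields left at
 * zero in the specs (eth.type, ipv4.hdr.version_ihl, udp.hdr.dst_port,
 * vxlan.flags, ...) are completed by the conversion; the addresses and
 * VNI below are hypothetical.
 *
 *     struct rte_flow_item_eth eth = {
 *             .dst.addr_bytes = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x01 },
 *             .src.addr_bytes = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x02 },
 *     };
 *     struct rte_flow_item_ipv4 ipv4 = {
 *             .hdr.src_addr = RTE_BE32(0xc0a80001), // 192.168.0.1
 *             .hdr.dst_addr = RTE_BE32(0xc0a80002), // 192.168.0.2
 *     };
 *     struct rte_flow_item_udp udp = { .hdr = { 0 } };
 *     struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */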
1942
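/**
 * Zero out the UDP checksum of an IPv6 packet in the encapsulation data.
 *
 * Walks the Ethernet header and any stacked VLAN headers. IPv4 packets
 * are left untouched since HW recalculates their checksum; non-IP and
 * non-UDP payloads are rejected or ignored respectively.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting at the Ethernet header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */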
1943 static int
1944 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1945 {
1946         struct rte_ether_hdr *eth = NULL;
1947         struct rte_vlan_hdr *vlan = NULL;
1948         struct rte_ipv6_hdr *ipv6 = NULL;
1949         struct rte_udp_hdr *udp = NULL;
1950         char *next_hdr;
1951         uint16_t proto;
1952
1953         eth = (struct rte_ether_hdr *)data;
1954         next_hdr = (char *)(eth + 1);
1955         proto = RTE_BE16(eth->ether_type);
1956
1957         /* VLAN skipping */
1958         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1959                 vlan = (struct rte_vlan_hdr *)next_hdr;
1960                 proto = RTE_BE16(vlan->eth_proto);
1961                 next_hdr += sizeof(struct rte_vlan_hdr);
1962         }
1963
1964         /* HW calculates the IPv4 checksum; no need to proceed. */
1965         if (proto == RTE_ETHER_TYPE_IPV4)
1966                 return 0;
1967
1968         /* A non-IPv4/IPv6 header is not supported. */
1969         if (proto != RTE_ETHER_TYPE_IPV6) {
1970                 return rte_flow_error_set(error, ENOTSUP,
1971                                           RTE_FLOW_ERROR_TYPE_ACTION,
1972                                           NULL, "Cannot offload non IPv4/IPv6");
1973         }
1974
1975         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1976
1977         /* Ignore non-UDP packets. */
1978         if (ipv6->proto != IPPROTO_UDP)
1979                 return 0;
1980
1981         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1982         udp->dgram_cksum = 0;
1983
1984         return 0;
1985 }
1986
1987 /**
1988  * Convert L2 encap action to DV specification.
1989  *
1990  * @param[in] dev
1991  *   Pointer to rte_eth_dev structure.
1992  * @param[in] action
1993  *   Pointer to action structure.
1994  * @param[in, out] dev_flow
1995  *   Pointer to the mlx5_flow.
1996  * @param[in] transfer
1997  *   Mark if the flow is E-Switch flow.
1998  * @param[out] error
1999  *   Pointer to the error structure.
2000  *
2001  * @return
2002  *   0 on success, a negative errno value otherwise and rte_errno is set.
2003  */
2004 static int
2005 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
2006                                const struct rte_flow_action *action,
2007                                struct mlx5_flow *dev_flow,
2008                                uint8_t transfer,
2009                                struct rte_flow_error *error)
2010 {
2011         const struct rte_flow_item *encap_data;
2012         const struct rte_flow_action_raw_encap *raw_encap_data;
2013         struct mlx5_flow_dv_encap_decap_resource res = {
2014                 .reformat_type =
2015                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
2016                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2017                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
2018         };
2019
2020         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2021                 raw_encap_data =
2022                         (const struct rte_flow_action_raw_encap *)action->conf;
2023                 res.size = raw_encap_data->size;
2024                 memcpy(res.buf, raw_encap_data->data, res.size);
2025                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
2026                         return -rte_errno;
2027         } else {
2028                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
2029                         encap_data =
2030                                 ((const struct rte_flow_action_vxlan_encap *)
2031                                                 action->conf)->definition;
2032                 else
2033                         encap_data =
2034                                 ((const struct rte_flow_action_nvgre_encap *)
2035                                                 action->conf)->definition;
2036                 if (flow_dv_convert_encap_data(encap_data, res.buf,
2037                                                &res.size, error))
2038                         return -rte_errno;
2039         }
2040         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2041                 return rte_flow_error_set(error, EINVAL,
2042                                           RTE_FLOW_ERROR_TYPE_ACTION,
2043                                           NULL, "can't create L2 encap action");
2044         return 0;
2045 }
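/*
 * Illustrative sketch (not part of the driver): a VXLAN_ENCAP action
 * as consumed by the function above; "items" refers to an item array
 * terminated by RTE_FLOW_ITEM_TYPE_END, such as the sketch after
 * flow_dv_convert_encap_data().
 *
 *     struct rte_flow_action_vxlan_encap vxlan_encap = {
 *             .definition = items,
 *     };
 *     struct rte_flow_action encap_action = {
 *             .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *             .conf = &vxlan_encap,
 *     };
 */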
2046
2047 /**
2048  * Convert L2 decap action to DV specification.
2049  *
2050  * @param[in] dev
2051  *   Pointer to rte_eth_dev structure.
2052  * @param[in, out] dev_flow
2053  *   Pointer to the mlx5_flow.
2054  * @param[in] transfer
2055  *   Mark if the flow is E-Switch flow.
2056  * @param[out] error
2057  *   Pointer to the error structure.
2058  *
2059  * @return
2060  *   0 on success, a negative errno value otherwise and rte_errno is set.
2061  */
2062 static int
2063 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
2064                                struct mlx5_flow *dev_flow,
2065                                uint8_t transfer,
2066                                struct rte_flow_error *error)
2067 {
2068         struct mlx5_flow_dv_encap_decap_resource res = {
2069                 .size = 0,
2070                 .reformat_type =
2071                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
2072                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
2073                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
2074         };
2075
2076         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2077                 return rte_flow_error_set(error, EINVAL,
2078                                           RTE_FLOW_ERROR_TYPE_ACTION,
2079                                           NULL, "can't create L2 decap action");
2080         return 0;
2081 }
2082
2083 /**
2084  * Convert raw decap/encap (L3 tunnel) action to DV specification.
2085  *
2086  * @param[in] dev
2087  *   Pointer to rte_eth_dev structure.
2088  * @param[in] action
2089  *   Pointer to action structure.
2090  * @param[in, out] dev_flow
2091  *   Pointer to the mlx5_flow.
2092  * @param[in] attr
2093  *   Pointer to the flow attributes.
2094  * @param[out] error
2095  *   Pointer to the error structure.
2096  *
2097  * @return
2098  *   0 on success, a negative errno value otherwise and rte_errno is set.
2099  */
2100 static int
2101 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2102                                 const struct rte_flow_action *action,
2103                                 struct mlx5_flow *dev_flow,
2104                                 const struct rte_flow_attr *attr,
2105                                 struct rte_flow_error *error)
2106 {
2107         const struct rte_flow_action_raw_encap *encap_data;
2108         struct mlx5_flow_dv_encap_decap_resource res;
2109
2110         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2111         res.size = encap_data->size;
2112         memcpy(res.buf, encap_data->data, res.size);
2113         res.reformat_type = attr->egress ?
2114                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2115                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2116         if (attr->transfer)
2117                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2118         else
2119                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2120                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2121         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2122                 return rte_flow_error_set(error, EINVAL,
2123                                           RTE_FLOW_ERROR_TYPE_ACTION,
2124                                           NULL, "can't create encap action");
2125         return 0;
2126 }
2127
2128 /**
2129  * Create action push VLAN.
2130  *
2131  * @param[in] dev
2132  *   Pointer to rte_eth_dev structure.
2133  * @param[in] attr
2134  *   Pointer to the flow attributes.
2135  * @param[in] vlan
2136  *   Pointer to the VLAN header to be pushed onto the Ethernet header.
2137  * @param[in, out] dev_flow
2138  *   Pointer to the mlx5_flow.
2139  * @param[out] error
2140  *   Pointer to the error structure.
2141  *
2142  * @return
2143  *   0 on success, a negative errno value otherwise and rte_errno is set.
2144  */
2145 static int
2146 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2147                                 const struct rte_flow_attr *attr,
2148                                 const struct rte_vlan_hdr *vlan,
2149                                 struct mlx5_flow *dev_flow,
2150                                 struct rte_flow_error *error)
2151 {
2152         struct mlx5_flow_dv_push_vlan_action_resource res;
2153
2154         res.vlan_tag =
2155                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2156                                  vlan->vlan_tci);
2157         if (attr->transfer)
2158                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2159         else
2160                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2161                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2162         return flow_dv_push_vlan_action_resource_register
2163                                             (dev, &res, dev_flow, error);
2164 }
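/*
 * Illustrative example: the 32-bit vlan_tag word built above holds the
 * TPID in its upper 16 bits and the TCI (PCP, DEI, VID) in its lower
 * 16 bits, converted to network byte order. Assuming the caller passes
 * eth_proto and vlan_tci in CPU byte order, TPID 0x8100 with PCP 5 and
 * VID 100 (hypothetical values) yields:
 *
 *     struct rte_vlan_hdr vlan = {
 *             .eth_proto = RTE_ETHER_TYPE_VLAN,   // 0x8100
 *             .vlan_tci = (5 << 13) | 100,        // 0xa064
 *     };
 *     // res.vlan_tag == rte_cpu_to_be_32(0x8100a064)
 */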
2165
2166 /**
2167  * Validate the modify-header actions.
2168  *
2169  * @param[in] action_flags
2170  *   Holds the actions detected until now.
2171  * @param[in] action
2172  *   Pointer to the modify action.
2173  * @param[out] error
2174  *   Pointer to error structure.
2175  *
2176  * @return
2177  *   0 on success, a negative errno value otherwise and rte_errno is set.
2178  */
2179 static int
2180 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2181                                    const struct rte_flow_action *action,
2182                                    struct rte_flow_error *error)
2183 {
2184         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2185                 return rte_flow_error_set(error, EINVAL,
2186                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2187                                           NULL, "action configuration not set");
2188         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2189                 return rte_flow_error_set(error, EINVAL,
2190                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2191                                           "can't have encap action before"
2192                                           " modify action");
2193         return 0;
2194 }
2195
2196 /**
2197  * Validate the modify-header MAC address actions.
2198  *
2199  * @param[in] action_flags
2200  *   Holds the actions detected until now.
2201  * @param[in] action
2202  *   Pointer to the modify action.
2203  * @param[in] item_flags
2204  *   Holds the items detected.
2205  * @param[out] error
2206  *   Pointer to error structure.
2207  *
2208  * @return
2209  *   0 on success, a negative errno value otherwise and rte_errno is set.
2210  */
2211 static int
2212 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2213                                    const struct rte_flow_action *action,
2214                                    const uint64_t item_flags,
2215                                    struct rte_flow_error *error)
2216 {
2217         int ret = 0;
2218
2219         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2220         if (!ret) {
2221                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2222                         return rte_flow_error_set(error, EINVAL,
2223                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2224                                                   NULL,
2225                                                   "no L2 item in pattern");
2226         }
2227         return ret;
2228 }
2229
2230 /**
2231  * Validate the modify-header IPv4 address actions.
2232  *
2233  * @param[in] action_flags
2234  *   Holds the actions detected until now.
2235  * @param[in] action
2236  *   Pointer to the modify action.
2237  * @param[in] item_flags
2238  *   Holds the items detected.
2239  * @param[out] error
2240  *   Pointer to error structure.
2241  *
2242  * @return
2243  *   0 on success, a negative errno value otherwise and rte_errno is set.
2244  */
2245 static int
2246 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2247                                     const struct rte_flow_action *action,
2248                                     const uint64_t item_flags,
2249                                     struct rte_flow_error *error)
2250 {
2251         int ret = 0;
2252
2253         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2254         if (!ret) {
2255                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2256                         return rte_flow_error_set(error, EINVAL,
2257                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2258                                                   NULL,
2259                                                   "no ipv4 item in pattern");
2260         }
2261         return ret;
2262 }
2263
2264 /**
2265  * Validate the modify-header IPv6 address actions.
2266  *
2267  * @param[in] action_flags
2268  *   Holds the actions detected until now.
2269  * @param[in] action
2270  *   Pointer to the modify action.
2271  * @param[in] item_flags
2272  *   Holds the items detected.
2273  * @param[out] error
2274  *   Pointer to error structure.
2275  *
2276  * @return
2277  *   0 on success, a negative errno value otherwise and rte_errno is set.
2278  */
2279 static int
2280 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2281                                     const struct rte_flow_action *action,
2282                                     const uint64_t item_flags,
2283                                     struct rte_flow_error *error)
2284 {
2285         int ret = 0;
2286
2287         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2288         if (!ret) {
2289                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2290                         return rte_flow_error_set(error, EINVAL,
2291                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2292                                                   NULL,
2293                                                   "no ipv6 item in pattern");
2294         }
2295         return ret;
2296 }
2297
2298 /**
2299  * Validate the modify-header TP actions.
2300  *
2301  * @param[in] action_flags
2302  *   Holds the actions detected until now.
2303  * @param[in] action
2304  *   Pointer to the modify action.
2305  * @param[in] item_flags
2306  *   Holds the items detected.
2307  * @param[out] error
2308  *   Pointer to error structure.
2309  *
2310  * @return
2311  *   0 on success, a negative errno value otherwise and rte_errno is set.
2312  */
2313 static int
2314 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2315                                   const struct rte_flow_action *action,
2316                                   const uint64_t item_flags,
2317                                   struct rte_flow_error *error)
2318 {
2319         int ret = 0;
2320
2321         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2322         if (!ret) {
2323                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2324                         return rte_flow_error_set(error, EINVAL,
2325                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2326                                                   NULL, "no transport layer "
2327                                                   "in pattern");
2328         }
2329         return ret;
2330 }
2331
2332 /**
2333  * Validate the modify-header actions of increment/decrement
2334  * TCP Sequence-number.
2335  *
2336  * @param[in] action_flags
2337  *   Holds the actions detected until now.
2338  * @param[in] action
2339  *   Pointer to the modify action.
2340  * @param[in] item_flags
2341  *   Holds the items detected.
2342  * @param[out] error
2343  *   Pointer to error structure.
2344  *
2345  * @return
2346  *   0 on success, a negative errno value otherwise and rte_errno is set.
2347  */
2348 static int
2349 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2350                                        const struct rte_flow_action *action,
2351                                        const uint64_t item_flags,
2352                                        struct rte_flow_error *error)
2353 {
2354         int ret = 0;
2355
2356         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2357         if (!ret) {
2358                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2359                         return rte_flow_error_set(error, EINVAL,
2360                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2361                                                   NULL, "no TCP item in"
2362                                                   " pattern");
2363                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2364                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2365                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2366                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2367                         return rte_flow_error_set(error, EINVAL,
2368                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2369                                                   NULL,
2370                                                   "cannot decrease and increase"
2371                                                   " TCP sequence number"
2372                                                   " at the same time");
2373         }
2374         return ret;
2375 }
2376
2377 /**
2378  * Validate the modify-header actions of increment/decrement
2379  * TCP Acknowledgment number.
2380  *
2381  * @param[in] action_flags
2382  *   Holds the actions detected until now.
2383  * @param[in] action
2384  *   Pointer to the modify action.
2385  * @param[in] item_flags
2386  *   Holds the items detected.
2387  * @param[out] error
2388  *   Pointer to error structure.
2389  *
2390  * @return
2391  *   0 on success, a negative errno value otherwise and rte_errno is set.
2392  */
2393 static int
2394 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2395                                        const struct rte_flow_action *action,
2396                                        const uint64_t item_flags,
2397                                        struct rte_flow_error *error)
2398 {
2399         int ret = 0;
2400
2401         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2402         if (!ret) {
2403                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2404                         return rte_flow_error_set(error, EINVAL,
2405                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2406                                                   NULL, "no TCP item in"
2407                                                   " pattern");
2408                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2409                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2410                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2411                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2412                         return rte_flow_error_set(error, EINVAL,
2413                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2414                                                   NULL,
2415                                                   "cannot decrease and increase"
2416                                                   " TCP acknowledgment number"
2417                                                   " at the same time");
2418         }
2419         return ret;
2420 }
2421
2422 /**
2423  * Validate the modify-header TTL actions.
2424  *
2425  * @param[in] action_flags
2426  *   Holds the actions detected until now.
2427  * @param[in] action
2428  *   Pointer to the modify action.
2429  * @param[in] item_flags
2430  *   Holds the items detected.
2431  * @param[out] error
2432  *   Pointer to error structure.
2433  *
2434  * @return
2435  *   0 on success, a negative errno value otherwise and rte_errno is set.
2436  */
2437 static int
2438 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2439                                    const struct rte_flow_action *action,
2440                                    const uint64_t item_flags,
2441                                    struct rte_flow_error *error)
2442 {
2443         int ret = 0;
2444
2445         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2446         if (!ret) {
2447                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2448                         return rte_flow_error_set(error, EINVAL,
2449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2450                                                   NULL,
2451                                                   "no IP protocol in pattern");
2452         }
2453         return ret;
2454 }
2455
2456 /**
2457  * Validate jump action.
2458  *
2459  * @param[in] action
2460  *   Pointer to the jump action.
2461  * @param[in] action_flags
2462  *   Holds the actions detected until now.
2463  * @param[in] attributes
2464  *   Pointer to flow attributes
2465  * @param[in] external
2466  *   Action belongs to a flow rule created by a request external to the PMD.
2467  * @param[out] error
2468  *   Pointer to error structure.
2469  *
2470  * @return
2471  *   0 on success, a negative errno value otherwise and rte_errno is set.
2472  */
2473 static int
2474 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2475                              uint64_t action_flags,
2476                              const struct rte_flow_attr *attributes,
2477                              bool external, struct rte_flow_error *error)
2478 {
2479         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2480                                                     MLX5_MAX_TABLES;
2481         uint32_t target_group, table;
2482         int ret = 0;
2483
2484         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2485                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2486                 return rte_flow_error_set(error, EINVAL,
2487                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2488                                           "can't have 2 fate actions in"
2489                                           " same flow");
2490         if (!action->conf)
2491                 return rte_flow_error_set(error, EINVAL,
2492                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2493                                           NULL, "action configuration not set");
2494         target_group =
2495                 ((const struct rte_flow_action_jump *)action->conf)->group;
2496         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2497                                        &table, error);
2498         if (ret)
2499                 return ret;
2500         if (table >= max_group)
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2503                                           "target group index out of range");
2504         if (attributes->group >= target_group)
2505                 return rte_flow_error_set(error, EINVAL,
2506                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2507                                           "target group must be higher than"
2508                                           " the current flow group");
2509         return 0;
2510 }
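/*
 * Illustrative sketch (not part of the driver): a jump from group 0 to
 * group 1. Per the checks above, the target group must be strictly
 * higher than the rule's own group and within the table range.
 *
 *     struct rte_flow_attr attr = { .group = 0, .ingress = 1 };
 *     struct rte_flow_action_jump jump = { .group = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */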
2511
2512 /**
2513  * Validate the port_id action.
2514  *
2515  * @param[in] dev
2516  *   Pointer to rte_eth_dev structure.
2517  * @param[in] action_flags
2518  *   Bit-fields that holds the actions detected until now.
2519  * @param[in] action
2520  *   Port_id RTE action structure.
2521  * @param[in] attr
2522  *   Attributes of flow that includes this action.
2523  * @param[out] error
2524  *   Pointer to error structure.
2525  *
2526  * @return
2527  *   0 on success, a negative errno value otherwise and rte_errno is set.
2528  */
2529 static int
2530 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2531                                 uint64_t action_flags,
2532                                 const struct rte_flow_action *action,
2533                                 const struct rte_flow_attr *attr,
2534                                 struct rte_flow_error *error)
2535 {
2536         const struct rte_flow_action_port_id *port_id;
2537         struct mlx5_priv *act_priv;
2538         struct mlx5_priv *dev_priv;
2539         uint16_t port;
2540
2541         if (!attr->transfer)
2542                 return rte_flow_error_set(error, ENOTSUP,
2543                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2544                                           NULL,
2545                                           "port id action is valid in transfer"
2546                                           " mode only");
2547         if (!action || !action->conf)
2548                 return rte_flow_error_set(error, ENOTSUP,
2549                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2550                                           NULL,
2551                                           "port id action parameters must be"
2552                                           " specified");
2553         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2554                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2555                 return rte_flow_error_set(error, EINVAL,
2556                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2557                                           "can have only one fate action in"
2558                                           " a flow");
2559         dev_priv = mlx5_dev_to_eswitch_info(dev);
2560         if (!dev_priv)
2561                 return rte_flow_error_set(error, rte_errno,
2562                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2563                                           NULL,
2564                                           "failed to obtain E-Switch info");
2565         port_id = action->conf;
2566         port = port_id->original ? dev->data->port_id : port_id->id;
2567         act_priv = mlx5_port_to_eswitch_info(port);
2568         if (!act_priv)
2569                 return rte_flow_error_set
2570                                 (error, rte_errno,
2571                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2572                                  "failed to obtain E-Switch port id for port");
2573         if (act_priv->domain_id != dev_priv->domain_id)
2574                 return rte_flow_error_set
2575                                 (error, EINVAL,
2576                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2577                                  "port does not belong to"
2578                                  " E-Switch being configured");
2579         return 0;
2580 }
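/*
 * Usage sketch (illustrative only, not compiled into the driver): a
 * transfer rule forwarding to another port on the same E-Switch;
 * "peer_port_id" is a placeholder port id, not a real variable here.
 *
 *      struct rte_flow_attr attr = { .transfer = 1, .ingress = 1 };
 *      struct rte_flow_action_port_id pid = { .id = peer_port_id };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 * Without attr.transfer the validation above fails with ENOTSUP, and a
 * port from a different E-Switch domain fails with EINVAL.
 */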
2581
2582 /**
2583  * Find existing modify-header resource or create and register a new one.
2584  *
2585  * @param[in, out] dev
2586  *   Pointer to rte_eth_dev structure.
2587  * @param[in, out] resource
2588  *   Pointer to modify-header resource.
2589  * @param[in, out] dev_flow
2590  *   Pointer to the dev_flow.
2591  * @param[out] error
2592  *   Pointer to error structure.
2593  *
2594  * @return
2595  *   0 on success, a negative errno value otherwise and rte_errno is set.
2596  */
2597 static int
2598 flow_dv_modify_hdr_resource_register
2599                         (struct rte_eth_dev *dev,
2600                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2601                          struct mlx5_flow *dev_flow,
2602                          struct rte_flow_error *error)
2603 {
2604         struct mlx5_priv *priv = dev->data->dev_private;
2605         struct mlx5_ibv_shared *sh = priv->sh;
2606         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2607         struct mlx5dv_dr_domain *ns;
2608
2609         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2610                 ns = sh->fdb_domain;
2611         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2612                 ns = sh->tx_domain;
2613         else
2614                 ns = sh->rx_domain;
2615         resource->flags =
2616                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2617         /* Look up a matching resource in the cache. */
2618         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2619                 if (resource->ft_type == cache_resource->ft_type &&
2620                     resource->actions_num == cache_resource->actions_num &&
2621                     resource->flags == cache_resource->flags &&
2622                     !memcmp((const void *)resource->actions,
2623                             (const void *)cache_resource->actions,
2624                             (resource->actions_num *
2625                                             sizeof(resource->actions[0])))) {
2626                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2627                                 (void *)cache_resource,
2628                                 rte_atomic32_read(&cache_resource->refcnt));
2629                         rte_atomic32_inc(&cache_resource->refcnt);
2630                         dev_flow->dv.modify_hdr = cache_resource;
2631                         return 0;
2632                 }
2633         }
2634         /* Register new modify-header resource. */
2635         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2636         if (!cache_resource)
2637                 return rte_flow_error_set(error, ENOMEM,
2638                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2639                                           "cannot allocate resource memory");
2640         *cache_resource = *resource;
2641         cache_resource->verbs_action =
2642                 mlx5_glue->dv_create_flow_action_modify_header
2643                                         (sh->ctx, cache_resource->ft_type,
2644                                          ns, cache_resource->flags,
2645                                          cache_resource->actions_num *
2646                                          sizeof(cache_resource->actions[0]),
2647                                          (uint64_t *)cache_resource->actions);
2648         if (!cache_resource->verbs_action) {
2649                 rte_free(cache_resource);
2650                 return rte_flow_error_set(error, ENOMEM,
2651                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2652                                           NULL, "cannot create action");
2653         }
2654         rte_atomic32_init(&cache_resource->refcnt);
2655         rte_atomic32_inc(&cache_resource->refcnt);
2656         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2657         dev_flow->dv.modify_hdr = cache_resource;
2658         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2659                 (void *)cache_resource,
2660                 rte_atomic32_read(&cache_resource->refcnt));
2661         return 0;
2662 }
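/*
 * Cache-behavior sketch (illustrative): the lookup key is the tuple
 * (ft_type, actions_num, flags, actions[] bytes), so two flows carrying
 * byte-identical modify-header commands in the same domain share one
 * verbs action:
 *
 *      1st register: cache miss -> verbs action created, refcnt = 1
 *      2nd register: cache hit  -> cached entry reused,  refcnt = 2
 */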
2663
2664 #define MLX5_CNT_CONTAINER_RESIZE 64
2665
2666 /**
2667  * Get or create a flow counter.
2668  *
2669  * @param[in] dev
2670  *   Pointer to the Ethernet device structure.
2671  * @param[in] shared
2672  *   Indicate if this counter is shared with other flows.
2673  * @param[in] id
2674  *   Counter identifier.
2675  *
2676  * @return
2677  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2678  */
2679 static struct mlx5_flow_counter *
2680 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2681                                uint32_t id)
2682 {
2683         struct mlx5_priv *priv = dev->data->dev_private;
2684         struct mlx5_flow_counter *cnt = NULL;
2685         struct mlx5_devx_obj *dcs = NULL;
2686
2687         if (!priv->config.devx) {
2688                 rte_errno = ENOTSUP;
2689                 return NULL;
2690         }
2691         if (shared) {
2692                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2693                         if (cnt->shared && cnt->id == id) {
2694                                 cnt->ref_cnt++;
2695                                 return cnt;
2696                         }
2697                 }
2698         }
2699         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2700         if (!dcs)
2701                 return NULL;
2702         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2703         if (!cnt) {
2704                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2705                 rte_errno = ENOMEM;
2706                 return NULL;
2707         }
2708         struct mlx5_flow_counter tmpl = {
2709                 .shared = shared,
2710                 .ref_cnt = 1,
2711                 .id = id,
2712                 .dcs = dcs,
2713         };
2714         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2715         if (!tmpl.action) {
2716                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2717                 rte_errno = errno;
2718                 rte_free(cnt);
2719                 return NULL;
2720         }
2721         *cnt = tmpl;
2722         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2723         return cnt;
2724 }
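/*
 * Usage sketch (illustrative only, not compiled into the driver): two
 * flows referencing the same shared counter; the id value 42 is
 * arbitrary. The first allocation creates the devx counter, the second
 * call only bumps ref_cnt and returns the cached object.
 *
 *      struct rte_flow_action_count count = { .shared = 1, .id = 42 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */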
2725
2726 /**
2727  * Release a flow counter.
2728  *
2729  * @param[in] dev
2730  *   Pointer to the Ethernet device structure.
2731  * @param[in] counter
2732  *   Pointer to the counter handler.
2733  */
2734 static void
2735 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2736                                  struct mlx5_flow_counter *counter)
2737 {
2738         struct mlx5_priv *priv = dev->data->dev_private;
2739
2740         if (!counter)
2741                 return;
2742         if (--counter->ref_cnt == 0) {
2743                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2744                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2745                 rte_free(counter);
2746         }
2747 }
2748
2749 /**
2750  * Query a devx flow counter.
2751  *
2752  * @param[in] dev
2753  *   Pointer to the Ethernet device structure.
2754  * @param[in] cnt
2755  *   Pointer to the flow counter.
2756  * @param[out] pkts
2757  *   The statistics value of packets.
2758  * @param[out] bytes
2759  *   The statistics value of bytes.
2760  *
2761  * @return
2762  *   0 on success, otherwise a negative errno value and rte_errno is set.
2763  */
2764 static inline int
2765 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2766                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2767                      uint64_t *bytes)
2768 {
2769         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2770                                                 0, NULL, NULL, 0);
2771 }
2772
2773 /**
2774  * Get a pool by a counter.
2775  *
2776  * @param[in] cnt
2777  *   Pointer to the counter.
2778  *
2779  * @return
2780  *   The counter pool.
2781  */
2782 static struct mlx5_flow_counter_pool *
2783 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2784 {
2785         if (!cnt->batch) {
2786                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2787                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2788         }
2789         return cnt->pool;
2790 }
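/*
 * Worked example (illustrative): counters_raw[] is laid out immediately
 * after the pool structure, and a single-allocated (!batch) counter sits
 * at index dcs->id % MLX5_COUNTERS_PER_POOL. Assuming
 * MLX5_COUNTERS_PER_POOL is 512 and cnt->dcs->id is 1037:
 *
 *      index = 1037 % 512 = 13
 *
 * so cnt - 13 points at counters_raw[0], and stepping one
 * struct mlx5_flow_counter_pool back from there yields the pool header.
 */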
2791
2792 /**
2793  * Get a pool by devx counter ID.
2794  *
2795  * @param[in] cont
2796  *   Pointer to the counter container.
2797  * @param[in] id
2798  *   The counter devx ID.
2799  *
2800  * @return
2801  *   The counter pool pointer if it exists, NULL otherwise.
2802  */
2803 static struct mlx5_flow_counter_pool *
2804 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2805 {
2806         struct mlx5_flow_counter_pool *pool;
2807
2808         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2809                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2810                                 MLX5_COUNTERS_PER_POOL;
2811
2812                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2813                         return pool;
2814         }
2815         return NULL;
2816 }
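/*
 * Worked example (illustrative, assuming MLX5_COUNTERS_PER_POOL is 512):
 * a pool whose min_dcs->id is 1037 covers the aligned devx ID range
 *
 *      base  = (1037 / 512) * 512 = 1024
 *      range = [1024, 1536)
 *
 * so an id of 1100 matches this pool while 1600 keeps the search going.
 */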
2817
2818 /**
2819  * Allocate memory for the counter values, wrapped by all the needed
2820  * management structures.
2821  *
2822  * @param[in] dev
2823  *   Pointer to the Ethernet device structure.
2824  * @param[in] raws_n
2825  *   The number of raw memory areas, each for MLX5_COUNTERS_PER_POOL counters.
2826  *
2827  * @return
2828  *   The new memory management pointer on success, otherwise NULL and rte_errno
2829  *   is set.
2830  */
2831 static struct mlx5_counter_stats_mem_mng *
2832 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2833 {
2834         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2835                                         (dev->data->dev_private))->sh;
2836         struct mlx5_devx_mkey_attr mkey_attr;
2837         struct mlx5_counter_stats_mem_mng *mem_mng;
2838         volatile struct flow_counter_stats *raw_data;
2839         int size = (sizeof(struct flow_counter_stats) *
2840                         MLX5_COUNTERS_PER_POOL +
2841                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2842                         sizeof(struct mlx5_counter_stats_mem_mng);
2843         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2844         int i;
2845
2846         if (!mem) {
2847                 rte_errno = ENOMEM;
2848                 return NULL;
2849         }
2850         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2851         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2852         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2853                                                  IBV_ACCESS_LOCAL_WRITE);
2854         if (!mem_mng->umem) {
2855                 rte_errno = errno;
2856                 rte_free(mem);
2857                 return NULL;
2858         }
2859         mkey_attr.addr = (uintptr_t)mem;
2860         mkey_attr.size = size;
2861         mkey_attr.umem_id = mem_mng->umem->umem_id;
2862         mkey_attr.pd = sh->pdn;
2863         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2864         if (!mem_mng->dm) {
2865                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2866                 rte_errno = errno;
2867                 rte_free(mem);
2868                 return NULL;
2869         }
2870         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2871         raw_data = (volatile struct flow_counter_stats *)mem;
2872         for (i = 0; i < raws_n; ++i) {
2873                 mem_mng->raws[i].mem_mng = mem_mng;
2874                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2875         }
2876         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2877         return mem_mng;
2878 }
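/*
 * Resulting layout of the single allocation above (illustrative):
 *
 *      mem -> +---------------------------------------------------+
 *             | raw counter data, raws_n * MLX5_COUNTERS_PER_POOL |
 *             | * sizeof(struct flow_counter_stats)               |
 *             +---------------------------------------------------+ <- mem_mng->raws
 *             | raws_n * struct mlx5_counter_stats_raw            |
 *             +---------------------------------------------------+
 *             | struct mlx5_counter_stats_mem_mng                 | <- mem_mng
 *             +---------------------------------------------------+
 *
 * Only the leading raw-data region is registered as umem/mkey for the
 * device to write into; the trailing bookkeeping stays host-only.
 */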
2879
2880 /**
2881  * Resize a counter container.
2882  *
2883  * @param[in] dev
2884  *   Pointer to the Ethernet device structure.
2885  * @param[in] batch
2886  *   Whether the pool is for counters that were allocated by a batch command.
2887  *
2888  * @return
2889  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2890  */
2891 static struct mlx5_pools_container *
2892 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2893 {
2894         struct mlx5_priv *priv = dev->data->dev_private;
2895         struct mlx5_pools_container *cont =
2896                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2897         struct mlx5_pools_container *new_cont =
2898                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2899         struct mlx5_counter_stats_mem_mng *mem_mng;
2900         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2901         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2902         int i;
2903
2904         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2905                 /* The last resize hasn't been detected by the host thread yet. */
2906                 rte_errno = EAGAIN;
2907                 return NULL;
2908         }
2909         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2910         if (!new_cont->pools) {
2911                 rte_errno = ENOMEM;
2912                 return NULL;
2913         }
2914         if (cont->n)
2915                 memcpy(new_cont->pools, cont->pools, cont->n *
2916                        sizeof(struct mlx5_flow_counter_pool *));
2917         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2918                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2919         if (!mem_mng) {
2920                 rte_free(new_cont->pools);
2921                 return NULL;
2922         }
2923         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2924                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2925                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2926                                  i, next);
2927         new_cont->n = resize;
2928         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2929         TAILQ_INIT(&new_cont->pool_list);
2930         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2931         new_cont->init_mem_mng = mem_mng;
2932         rte_cio_wmb();
2933         /* Flip the master container. */
2934         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
2935         return new_cont;
2936 }
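/*
 * Resize sketch (illustrative): each batch type keeps two containers
 * selected by the cmng.mhi[batch] bit, so readers never observe a
 * half-built pools array:
 *
 *      cont     = master container (mhi bit as-is)
 *      new_cont = unused shadow container (mhi bit flipped)
 *      ... copy pools, attach new counter memory to new_cont ...
 *      rte_cio_wmb();          <- publish the stores first
 *      cmng.mhi[batch] ^= 1;   <- flip: new_cont becomes master
 *
 * The EAGAIN above guards against two resizes racing within one host
 * query cycle, before the previous flip was consumed.
 */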
2937
2938 /**
2939  * Query a devx flow counter.
2940  *
2941  * @param[in] dev
2942  *   Pointer to the Ethernet device structure.
2943  * @param[in] cnt
2944  *   Pointer to the flow counter.
2945  * @param[out] pkts
2946  *   The statistics value of packets.
2947  * @param[out] bytes
2948  *   The statistics value of bytes.
2949  *
2950  * @return
2951  *   0 on success, otherwise a negative errno value and rte_errno is set.
2952  */
2953 static inline int
2954 _flow_dv_query_count(struct rte_eth_dev *dev,
2955                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2956                      uint64_t *bytes)
2957 {
2958         struct mlx5_priv *priv = dev->data->dev_private;
2959         struct mlx5_flow_counter_pool *pool =
2960                         flow_dv_counter_pool_get(cnt);
2961         int offset = cnt - &pool->counters_raw[0];
2962
2963         if (priv->counter_fallback)
2964                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2965
2966         rte_spinlock_lock(&pool->sl);
2967         /*
2968          * The single-counter allocation may allocate a smaller ID than the
2969          * one currently being read by the host thread.
2970          * In this case the new counter values must be reported as 0.
2971          */
2972         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2973                 *pkts = 0;
2974                 *bytes = 0;
2975         } else {
2976                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2977                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2978         }
2979         rte_spinlock_unlock(&pool->sl);
2980         return 0;
2981 }
2982
2983 /**
2984  * Create and initialize a new counter pool.
2985  *
2986  * @param[in] dev
2987  *   Pointer to the Ethernet device structure.
2988  * @param[out] dcs
2989  *   The devX counter handle.
2990  * @param[in] batch
2991  *   Whether the pool is for counters that were allocated by a batch command.
2992  *
2993  * @return
2994  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
2995  */
2996 static struct mlx5_flow_counter_pool *
2997 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
2998                     uint32_t batch)
2999 {
3000         struct mlx5_priv *priv = dev->data->dev_private;
3001         struct mlx5_flow_counter_pool *pool;
3002         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3003                                                                0);
3004         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
3005         uint32_t size;
3006
3007         if (cont->n == n_valid) {
3008                 cont = flow_dv_container_resize(dev, batch);
3009                 if (!cont)
3010                         return NULL;
3011         }
3012         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
3013                         sizeof(struct mlx5_flow_counter);
3014         pool = rte_calloc(__func__, 1, size, 0);
3015         if (!pool) {
3016                 rte_errno = ENOMEM;
3017                 return NULL;
3018         }
3019         pool->min_dcs = dcs;
3020         pool->raw = cont->init_mem_mng->raws + n_valid %
3021                                                      MLX5_CNT_CONTAINER_RESIZE;
3022         pool->raw_hw = NULL;
3023         rte_spinlock_init(&pool->sl);
3024         /*
3025          * The newly allocated counters in this pool have generation 0; setting
3026          * the pool query generation to 2 makes them all valid for allocation.
3027          */
3028         rte_atomic64_set(&pool->query_gen, 0x2);
3029         TAILQ_INIT(&pool->counters);
3030         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3031         cont->pools[n_valid] = pool;
3032         /* Pool initialization must be updated before host thread access. */
3033         rte_cio_wmb();
3034         rte_atomic16_add(&cont->n_valid, 1);
3035         return pool;
3036 }
3037
3038 /**
3039  * Prepare a new counter and/or a new counter pool.
3040  *
3041  * @param[in] dev
3042  *   Pointer to the Ethernet device structure.
3043  * @param[out] cnt_free
3044  *   Where to put the pointer of a new counter.
3045  * @param[in] batch
3046  *   Whether the pool is for counters that were allocated by a batch command.
3047  *
3048  * @return
3049  *   The free counter pool pointer and @p cnt_free is set on success,
3050  *   NULL otherwise and rte_errno is set.
3051  */
3052 static struct mlx5_flow_counter_pool *
3053 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
3054                              struct mlx5_flow_counter **cnt_free,
3055                              uint32_t batch)
3056 {
3057         struct mlx5_priv *priv = dev->data->dev_private;
3058         struct mlx5_flow_counter_pool *pool;
3059         struct mlx5_devx_obj *dcs = NULL;
3060         struct mlx5_flow_counter *cnt;
3061         uint32_t i;
3062
3063         if (!batch) {
3064                 /* bulk_bitmap must be 0 for single counter allocation. */
3065                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
3066                 if (!dcs)
3067                         return NULL;
3068                 pool = flow_dv_find_pool_by_id
3069                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
3070                 if (!pool) {
3071                         pool = flow_dv_pool_create(dev, dcs, batch);
3072                         if (!pool) {
3073                                 mlx5_devx_cmd_destroy(dcs);
3074                                 return NULL;
3075                         }
3076                 } else if (dcs->id < pool->min_dcs->id) {
3077                         rte_atomic64_set(&pool->a64_dcs,
3078                                          (int64_t)(uintptr_t)dcs);
3079                 }
3080                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
3081                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3082                 cnt->dcs = dcs;
3083                 *cnt_free = cnt;
3084                 return pool;
3085         }
3086         /* bulk_bitmap is in 128-counter units; 0x4 requests 4 * 128 = 512. */
3087         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
3088                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
3089         if (!dcs) {
3090                 rte_errno = ENODATA;
3091                 return NULL;
3092         }
3093         pool = flow_dv_pool_create(dev, dcs, batch);
3094         if (!pool) {
3095                 mlx5_devx_cmd_destroy(dcs);
3096                 return NULL;
3097         }
3098         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3099                 cnt = &pool->counters_raw[i];
3100                 cnt->pool = pool;
3101                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3102         }
3103         *cnt_free = &pool->counters_raw[0];
3104         return pool;
3105 }
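/*
 * Size arithmetic (illustrative): the devx bulk bitmap encodes the
 * allocation size in 128-counter units, so the 0x4 bit used above
 * requests 4 * 128 = 512 counters, which is what one pool of
 * MLX5_COUNTERS_PER_POOL counters consumes here. The batch == 0 path
 * instead allocates single counters (bitmap 0) and files each one into
 * the pool covering its devx ID, creating that pool on first use.
 */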
3106
3107 /**
3108  * Search for an existing shared counter.
3109  *
3110  * @param[in] cont
3111  *   Pointer to the relevant counter pool container.
3112  * @param[in] id
3113  *   The shared counter ID to search.
3114  *
3115  * @return
3116  *   NULL if it does not exist, otherwise a pointer to the shared counter.
3117  */
3118 static struct mlx5_flow_counter *
3119 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3120                               uint32_t id)
3121 {
3122         struct mlx5_flow_counter *cnt;
3123         struct mlx5_flow_counter_pool *pool;
3124         int i;
3125
3126         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3127                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3128                         cnt = &pool->counters_raw[i];
3129                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3130                                 return cnt;
3131                 }
3132         }
3133         return NULL;
3134 }
3135
3136 /**
3137  * Allocate a flow counter.
3138  *
3139  * @param[in] dev
3140  *   Pointer to the Ethernet device structure.
3141  * @param[in] shared
3142  *   Indicate if this counter is shared with other flows.
3143  * @param[in] id
3144  *   Counter identifier.
3145  * @param[in] group
3146  *   Counter flow group.
3147  *
3148  * @return
3149  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
3150  */
3151 static struct mlx5_flow_counter *
3152 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3153                       uint16_t group)
3154 {
3155         struct mlx5_priv *priv = dev->data->dev_private;
3156         struct mlx5_flow_counter_pool *pool = NULL;
3157         struct mlx5_flow_counter *cnt_free = NULL;
3158         /*
3159          * Currently a group 0 flow counter cannot be assigned to a flow if it
3160          * is not the first one in the batch counter allocation, so it is
3161          * better to allocate counters one by one for these flows in a
3162          * separate container.
3163          * A counter can be shared between different groups, so shared
3164          * counters must be taken from the single-counter container.
3165          */
3166         uint32_t batch = (group && !shared) ? 1 : 0;
3167         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3168                                                                0);
3169
3170         if (priv->counter_fallback)
3171                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3172         if (!priv->config.devx) {
3173                 rte_errno = ENOTSUP;
3174                 return NULL;
3175         }
3176         if (shared) {
3177                 cnt_free = flow_dv_counter_shared_search(cont, id);
3178                 if (cnt_free) {
3179                         if (cnt_free->ref_cnt + 1 == 0) {
3180                                 rte_errno = E2BIG;
3181                                 return NULL;
3182                         }
3183                         cnt_free->ref_cnt++;
3184                         return cnt_free;
3185                 }
3186         }
3187         /* Pools that have free counters are at the start of the list. */
3188         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3189                 /*
3190                  * The free counter reset values must be updated between the
3191                  * counter release and the counter allocation, so at least one
3192                  * query must be done in this time; ensure it by saving the
3193                  * query generation at release time.
3194                  * The free list is sorted according to the generation, so if
3195                  * the first one is not updated, none of the others are
3196                  * updated either.
3197                  */
3198                 cnt_free = TAILQ_FIRST(&pool->counters);
3199                 if (cnt_free && cnt_free->query_gen + 1 <
3200                     rte_atomic64_read(&pool->query_gen))
3201                         break;
3202                 cnt_free = NULL;
3203         }
3204         if (!cnt_free) {
3205                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3206                 if (!pool)
3207                         return NULL;
3208         }
3209         cnt_free->batch = batch;
3210         /* Create a DV counter action only on first-time usage. */
3211         if (!cnt_free->action) {
3212                 uint16_t offset;
3213                 struct mlx5_devx_obj *dcs;
3214
3215                 if (batch) {
3216                         offset = cnt_free - &pool->counters_raw[0];
3217                         dcs = pool->min_dcs;
3218                 } else {
3219                         offset = 0;
3220                         dcs = cnt_free->dcs;
3221                 }
3222                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3223                                         (dcs->obj, offset);
3224                 if (!cnt_free->action) {
3225                         rte_errno = errno;
3226                         return NULL;
3227                 }
3228         }
3229         /* Update the counter reset values. */
3230         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3231                                  &cnt_free->bytes))
3232                 return NULL;
3233         cnt_free->shared = shared;
3234         cnt_free->ref_cnt = 1;
3235         cnt_free->id = id;
3236         if (!priv->sh->cmng.query_thread_on)
3237                 /* Start the asynchronous batch query by the host thread. */
3238                 mlx5_set_query_alarm(priv->sh);
3239         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3240         if (TAILQ_EMPTY(&pool->counters)) {
3241                 /* Move the pool to the end of the container pool list. */
3242                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3243                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3244         }
3245         return cnt_free;
3246 }
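/*
 * Reuse-rule sketch (illustrative): a released counter is stamped with
 * the pool query generation (see flow_dv_counter_release() below) and is
 * only handed out again after a full asynchronous query has completed,
 * so the reset baseline read by _flow_dv_query_count() above is fresh:
 *
 *      released at pool->query_gen == 5  ->  counter->query_gen = 5
 *      reusable once pool->query_gen >= 7    (5 + 1 < 7 holds)
 */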
3247
3248 /**
3249  * Release a flow counter.
3250  *
3251  * @param[in] dev
3252  *   Pointer to the Ethernet device structure.
3253  * @param[in] counter
3254  *   Pointer to the counter handler.
3255  */
3256 static void
3257 flow_dv_counter_release(struct rte_eth_dev *dev,
3258                         struct mlx5_flow_counter *counter)
3259 {
3260         struct mlx5_priv *priv = dev->data->dev_private;
3261
3262         if (!counter)
3263                 return;
3264         if (priv->counter_fallback) {
3265                 flow_dv_counter_release_fallback(dev, counter);
3266                 return;
3267         }
3268         if (--counter->ref_cnt == 0) {
3269                 struct mlx5_flow_counter_pool *pool =
3270                                 flow_dv_counter_pool_get(counter);
3271
3272                 /* Put the counter in the end - the last updated one. */
3273                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3274                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3275         }
3276 }
3277
3278 /**
3279  * Verify the @p attributes will be correctly understood by the NIC and store
3280  * them in the @p flow if everything is correct.
3281  *
3282  * @param[in] dev
3283  *   Pointer to dev struct.
3284  * @param[in] attributes
3285  *   Pointer to flow attributes
3286  * @param[in] external
3287  *   This flow rule is created by a request external to the PMD.
3288  * @param[out] error
3289  *   Pointer to error structure.
3290  *
3291  * @return
3292  *   0 on success, a negative errno value otherwise and rte_errno is set.
3293  */
3294 static int
3295 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3296                             const struct rte_flow_attr *attributes,
3297                             bool external __rte_unused,
3298                             struct rte_flow_error *error)
3299 {
3300         struct mlx5_priv *priv = dev->data->dev_private;
3301         uint32_t priority_max = priv->config.flow_prio - 1;
3302
3303 #ifndef HAVE_MLX5DV_DR
3304         if (attributes->group)
3305                 return rte_flow_error_set(error, ENOTSUP,
3306                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3307                                           NULL,
3308                                           "groups are not supported");
3309 #else
3310         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3311                                                     MLX5_MAX_TABLES;
3312         uint32_t table;
3313         int ret;
3314
3315         ret = mlx5_flow_group_to_table(attributes, external,
3316                                        attributes->group,
3317                                        &table, error);
3318         if (ret)
3319                 return ret;
3320         if (table >= max_group)
3321                 return rte_flow_error_set(error, EINVAL,
3322                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3323                                           "group index out of range");
3324 #endif
3325         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3326             attributes->priority >= priority_max)
3327                 return rte_flow_error_set(error, ENOTSUP,
3328                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3329                                           NULL,
3330                                           "priority out of range");
3331         if (attributes->transfer) {
3332                 if (!priv->config.dv_esw_en)
3333                         return rte_flow_error_set
3334                                 (error, ENOTSUP,
3335                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3336                                  "E-Switch dr is not supported");
3337                 if (!(priv->representor || priv->master))
3338                         return rte_flow_error_set
3339                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3340                                  NULL, "E-Switch configuration can only be"
3341                                  " done by a master or a representor device");
3342                 if (attributes->egress)
3343                         return rte_flow_error_set
3344                                 (error, ENOTSUP,
3345                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3346                                  "egress is not supported");
3347         }
3348         if (!(attributes->egress ^ attributes->ingress))
3349                 return rte_flow_error_set(error, ENOTSUP,
3350                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3351                                           "must specify exactly one of "
3352                                           "ingress or egress");
3353         return 0;
3354 }
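/*
 * Attribute examples (illustrative) against the checks above:
 *
 *      { .group = 1, .ingress = 1 }      accepted (with MLX5DV_DR and the
 *                                        group within range)
 *      { .transfer = 1, .ingress = 1 }   accepted on a master/representor
 *                                        port with dv_esw_en set
 *      { .ingress = 1, .egress = 1 }     rejected: exactly one of
 *                                        ingress/egress must be set
 *      { .transfer = 1, .egress = 1 }    rejected: egress is not
 *                                        supported in transfer mode
 */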
3355
3356 /**
3357  * Internal validation function. For validating both actions and items.
3358  *
3359  * @param[in] dev
3360  *   Pointer to the rte_eth_dev structure.
3361  * @param[in] attr
3362  *   Pointer to the flow attributes.
3363  * @param[in] items
3364  *   Pointer to the list of items.
3365  * @param[in] actions
3366  *   Pointer to the list of actions.
3367  * @param[in] external
3368  *   This flow rule is created by a request external to the PMD.
3369  * @param[out] error
3370  *   Pointer to the error structure.
3371  *
3372  * @return
3373  *   0 on success, a negative errno value otherwise and rte_errno is set.
3374  */
3375 static int
3376 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3377                  const struct rte_flow_item items[],
3378                  const struct rte_flow_action actions[],
3379                  bool external, struct rte_flow_error *error)
3380 {
3381         int ret;
3382         uint64_t action_flags = 0;
3383         uint64_t item_flags = 0;
3384         uint64_t last_item = 0;
3385         uint8_t next_protocol = 0xff;
3386         int actions_n = 0;
3387         const struct rte_flow_item *gre_item = NULL;
3388         struct rte_flow_item_tcp nic_tcp_mask = {
3389                 .hdr = {
3390                         .tcp_flags = 0xFF,
3391                         .src_port = RTE_BE16(UINT16_MAX),
3392                         .dst_port = RTE_BE16(UINT16_MAX),
3393                 }
3394         };
3395
3396         if (items == NULL)
3397                 return -1;
3398         ret = flow_dv_validate_attributes(dev, attr, external, error);
3399         if (ret < 0)
3400                 return ret;
3401         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3402                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3403                 int type = items->type;
3404
3405                 switch (type) {
3406                 case RTE_FLOW_ITEM_TYPE_VOID:
3407                         break;
3408                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3409                         ret = flow_dv_validate_item_port_id
3410                                         (dev, items, attr, item_flags, error);
3411                         if (ret < 0)
3412                                 return ret;
3413                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3414                         break;
3415                 case RTE_FLOW_ITEM_TYPE_ETH:
3416                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3417                                                           error);
3418                         if (ret < 0)
3419                                 return ret;
3420                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3421                                              MLX5_FLOW_LAYER_OUTER_L2;
3422                         break;
3423                 case RTE_FLOW_ITEM_TYPE_VLAN:
3424                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3425                                                            dev, error);
3426                         if (ret < 0)
3427                                 return ret;
3428                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3429                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3430                         break;
3431                 case RTE_FLOW_ITEM_TYPE_IPV4:
3432                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3433                                                   &item_flags, &tunnel);
3434                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3435                                                            NULL, error);
3436                         if (ret < 0)
3437                                 return ret;
3438                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3439                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3440                         if (items->mask != NULL &&
3441                             ((const struct rte_flow_item_ipv4 *)
3442                              items->mask)->hdr.next_proto_id) {
3443                                 next_protocol =
3444                                         ((const struct rte_flow_item_ipv4 *)
3445                                          (items->spec))->hdr.next_proto_id;
3446                                 next_protocol &=
3447                                         ((const struct rte_flow_item_ipv4 *)
3448                                          (items->mask))->hdr.next_proto_id;
3449                         } else {
3450                                 /* Reset for inner layer. */
3451                                 next_protocol = 0xff;
3452                         }
3453                         break;
3454                 case RTE_FLOW_ITEM_TYPE_IPV6:
3455                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3456                                                   &item_flags, &tunnel);
3457                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3458                                                            NULL, error);
3459                         if (ret < 0)
3460                                 return ret;
3461                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3462                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3463                         if (items->mask != NULL &&
3464                             ((const struct rte_flow_item_ipv6 *)
3465                              items->mask)->hdr.proto) {
3466                                 next_protocol =
3467                                         ((const struct rte_flow_item_ipv6 *)
3468                                          items->spec)->hdr.proto;
3469                                 next_protocol &=
3470                                         ((const struct rte_flow_item_ipv6 *)
3471                                          items->mask)->hdr.proto;
3472                         } else {
3473                                 /* Reset for inner layer. */
3474                                 next_protocol = 0xff;
3475                         }
3476                         break;
3477                 case RTE_FLOW_ITEM_TYPE_TCP:
3478                         ret = mlx5_flow_validate_item_tcp
3479                                                 (items, item_flags,
3480                                                  next_protocol,
3481                                                  &nic_tcp_mask,
3482                                                  error);
3483                         if (ret < 0)
3484                                 return ret;
3485                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3486                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3487                         break;
3488                 case RTE_FLOW_ITEM_TYPE_UDP:
3489                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3490                                                           next_protocol,
3491                                                           error);
3492                         if (ret < 0)
3493                                 return ret;
3494                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3495                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3496                         break;
3497                 case RTE_FLOW_ITEM_TYPE_GRE:
3498                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3499                                                           next_protocol, error);
3500                         if (ret < 0)
3501                                 return ret;
3502                         gre_item = items;
3503                         last_item = MLX5_FLOW_LAYER_GRE;
3504                         break;
3505                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3506                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3507                                                             next_protocol,
3508                                                             error);
3509                         if (ret < 0)
3510                                 return ret;
3511                         last_item = MLX5_FLOW_LAYER_NVGRE;
3512                         break;
3513                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3514                         ret = mlx5_flow_validate_item_gre_key
3515                                 (items, item_flags, gre_item, error);
3516                         if (ret < 0)
3517                                 return ret;
3518                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3519                         break;
3520                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3521                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3522                                                             error);
3523                         if (ret < 0)
3524                                 return ret;
3525                         last_item = MLX5_FLOW_LAYER_VXLAN;
3526                         break;
3527                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3528                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3529                                                                 item_flags, dev,
3530                                                                 error);
3531                         if (ret < 0)
3532                                 return ret;
3533                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3534                         break;
3535                 case RTE_FLOW_ITEM_TYPE_GENEVE:
3536                         ret = mlx5_flow_validate_item_geneve(items,
3537                                                              item_flags, dev,
3538                                                              error);
3539                         if (ret < 0)
3540                                 return ret;
3541                         last_item = MLX5_FLOW_LAYER_GENEVE;
3542                         break;
3543                 case RTE_FLOW_ITEM_TYPE_MPLS:
3544                         ret = mlx5_flow_validate_item_mpls(dev, items,
3545                                                            item_flags,
3546                                                            last_item, error);
3547                         if (ret < 0)
3548                                 return ret;
3549                         last_item = MLX5_FLOW_LAYER_MPLS;
3550                         break;
3551                 case RTE_FLOW_ITEM_TYPE_META:
3552                         ret = flow_dv_validate_item_meta(dev, items, attr,
3553                                                          error);
3554                         if (ret < 0)
3555                                 return ret;
3556                         last_item = MLX5_FLOW_ITEM_METADATA;
3557                         break;
3558                 case RTE_FLOW_ITEM_TYPE_ICMP:
3559                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3560                                                            next_protocol,
3561                                                            error);
3562                         if (ret < 0)
3563                                 return ret;
3564                         last_item = MLX5_FLOW_LAYER_ICMP;
3565                         break;
3566                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3567                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3568                                                             next_protocol,
3569                                                             error);
3570                         if (ret < 0)
3571                                 return ret;
3572                         last_item = MLX5_FLOW_LAYER_ICMP6;
3573                         break;
3574                 case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3575                 case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3576                         break;
3577                 default:
3578                         return rte_flow_error_set(error, ENOTSUP,
3579                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3580                                                   NULL, "item not supported");
3581                 }
3582                 item_flags |= last_item;
3583         }
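        /*
         * Pattern-walk sketch (illustrative): an outer ETH / IPV4 / TCP
         * pattern accumulates in the loop above
         *
         *      item_flags = MLX5_FLOW_LAYER_OUTER_L2 |
         *                   MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
         *                   MLX5_FLOW_LAYER_OUTER_L4_TCP;
         *
         * while next_protocol follows hdr.next_proto_id (6 for TCP) so the
         * TCP item check can be cross-validated against the preceding L3.
         */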
3584         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3585                 int type = actions->type;
3586                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3587                         return rte_flow_error_set(error, ENOTSUP,
3588                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3589                                                   actions, "too many actions");
3590                 switch (type) {
3591                 case RTE_FLOW_ACTION_TYPE_VOID:
3592                         break;
3593                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3594                         ret = flow_dv_validate_action_port_id(dev,
3595                                                               action_flags,
3596                                                               actions,
3597                                                               attr,
3598                                                               error);
3599                         if (ret)
3600                                 return ret;
3601                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3602                         ++actions_n;
3603                         break;
3604                 case RTE_FLOW_ACTION_TYPE_FLAG:
3605                         ret = mlx5_flow_validate_action_flag(action_flags,
3606                                                              attr, error);
3607                         if (ret < 0)
3608                                 return ret;
3609                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3610                         ++actions_n;
3611                         break;
3612                 case RTE_FLOW_ACTION_TYPE_MARK:
3613                         ret = mlx5_flow_validate_action_mark(actions,
3614                                                              action_flags,
3615                                                              attr, error);
3616                         if (ret < 0)
3617                                 return ret;
3618                         action_flags |= MLX5_FLOW_ACTION_MARK;
3619                         ++actions_n;
3620                         break;
3621                 case RTE_FLOW_ACTION_TYPE_DROP:
3622                         ret = mlx5_flow_validate_action_drop(action_flags,
3623                                                              attr, error);
3624                         if (ret < 0)
3625                                 return ret;
3626                         action_flags |= MLX5_FLOW_ACTION_DROP;
3627                         ++actions_n;
3628                         break;
3629                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3630                         ret = mlx5_flow_validate_action_queue(actions,
3631                                                               action_flags, dev,
3632                                                               attr, error);
3633                         if (ret < 0)
3634                                 return ret;
3635                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3636                         ++actions_n;
3637                         break;
3638                 case RTE_FLOW_ACTION_TYPE_RSS:
3639                         ret = mlx5_flow_validate_action_rss(actions,
3640                                                             action_flags, dev,
3641                                                             attr, item_flags,
3642                                                             error);
3643                         if (ret < 0)
3644                                 return ret;
3645                         action_flags |= MLX5_FLOW_ACTION_RSS;
3646                         ++actions_n;
3647                         break;
3648                 case RTE_FLOW_ACTION_TYPE_COUNT:
3649                         ret = flow_dv_validate_action_count(dev, error);
3650                         if (ret < 0)
3651                                 return ret;
3652                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3653                         ++actions_n;
3654                         break;
3655                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3656                         if (flow_dv_validate_action_pop_vlan(dev,
3657                                                              action_flags,
3658                                                              actions,
3659                                                              item_flags, attr,
3660                                                              error))
3661                                 return -rte_errno;
3662                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3663                         ++actions_n;
3664                         break;
3665                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3666                         ret = flow_dv_validate_action_push_vlan(action_flags,
3667                                                                 item_flags,
3668                                                                 actions, attr,
3669                                                                 error);
3670                         if (ret < 0)
3671                                 return ret;
3672                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3673                         ++actions_n;
3674                         break;
3675                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3676                         ret = flow_dv_validate_action_set_vlan_pcp
3677                                                 (action_flags, actions, error);
3678                         if (ret < 0)
3679                                 return ret;
3680                         /* Count PCP with push_vlan command. */
3681                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
3682                         break;
3683                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3684                         ret = flow_dv_validate_action_set_vlan_vid
3685                                                 (item_flags, action_flags,
3686                                                  actions, error);
3687                         if (ret < 0)
3688                                 return ret;
3689                         /* Count VID with push_vlan command. */
3690                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
3691                         break;
3692                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3693                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3694                         ret = flow_dv_validate_action_l2_encap(action_flags,
3695                                                                actions, attr,
3696                                                                error);
3697                         if (ret < 0)
3698                                 return ret;
3699                         action_flags |= actions->type ==
3700                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3701                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3702                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3703                         ++actions_n;
3704                         break;
3705                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3706                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3707                         ret = flow_dv_validate_action_l2_decap(action_flags,
3708                                                                attr, error);
3709                         if (ret < 0)
3710                                 return ret;
3711                         action_flags |= actions->type ==
3712                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3713                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3714                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3715                         ++actions_n;
3716                         break;
3717                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3718                         ret = flow_dv_validate_action_raw_encap(action_flags,
3719                                                                 actions, attr,
3720                                                                 error);
3721                         if (ret < 0)
3722                                 return ret;
3723                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3724                         ++actions_n;
3725                         break;
3726                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3727                         ret = flow_dv_validate_action_raw_decap(action_flags,
3728                                                                 actions, attr,
3729                                                                 error);
3730                         if (ret < 0)
3731                                 return ret;
3732                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3733                         ++actions_n;
3734                         break;
3735                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3736                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3737                         ret = flow_dv_validate_action_modify_mac(action_flags,
3738                                                                  actions,
3739                                                                  item_flags,
3740                                                                  error);
3741                         if (ret < 0)
3742                                 return ret;
3743                         /* Count all modify-header actions as one action. */
3744                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3745                                 ++actions_n;
3746                         action_flags |= actions->type ==
3747                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3748                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3749                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3750                         break;
3752                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3753                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3754                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3755                                                                   actions,
3756                                                                   item_flags,
3757                                                                   error);
3758                         if (ret < 0)
3759                                 return ret;
3760                         /* Count all modify-header actions as one action. */
3761                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3762                                 ++actions_n;
3763                         action_flags |= actions->type ==
3764                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3765                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3766                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3767                         break;
3768                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3769                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3770                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3771                                                                   actions,
3772                                                                   item_flags,
3773                                                                   error);
3774                         if (ret < 0)
3775                                 return ret;
3776                         /* Count all modify-header actions as one action. */
3777                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3778                                 ++actions_n;
3779                         action_flags |= actions->type ==
3780                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3781                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3782                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3783                         break;
3784                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3785                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3786                         ret = flow_dv_validate_action_modify_tp(action_flags,
3787                                                                 actions,
3788                                                                 item_flags,
3789                                                                 error);
3790                         if (ret < 0)
3791                                 return ret;
3792                         /* Count all modify-header actions as one action. */
3793                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3794                                 ++actions_n;
3795                         action_flags |= actions->type ==
3796                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3797                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3798                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3799                         break;
3800                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3801                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3802                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3803                                                                  actions,
3804                                                                  item_flags,
3805                                                                  error);
3806                         if (ret < 0)
3807                                 return ret;
3808                         /* Count all modify-header actions as one action. */
3809                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3810                                 ++actions_n;
3811                         action_flags |= actions->type ==
3812                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3813                                                 MLX5_FLOW_ACTION_SET_TTL :
3814                                                 MLX5_FLOW_ACTION_DEC_TTL;
3815                         break;
3816                 case RTE_FLOW_ACTION_TYPE_JUMP:
3817                         ret = flow_dv_validate_action_jump(actions,
3818                                                            action_flags,
3819                                                            attr, external,
3820                                                            error);
3821                         if (ret)
3822                                 return ret;
3823                         ++actions_n;
3824                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3825                         break;
3826                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3827                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3828                         ret = flow_dv_validate_action_modify_tcp_seq
3829                                                                 (action_flags,
3830                                                                  actions,
3831                                                                  item_flags,
3832                                                                  error);
3833                         if (ret < 0)
3834                                 return ret;
3835                         /* Count all modify-header actions as one action. */
3836                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3837                                 ++actions_n;
3838                         action_flags |= actions->type ==
3839                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3840                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3841                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3842                         break;
3843                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3844                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3845                         ret = flow_dv_validate_action_modify_tcp_ack
3846                                                                 (action_flags,
3847                                                                  actions,
3848                                                                  item_flags,
3849                                                                  error);
3850                         if (ret < 0)
3851                                 return ret;
3852                         /* Count all modify-header actions as one action. */
3853                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3854                                 ++actions_n;
3855                         action_flags |= actions->type ==
3856                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3857                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3858                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3859                         break;
3860                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
3861                         break;
3862                 default:
3863                         return rte_flow_error_set(error, ENOTSUP,
3864                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3865                                                   actions,
3866                                                   "action not supported");
3867                 }
3868         }
3869         if ((action_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3870             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
3871                 return rte_flow_error_set(error, ENOTSUP,
3872                                           RTE_FLOW_ERROR_TYPE_ACTION,
3873                                           actions,
3874                                           "can't have vxlan and vlan"
3875                                           " actions in the same rule");
3876         /* Eswitch has a few restrictions on using items and actions. */
3877         if (attr->transfer) {
3878                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3879                         return rte_flow_error_set(error, ENOTSUP,
3880                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3881                                                   NULL,
3882                                                   "unsupported action FLAG");
3883                 if (action_flags & MLX5_FLOW_ACTION_MARK)
3884                         return rte_flow_error_set(error, ENOTSUP,
3885                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3886                                                   NULL,
3887                                                   "unsupported action MARK");
3888                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3889                         return rte_flow_error_set(error, ENOTSUP,
3890                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3891                                                   NULL,
3892                                                   "unsupported action QUEUE");
3893                 if (action_flags & MLX5_FLOW_ACTION_RSS)
3894                         return rte_flow_error_set(error, ENOTSUP,
3895                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3896                                                   NULL,
3897                                                   "unsupported action RSS");
3898                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3899                         return rte_flow_error_set(error, EINVAL,
3900                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3901                                                   actions,
3902                                                   "no fate action is found");
3903         } else {
3904                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3905                         return rte_flow_error_set(error, EINVAL,
3906                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3907                                                   actions,
3908                                                   "no fate action is found");
3909         }
3910         return 0;
3911 }
3912
3913 /**
3914  * Internal preparation function. Allocates the DV flow structure;
3915  * its size is constant.
3916  *
3917  * @param[in] attr
3918  *   Pointer to the flow attributes.
3919  * @param[in] items
3920  *   Pointer to the list of items.
3921  * @param[in] actions
3922  *   Pointer to the list of actions.
3923  * @param[out] error
3924  *   Pointer to the error structure.
3925  *
3926  * @return
3927  *   Pointer to mlx5_flow object on success,
3928  *   otherwise NULL and rte_errno is set.
3929  */
3930 static struct mlx5_flow *
3931 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3932                 const struct rte_flow_item items[] __rte_unused,
3933                 const struct rte_flow_action actions[] __rte_unused,
3934                 struct rte_flow_error *error)
3935 {
3936         uint32_t size = sizeof(struct mlx5_flow);
3937         struct mlx5_flow *flow;
3938
3939         flow = rte_calloc(__func__, 1, size, 0);
3940         if (!flow) {
3941                 rte_flow_error_set(error, ENOMEM,
3942                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3943                                    "not enough memory to create flow");
3944                 return NULL;
3945         }
3946         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3947         return flow;
3948 }
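
     /*
      * Minimal usage sketch (hypothetical caller, for illustration only):
      *
      *     struct mlx5_flow *dev_flow;
      *
      *     dev_flow = flow_dv_prepare(attr, items, actions, error);
      *     if (!dev_flow)
      *             return -rte_errno;
      *
      * On success dev_flow->dv.value.size is pre-set to the match buffer
      * size.
      */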
3949
3950 #ifndef NDEBUG
3951 /**
3952  * Sanity check for match mask and value, similar to check_valid_spec() in
3953  * the kernel driver. Fails if the value has a bit set outside the mask.
3954  *
3955  * @param match_mask
3956  *   pointer to match mask buffer.
3957  * @param match_value
3958  *   pointer to match value buffer.
3959  *
3960  * @return
3961  *   0 if valid, -EINVAL otherwise.
3962  */
3963 static int
3964 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3965 {
3966         uint8_t *m = match_mask;
3967         uint8_t *v = match_value;
3968         unsigned int i;
3969
3970         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3971                 if (v[i] & ~m[i]) {
3972                         DRV_LOG(ERR,
3973                                 "match_value differs from match_criteria"
3974                                 " %p[%u] != %p[%u]",
3975                                 match_value, i, match_mask, i);
3976                         return -EINVAL;
3977                 }
3978         }
3979         return 0;
3980 }
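
     /*
      * The translate path is expected to wrap this check in an assertion,
      * e.g. (sketch, caller-side):
      *
      *     assert(!flow_dv_check_valid_spec(matcher.mask.buf,
      *                                      dev_flow->dv.value.buf));
      */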
3981 #endif
3982
3983 /**
3984  * Add Ethernet item to matcher and to the value.
3985  *
3986  * @param[in, out] matcher
3987  *   Flow matcher.
3988  * @param[in, out] key
3989  *   Flow matcher value.
3990  * @param[in] item
3991  *   Flow pattern to translate.
3992  * @param[in] inner
3993  *   Item is inner pattern.
3994  */
3995 static void
3996 flow_dv_translate_item_eth(void *matcher, void *key,
3997                            const struct rte_flow_item *item, int inner)
3998 {
3999         const struct rte_flow_item_eth *eth_m = item->mask;
4000         const struct rte_flow_item_eth *eth_v = item->spec;
4001         const struct rte_flow_item_eth nic_mask = {
4002                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4003                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
4004                 .type = RTE_BE16(0xffff),
4005         };
4006         void *headers_m;
4007         void *headers_v;
4008         char *l24_v;
4009         unsigned int i;
4010
4011         if (!eth_v)
4012                 return;
4013         if (!eth_m)
4014                 eth_m = &nic_mask;
4015         if (inner) {
4016                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4017                                          inner_headers);
4018                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4019         } else {
4020                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4021                                          outer_headers);
4022                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4023         }
4024         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
4025                &eth_m->dst, sizeof(eth_m->dst));
4026         /* The value must be in the range of the mask. */
4027         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
4028         for (i = 0; i < sizeof(eth_m->dst); ++i)
4029                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
4030         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
4031                &eth_m->src, sizeof(eth_m->src));
4032         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
4033         /* The value must be in the range of the mask. */
4034         for (i = 0; i < sizeof(eth_m->src); ++i)
4035                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
4036         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4037                  rte_be_to_cpu_16(eth_m->type));
4038         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
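             /*
              * Both rte_flow and the PRM layout keep the ethertype as 16-bit
              * big-endian, so the masked value can be stored as raw bytes
              * with no byte swap.
              */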
4039         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
4040 }
4041
4042 /**
4043  * Add VLAN item to matcher and to the value.
4044  *
4045  * @param[in, out] dev_flow
4046  *   Flow descriptor.
4047  * @param[in, out] matcher
4048  *   Flow matcher.
4049  * @param[in, out] key
4050  *   Flow matcher value.
4051  * @param[in] item
4052  *   Flow pattern to translate.
4053  * @param[in] inner
4054  *   Item is inner pattern.
4055  */
4056 static void
4057 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
4058                             void *matcher, void *key,
4059                             const struct rte_flow_item *item,
4060                             int inner)
4061 {
4062         const struct rte_flow_item_vlan *vlan_m = item->mask;
4063         const struct rte_flow_item_vlan *vlan_v = item->spec;
4064         void *headers_m;
4065         void *headers_v;
4066         uint16_t tci_m;
4067         uint16_t tci_v;
4068
4069         if (!vlan_v)
4070                 return;
4071         if (!vlan_m)
4072                 vlan_m = &rte_flow_item_vlan_mask;
4073         if (inner) {
4074                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4075                                          inner_headers);
4076                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4077         } else {
4078                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4079                                          outer_headers);
4080                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4081                 /*
4082                  * This is a workaround: masks are not supported here
4083                  * and have been pre-validated.
4084                  */
4085                 dev_flow->dv.vf_vlan.tag =
4086                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
4087         }
4088         tci_m = rte_be_to_cpu_16(vlan_m->tci);
4089         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
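             /*
              * TCI layout: PCP(3) | CFI(1) | VID(12). The VID occupies the
              * low 12 bits, CFI is bit 12 and PCP the top three bits, which
              * explains the shifts by 12 and 13 below.
              */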
4090         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
4091         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
4092         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
4093         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
4094         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
4095         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
4096         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
4097         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
4098         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
4099                  rte_be_to_cpu_16(vlan_m->inner_type));
4100         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
4101                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
4102 }
4103
4104 /**
4105  * Add IPV4 item to matcher and to the value.
4106  *
4107  * @param[in, out] matcher
4108  *   Flow matcher.
4109  * @param[in, out] key
4110  *   Flow matcher value.
4111  * @param[in] item
4112  *   Flow pattern to translate.
4113  * @param[in] inner
4114  *   Item is inner pattern.
4115  * @param[in] group
4116  *   The group to insert the rule.
4117  */
4118 static void
4119 flow_dv_translate_item_ipv4(void *matcher, void *key,
4120                             const struct rte_flow_item *item,
4121                             int inner, uint32_t group)
4122 {
4123         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4124         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4125         const struct rte_flow_item_ipv4 nic_mask = {
4126                 .hdr = {
4127                         .src_addr = RTE_BE32(0xffffffff),
4128                         .dst_addr = RTE_BE32(0xffffffff),
4129                         .type_of_service = 0xff,
4130                         .next_proto_id = 0xff,
4131                 },
4132         };
4133         void *headers_m;
4134         void *headers_v;
4135         char *l24_m;
4136         char *l24_v;
4137         uint8_t tos;
4138
4139         if (inner) {
4140                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4141                                          inner_headers);
4142                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4143         } else {
4144                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4145                                          outer_headers);
4146                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4147         }
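             /*
              * On group 0 (the root table) the full 4-bit ip_version field is
              * masked; on other tables only the IPv4 bit is. The root table
              * presumably requires an exact version match.
              */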
4148         if (group == 0)
4149                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4150         else
4151                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4152         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4153         if (!ipv4_v)
4154                 return;
4155         if (!ipv4_m)
4156                 ipv4_m = &nic_mask;
4157         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4158                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4159         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4160                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4161         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4162         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4163         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4164                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4165         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4166                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
4167         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4168         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4169         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4170         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4171                  ipv4_m->hdr.type_of_service);
4172         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4173         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4174                  ipv4_m->hdr.type_of_service >> 2);
4175         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4176         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4177                  ipv4_m->hdr.next_proto_id);
4178         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4179                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4180 }
4181
4182 /**
4183  * Add IPV6 item to matcher and to the value.
4184  *
4185  * @param[in, out] matcher
4186  *   Flow matcher.
4187  * @param[in, out] key
4188  *   Flow matcher value.
4189  * @param[in] item
4190  *   Flow pattern to translate.
4191  * @param[in] inner
4192  *   Item is inner pattern.
4193  * @param[in] group
4194  *   The group to insert the rule.
4195  */
4196 static void
4197 flow_dv_translate_item_ipv6(void *matcher, void *key,
4198                             const struct rte_flow_item *item,
4199                             int inner, uint32_t group)
4200 {
4201         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4202         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4203         const struct rte_flow_item_ipv6 nic_mask = {
4204                 .hdr = {
4205                         .src_addr =
4206                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4207                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4208                         .dst_addr =
4209                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4210                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4211                         .vtc_flow = RTE_BE32(0xffffffff),
4212                         .proto = 0xff,
4213                         .hop_limits = 0xff,
4214                 },
4215         };
4216         void *headers_m;
4217         void *headers_v;
4218         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4219         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4220         char *l24_m;
4221         char *l24_v;
4222         uint32_t vtc_m;
4223         uint32_t vtc_v;
4224         int i;
4225         int size;
4226
4227         if (inner) {
4228                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4229                                          inner_headers);
4230                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4231         } else {
4232                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4233                                          outer_headers);
4234                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4235         }
4236         if (group == 0)
4237                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4238         else
4239                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4240         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4241         if (!ipv6_v)
4242                 return;
4243         if (!ipv6_m)
4244                 ipv6_m = &nic_mask;
4245         size = sizeof(ipv6_m->hdr.dst_addr);
4246         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4247                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4248         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4249                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4250         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4251         for (i = 0; i < size; ++i)
4252                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4253         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4254                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4255         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4256                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4257         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4258         for (i = 0; i < size; ++i)
4259                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4260         /* TOS. */
4261         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4262         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
4263         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4264         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4265         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4266         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4267         /* Label. */
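             /*
              * MLX5_SET() truncates the value to the field width, so the
              * whole vtc word can be passed for the 20-bit flow label.
              */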
4268         if (inner) {
4269                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4270                          vtc_m);
4271                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4272                          vtc_v);
4273         } else {
4274                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4275                          vtc_m);
4276                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4277                          vtc_v);
4278         }
4279         /* Protocol. */
4280         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4281                  ipv6_m->hdr.proto);
4282         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4283                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4284 }
4285
4286 /**
4287  * Add TCP item to matcher and to the value.
4288  *
4289  * @param[in, out] matcher
4290  *   Flow matcher.
4291  * @param[in, out] key
4292  *   Flow matcher value.
4293  * @param[in] item
4294  *   Flow pattern to translate.
4295  * @param[in] inner
4296  *   Item is inner pattern.
4297  */
4298 static void
4299 flow_dv_translate_item_tcp(void *matcher, void *key,
4300                            const struct rte_flow_item *item,
4301                            int inner)
4302 {
4303         const struct rte_flow_item_tcp *tcp_m = item->mask;
4304         const struct rte_flow_item_tcp *tcp_v = item->spec;
4305         void *headers_m;
4306         void *headers_v;
4307
4308         if (inner) {
4309                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4310                                          inner_headers);
4311                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4312         } else {
4313                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4314                                          outer_headers);
4315                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4316         }
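             /* The TCP item alone pins ip_protocol, even with a NULL spec. */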
4317         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4318         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4319         if (!tcp_v)
4320                 return;
4321         if (!tcp_m)
4322                 tcp_m = &rte_flow_item_tcp_mask;
4323         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4324                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4325         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4326                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4327         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4328                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4329         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4330                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4331         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4332                  tcp_m->hdr.tcp_flags);
4333         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4334                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4335 }
4336
4337 /**
4338  * Add UDP item to matcher and to the value.
4339  *
4340  * @param[in, out] matcher
4341  *   Flow matcher.
4342  * @param[in, out] key
4343  *   Flow matcher value.
4344  * @param[in] item
4345  *   Flow pattern to translate.
4346  * @param[in] inner
4347  *   Item is inner pattern.
4348  */
4349 static void
4350 flow_dv_translate_item_udp(void *matcher, void *key,
4351                            const struct rte_flow_item *item,
4352                            int inner)
4353 {
4354         const struct rte_flow_item_udp *udp_m = item->mask;
4355         const struct rte_flow_item_udp *udp_v = item->spec;
4356         void *headers_m;
4357         void *headers_v;
4358
4359         if (inner) {
4360                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4361                                          inner_headers);
4362                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4363         } else {
4364                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4365                                          outer_headers);
4366                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4367         }
4368         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4369         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4370         if (!udp_v)
4371                 return;
4372         if (!udp_m)
4373                 udp_m = &rte_flow_item_udp_mask;
4374         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4375                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4376         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4377                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4378         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4379                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4380         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4381                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4382 }
4383
4384 /**
4385  * Add GRE optional key item to matcher and to the value.
4386  *
4387  * @param[in, out] matcher
4388  *   Flow matcher.
4389  * @param[in, out] key
4390  *   Flow matcher value.
4391  * @param[in] item
4392  *   Flow pattern to translate.
4395  */
4396 static void
4397 flow_dv_translate_item_gre_key(void *matcher, void *key,
4398                                const struct rte_flow_item *item)
4399 {
4400         const rte_be32_t *key_m = item->mask;
4401         const rte_be32_t *key_v = item->spec;
4402         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4403         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4404         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4405
4406         if (!key_v)
4407                 return;
4408         if (!key_m)
4409                 key_m = &gre_key_default_mask;
4410         /* GRE K bit must be on and should already be validated */
4411         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4412         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
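             /*
              * The 32-bit GRE key is split in the match structure: gre_key_h
              * carries the high 24 bits and gre_key_l the low 8, hence the
              * shift/mask pairs below.
              */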
4413         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4414                  rte_be_to_cpu_32(*key_m) >> 8);
4415         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4416                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4417         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4418                  rte_be_to_cpu_32(*key_m) & 0xFF);
4419         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4420                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4421 }
4422
4423 /**
4424  * Add GRE item to matcher and to the value.
4425  *
4426  * @param[in, out] matcher
4427  *   Flow matcher.
4428  * @param[in, out] key
4429  *   Flow matcher value.
4430  * @param[in] item
4431  *   Flow pattern to translate.
4432  * @param[in] inner
4433  *   Item is inner pattern.
4434  */
4435 static void
4436 flow_dv_translate_item_gre(void *matcher, void *key,
4437                            const struct rte_flow_item *item,
4438                            int inner)
4439 {
4440         const struct rte_flow_item_gre *gre_m = item->mask;
4441         const struct rte_flow_item_gre *gre_v = item->spec;
4442         void *headers_m;
4443         void *headers_v;
4444         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4445         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4446         struct {
4447                 union {
4448                         __extension__
4449                         struct {
4450                                 uint16_t version:3;
4451                                 uint16_t rsvd0:9;
4452                                 uint16_t s_present:1;
4453                                 uint16_t k_present:1;
4454                                 uint16_t rsvd_bit1:1;
4455                                 uint16_t c_present:1;
4456                         };
4457                         uint16_t value;
4458                 };
4459         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4460
4461         if (inner) {
4462                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4463                                          inner_headers);
4464                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4465         } else {
4466                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4467                                          outer_headers);
4468                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4469         }
4470         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4471         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4472         if (!gre_v)
4473                 return;
4474         if (!gre_m)
4475                 gre_m = &rte_flow_item_gre_mask;
4476         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4477                  rte_be_to_cpu_16(gre_m->protocol));
4478         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4479                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4480         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4481         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4482         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4483                  gre_crks_rsvd0_ver_m.c_present);
4484         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4485                  gre_crks_rsvd0_ver_v.c_present &
4486                  gre_crks_rsvd0_ver_m.c_present);
4487         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4488                  gre_crks_rsvd0_ver_m.k_present);
4489         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4490                  gre_crks_rsvd0_ver_v.k_present &
4491                  gre_crks_rsvd0_ver_m.k_present);
4492         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4493                  gre_crks_rsvd0_ver_m.s_present);
4494         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4495                  gre_crks_rsvd0_ver_v.s_present &
4496                  gre_crks_rsvd0_ver_m.s_present);
4497 }
4498
4499 /**
4500  * Add NVGRE item to matcher and to the value.
4501  *
4502  * @param[in, out] matcher
4503  *   Flow matcher.
4504  * @param[in, out] key
4505  *   Flow matcher value.
4506  * @param[in] item
4507  *   Flow pattern to translate.
4508  * @param[in] inner
4509  *   Item is inner pattern.
4510  */
4511 static void
4512 flow_dv_translate_item_nvgre(void *matcher, void *key,
4513                              const struct rte_flow_item *item,
4514                              int inner)
4515 {
4516         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4517         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4518         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4519         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4520         const char *tni_flow_id_m;
4521         const char *tni_flow_id_v;
4522         char *gre_key_m;
4523         char *gre_key_v;
4524         int size;
4525         int i;
4526
4527         /* For NVGRE, GRE header fields must be set with defined values. */
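             /*
              * 0x2000 sets only the K (key present) bit; the 0xB000 mask
              * additionally covers the C and S bits, forcing them to zero.
              */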
4528         const struct rte_flow_item_gre gre_spec = {
4529                 .c_rsvd0_ver = RTE_BE16(0x2000),
4530                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4531         };
4532         const struct rte_flow_item_gre gre_mask = {
4533                 .c_rsvd0_ver = RTE_BE16(0xB000),
4534                 .protocol = RTE_BE16(UINT16_MAX),
4535         };
4536         const struct rte_flow_item gre_item = {
4537                 .spec = &gre_spec,
4538                 .mask = &gre_mask,
4539                 .last = NULL,
4540         };
4541         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4542         if (!nvgre_v)
4543                 return;
4544         if (!nvgre_m)
4545                 nvgre_m = &rte_flow_item_nvgre_mask;
             /* Read TNI/flow-id only after the default mask is in place. */
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
4546         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
4547         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4548         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4549         memcpy(gre_key_m, tni_flow_id_m, size);
4550         for (i = 0; i < size; ++i)
4551                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4552 }
4553
4554 /**
4555  * Add VXLAN item to matcher and to the value.
4556  *
4557  * @param[in, out] matcher
4558  *   Flow matcher.
4559  * @param[in, out] key
4560  *   Flow matcher value.
4561  * @param[in] item
4562  *   Flow pattern to translate.
4563  * @param[in] inner
4564  *   Item is inner pattern.
4565  */
4566 static void
4567 flow_dv_translate_item_vxlan(void *matcher, void *key,
4568                              const struct rte_flow_item *item,
4569                              int inner)
4570 {
4571         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4572         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4573         void *headers_m;
4574         void *headers_v;
4575         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4576         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4577         char *vni_m;
4578         char *vni_v;
4579         uint16_t dport;
4580         int size;
4581         int i;
4582
4583         if (inner) {
4584                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4585                                          inner_headers);
4586                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4587         } else {
4588                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4589                                          outer_headers);
4590                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4591         }
4592         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4593                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
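             /*
              * If the pattern did not already match a UDP destination port,
              * pin it to the well-known VXLAN(-GPE) port.
              */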
4594         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4595                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4596                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4597         }
4598         if (!vxlan_v)
4599                 return;
4600         if (!vxlan_m)
4601                 vxlan_m = &rte_flow_item_vxlan_mask;
4602         size = sizeof(vxlan_m->vni);
4603         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4604         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4605         memcpy(vni_m, vxlan_m->vni, size);
4606         for (i = 0; i < size; ++i)
4607                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4608 }
4609
4610 /**
4611  * Add Geneve item to matcher and to the value.
4612  *
4613  * @param[in, out] matcher
4614  *   Flow matcher.
4615  * @param[in, out] key
4616  *   Flow matcher value.
4617  * @param[in] item
4618  *   Flow pattern to translate.
4619  * @param[in] inner
4620  *   Item is inner pattern.
4621  */
4623 static void
4624 flow_dv_translate_item_geneve(void *matcher, void *key,
4625                               const struct rte_flow_item *item, int inner)
4626 {
4627         const struct rte_flow_item_geneve *geneve_m = item->mask;
4628         const struct rte_flow_item_geneve *geneve_v = item->spec;
4629         void *headers_m;
4630         void *headers_v;
4631         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4632         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4633         uint16_t dport;
4634         uint16_t gbhdr_m;
4635         uint16_t gbhdr_v;
4636         char *vni_m;
4637         char *vni_v;
4638         size_t size, i;
4639
4640         if (inner) {
4641                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4642                                          inner_headers);
4643                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4644         } else {
4645                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4646                                          outer_headers);
4647                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4648         }
4649         dport = MLX5_UDP_PORT_GENEVE;
4650         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4651                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4652                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4653         }
4654         if (!geneve_v)
4655                 return;
4656         if (!geneve_m)
4657                 geneve_m = &rte_flow_item_geneve_mask;
4658         size = sizeof(geneve_m->vni);
4659         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
4660         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
4661         memcpy(vni_m, geneve_m->vni, size);
4662         for (i = 0; i < size; ++i)
4663                 vni_v[i] = vni_m[i] & geneve_v->vni[i];
4664         MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
4665                  rte_be_to_cpu_16(geneve_m->protocol));
4666         MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
4667                  rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
4668         gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
4669         gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
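             /*
              * ver_opt_len_o_c_rsvd0 packs version, option length, OAM and
              * critical bits; the helpers below extract the OAM flag and the
              * option length for matching.
              */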
4670         MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
4671                  MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4672         MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
4673                  MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
4674         MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
4675                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4676         MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
4677                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
4678                  MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
4679 }
4680
4681 /**
4682  * Add MPLS item to matcher and to the value.
4683  *
4684  * @param[in, out] matcher
4685  *   Flow matcher.
4686  * @param[in, out] key
4687  *   Flow matcher value.
4688  * @param[in] item
4689  *   Flow pattern to translate.
4690  * @param[in] prev_layer
4691  *   The protocol layer indicated in previous item.
4692  * @param[in] inner
4693  *   Item is inner pattern.
4694  */
4695 static void
4696 flow_dv_translate_item_mpls(void *matcher, void *key,
4697                             const struct rte_flow_item *item,
4698                             uint64_t prev_layer,
4699                             int inner)
4700 {
4701         const uint32_t *in_mpls_m = item->mask;
4702         const uint32_t *in_mpls_v = item->spec;
4703         uint32_t *out_mpls_m = NULL;
4704         uint32_t *out_mpls_v = NULL;
4705         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4706         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4707         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4708                                      misc_parameters_2);
4709         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4710         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4711         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4712
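             /*
              * MPLS may follow UDP (MPLS-in-UDP port 6635), GRE (protocol
              * 0x8847, MPLS unicast) or sit directly over IP (IPPROTO_MPLS);
              * match the encapsulating header accordingly.
              */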
4713         switch (prev_layer) {
4714         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4715                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4716                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4717                          MLX5_UDP_PORT_MPLS);
4718                 break;
4719         case MLX5_FLOW_LAYER_GRE:
4720                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4721                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4722                          RTE_ETHER_TYPE_MPLS);
4723                 break;
4724         default:
4725                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4726                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4727                          IPPROTO_MPLS);
4728                 break;
4729         }
4730         if (!in_mpls_v)
4731                 return;
4732         if (!in_mpls_m)
4733                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4734         switch (prev_layer) {
4735         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4736                 out_mpls_m =
4737                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4738                                                  outer_first_mpls_over_udp);
4739                 out_mpls_v =
4740                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4741                                                  outer_first_mpls_over_udp);
4742                 break;
4743         case MLX5_FLOW_LAYER_GRE:
4744                 out_mpls_m =
4745                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4746                                                  outer_first_mpls_over_gre);
4747                 out_mpls_v =
4748                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4749                                                  outer_first_mpls_over_gre);
4750                 break;
4751         default:
4752                 /* Inner MPLS not over GRE is not supported. */
4753                 if (!inner) {
4754                         out_mpls_m =
4755                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4756                                                          misc2_m,
4757                                                          outer_first_mpls);
4758                         out_mpls_v =
4759                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4760                                                          misc2_v,
4761                                                          outer_first_mpls);
4762                 }
4763                 break;
4764         }
4765         if (out_mpls_m && out_mpls_v) {
4766                 *out_mpls_m = *in_mpls_m;
4767                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4768         }
4769 }
4770
4771 /**
4772  * Add META item to matcher and to the value.
4773  *
4774  * @param[in, out] matcher
4775  *   Flow matcher.
4776  * @param[in, out] key
4777  *   Flow matcher value.
4778  * @param[in] item
4779  *   Flow pattern to translate.
4782  */
4783 static void
4784 flow_dv_translate_item_meta(void *matcher, void *key,
4785                             const struct rte_flow_item *item)
4786 {
4787         const struct rte_flow_item_meta *meta_m;
4788         const struct rte_flow_item_meta *meta_v;
4789         void *misc2_m =
4790                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4791         void *misc2_v =
4792                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4793
4794         meta_m = (const void *)item->mask;
4795         if (!meta_m)
4796                 meta_m = &rte_flow_item_meta_mask;
4797         meta_v = (const void *)item->spec;
4798         if (meta_v) {
4799                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4800                          rte_be_to_cpu_32(meta_m->data));
4801                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4802                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
4803         }
4804 }
4805
4806 /**
4807  * Add vport metadata Reg C0 item to matcher and to the value.
4808  *
4809  * @param[in, out] matcher
4810  *   Flow matcher.
4811  * @param[in, out] key
4812  *   Flow matcher value.
4813  * @param[in] value
4814  *   Register value to match.
      * @param[in] mask
      *   Register mask to match.
4815  */
4816 static void
4817 flow_dv_translate_item_meta_vport(void *matcher, void *key,
4818                                   uint32_t value, uint32_t mask)
4819 {
4820         void *misc2_m =
4821                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4822         void *misc2_v =
4823                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4824
4825         MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
4826         MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
4827 }
4828
4829 /**
4830  * Add tag item to matcher and to the value.
4831  *
4832  * @param[in, out] matcher
4833  *   Flow matcher.
4834  * @param[in, out] key
4835  *   Flow matcher value.
4836  * @param[in] item
4837  *   Flow pattern to translate.
4838  */
4839 static void
4840 flow_dv_translate_item_tag(void *matcher, void *key,
4841                            const struct rte_flow_item *item)
4842 {
4843         void *misc2_m =
4844                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4845         void *misc2_v =
4846                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4847         const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
4848         const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
4849         enum modify_reg reg = tag_v->id;
4850         rte_be32_t value = tag_v->data;
4851         rte_be32_t mask = tag_m->data;
4852
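             /*
              * MLX5_SET() resolves its field name at compile time, so every
              * register needs an explicit case; the mapping cannot be table
              * driven.
              */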
4853         switch (reg) {
4854         case REG_A:
4855                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4856                                 rte_be_to_cpu_32(mask));
4857                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4858                                 rte_be_to_cpu_32(value));
4859                 break;
4860         case REG_B:
4861                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b,
4862                                  rte_be_to_cpu_32(mask));
4863                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b,
4864                                 rte_be_to_cpu_32(value));
4865                 break;
4866         case REG_C_0:
4867                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0,
4868                                  rte_be_to_cpu_32(mask));
4869                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0,
4870                                 rte_be_to_cpu_32(value));
4871                 break;
4872         case REG_C_1:
4873                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1,
4874                                  rte_be_to_cpu_32(mask));
4875                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1,
4876                                 rte_be_to_cpu_32(value));
4877                 break;
4878         case REG_C_2:
4879                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2,
4880                                  rte_be_to_cpu_32(mask));
4881                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2,
4882                                 rte_be_to_cpu_32(value));
4883                 break;
4884         case REG_C_3:
4885                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3,
4886                                  rte_be_to_cpu_32(mask));
4887                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3,
4888                                 rte_be_to_cpu_32(value));
4889                 break;
4890         case REG_C_4:
4891                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4,
4892                                  rte_be_to_cpu_32(mask));
4893                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4,
4894                                 rte_be_to_cpu_32(value));
4895                 break;
4896         case REG_C_5:
4897                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5,
4898                                  rte_be_to_cpu_32(mask));
4899                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5,
4900                                 rte_be_to_cpu_32(value));
4901                 break;
4902         case REG_C_6:
4903                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6,
4904                                  rte_be_to_cpu_32(mask));
4905                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6,
4906                                 rte_be_to_cpu_32(value));
4907                 break;
4908         case REG_C_7:
4909                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7,
4910                                  rte_be_to_cpu_32(mask));
4911                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7,
4912                                 rte_be_to_cpu_32(value));
4913                 break;
4914         }
4915 }
4916
4917 /**
4918  * Add source vport match to the specified matcher.
4919  *
4920  * @param[in, out] matcher
4921  *   Flow matcher.
4922  * @param[in, out] key
4923  *   Flow matcher value.
4924  * @param[in] port
4925  *   Source vport value to match.
4926  * @param[in] mask
4927  *   Mask to apply to the value.
4928  */
4929 static void
4930 flow_dv_translate_item_source_vport(void *matcher, void *key,
4931                                     int16_t port, uint16_t mask)
4932 {
4933         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4934         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4935
4936         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4937         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4938 }
4939
4940 /**
4941  * Translate port-id item to eswitch match on port-id.
4942  *
4943  * @param[in] dev
4944  *   The device to configure through.
4945  * @param[in, out] matcher
4946  *   Flow matcher.
4947  * @param[in, out] key
4948  *   Flow matcher value.
4949  * @param[in] item
4950  *   Flow pattern to translate.
4951  *
4952  * @return
4953  *   0 on success, a negative errno value otherwise.
4954  */
4955 static int
4956 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4957                                void *key, const struct rte_flow_item *item)
4958 {
4959         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4960         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4961         struct mlx5_priv *priv;
4962         uint16_t mask, id;
4963
4964         mask = pid_m ? pid_m->id : 0xffff;
4965         id = pid_v ? pid_v->id : dev->data->port_id;
4966         priv = mlx5_port_to_eswitch_info(id);
4967         if (!priv)
4968                 return -rte_errno;
4969         /* Translate to vport field or to metadata, depending on mode. */
4970         if (priv->vport_meta_mask)
4971                 flow_dv_translate_item_meta_vport(matcher, key,
4972                                                   priv->vport_meta_tag,
4973                                                   priv->vport_meta_mask);
4974         else
4975                 flow_dv_translate_item_source_vport(matcher, key,
4976                                                     priv->vport_id, mask);
4977         return 0;
4978 }
4979
4980 /**
4981  * Add ICMP6 item to matcher and to the value.
4982  *
4983  * @param[in, out] matcher
4984  *   Flow matcher.
4985  * @param[in, out] key
4986  *   Flow matcher value.
4987  * @param[in] item
4988  *   Flow pattern to translate.
4989  * @param[in] inner
4990  *   Item is inner pattern.
4991  */
4992 static void
4993 flow_dv_translate_item_icmp6(void *matcher, void *key,
4994                               const struct rte_flow_item *item,
4995                               int inner)
4996 {
4997         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
4998         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
4999         void *headers_m;
5000         void *headers_v;
5001         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5002                                      misc_parameters_3);
5003         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5004         if (inner) {
5005                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5006                                          inner_headers);
5007                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5008         } else {
5009                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5010                                          outer_headers);
5011                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5012         }
5013         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5014         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
5015         if (!icmp6_v)
5016                 return;
5017         if (!icmp6_m)
5018                 icmp6_m = &rte_flow_item_icmp6_mask;
5019         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
5020         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
5021                  icmp6_v->type & icmp6_m->type);
5022         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
5023         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
5024                  icmp6_v->code & icmp6_m->code);
5025 }
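
/*
 * Illustrative application-side item this translator consumes; the
 * type value (135, neighbor solicitation) is an example only.
 *
 *   static const struct rte_flow_item_icmp6 icmp6_spec = {
 *           .type = 135,
 *           .code = 0,
 *   };
 *   static const struct rte_flow_item icmp6_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ICMP6,
 *           .spec = &icmp6_spec,
 *           .mask = &rte_flow_item_icmp6_mask,
 *   };
 */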
5026
5027 /**
5028  * Add ICMP item to matcher and to the value.
5029  *
5030  * @param[in, out] matcher
5031  *   Flow matcher.
5032  * @param[in, out] key
5033  *   Flow matcher value.
5034  * @param[in] item
5035  *   Flow pattern to translate.
5036  * @param[in] inner
5037  *   Item is inner pattern.
5038  */
5039 static void
5040 flow_dv_translate_item_icmp(void *matcher, void *key,
5041                             const struct rte_flow_item *item,
5042                             int inner)
5043 {
5044         const struct rte_flow_item_icmp *icmp_m = item->mask;
5045         const struct rte_flow_item_icmp *icmp_v = item->spec;
5046         void *headers_m;
5047         void *headers_v;
5048         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
5049                                      misc_parameters_3);
5050         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
5051         if (inner) {
5052                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5053                                          inner_headers);
5054                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
5055         } else {
5056                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
5057                                          outer_headers);
5058                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
5059         }
5060         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
5061         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
5062         if (!icmp_v)
5063                 return;
5064         if (!icmp_m)
5065                 icmp_m = &rte_flow_item_icmp_mask;
5066         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
5067                  icmp_m->hdr.icmp_type);
5068         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
5069                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
5070         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
5071                  icmp_m->hdr.icmp_code);
5072         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
5073                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
5074 }
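
/*
 * Illustrative ICMPv4 counterpart (values are examples only); assumes
 * RTE_IP_ICMP_ECHO_REQUEST from rte_icmp.h for the echo request type.
 *
 *   static const struct rte_flow_item_icmp icmp_spec = {
 *           .hdr.icmp_type = RTE_IP_ICMP_ECHO_REQUEST,
 *           .hdr.icmp_code = 0,
 *   };
 *   static const struct rte_flow_item icmp_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ICMP,
 *           .spec = &icmp_spec,
 *           .mask = &rte_flow_item_icmp_mask,
 *   };
 */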
5075
5076 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
5077
5078 #define HEADER_IS_ZERO(match_criteria, headers)                              \
5079         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
5080                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
5081
5082 /**
5083  * Calculate flow matcher enable bitmap.
5084  *
5085  * @param match_criteria
5086  *   Pointer to flow matcher criteria.
5087  *
5088  * @return
5089  *   Bitmap of enabled fields.
5090  */
5091 static uint8_t
5092 flow_dv_matcher_enable(uint32_t *match_criteria)
5093 {
5094         uint8_t match_criteria_enable;
5095
5096         match_criteria_enable =
5097                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
5098                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
5099         match_criteria_enable |=
5100                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
5101                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
5102         match_criteria_enable |=
5103                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
5104                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
5105         match_criteria_enable |=
5106                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
5107                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
5108         match_criteria_enable |=
5109                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
5110                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
5111         return match_criteria_enable;
5112 }
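
/*
 * Worked example (illustrative): with only outer_headers and
 * misc_parameters_3 populated in the mask, the function returns
 *
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *   (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT)
 *
 * so the device only evaluates the criteria the matcher actually uses.
 */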
5113
5115 /**
5116  * Get a flow table.
5117  *
5118  * @param[in, out] dev
5119  *   Pointer to rte_eth_dev structure.
5120  * @param[in] table_id
5121  *   Table id to use.
5122  * @param[in] egress
5123  *   Direction of the table.
5124  * @param[in] transfer
5125  *   E-Switch or NIC flow.
5126  * @param[out] error
5127  *   Pointer to error structure.
5128  *
5129  * @return
5130  *   Returns the table resource based on the index, NULL in case of failure.
5131  */
5132 static struct mlx5_flow_tbl_resource *
5133 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
5134                          uint32_t table_id, uint8_t egress,
5135                          uint8_t transfer,
5136                          struct rte_flow_error *error)
5137 {
5138         struct mlx5_priv *priv = dev->data->dev_private;
5139         struct mlx5_ibv_shared *sh = priv->sh;
5140         struct mlx5_flow_tbl_resource *tbl;
5141
5142 #ifdef HAVE_MLX5DV_DR
5143         if (transfer) {
5144                 tbl = &sh->fdb_tbl[table_id];
5145                 if (!tbl->obj)
5146                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5147                                 (sh->fdb_domain, table_id);
5148         } else if (egress) {
5149                 tbl = &sh->tx_tbl[table_id];
5150                 if (!tbl->obj)
5151                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5152                                 (sh->tx_domain, table_id);
5153         } else {
5154                 tbl = &sh->rx_tbl[table_id];
5155                 if (!tbl->obj)
5156                         tbl->obj = mlx5_glue->dr_create_flow_tbl
5157                                 (sh->rx_domain, table_id);
5158         }
5159         if (!tbl->obj) {
5160                 rte_flow_error_set(error, ENOMEM,
5161                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5162                                    NULL, "cannot create table");
5163                 return NULL;
5164         }
5165         rte_atomic32_inc(&tbl->refcnt);
5166         return tbl;
5167 #else
5168         (void)error;
5169         (void)tbl;
5170         if (transfer)
5171                 return &sh->fdb_tbl[table_id];
5172         else if (egress)
5173                 return &sh->tx_tbl[table_id];
5174         else
5175                 return &sh->rx_tbl[table_id];
5176 #endif
5177 }
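
/*
 * Usage sketch (illustrative only): acquire ingress NIC table 1. On
 * the HAVE_MLX5DV_DR path this creates the DR table on first use and
 * takes a reference that flow_dv_tbl_resource_release() drops.
 *
 *   struct mlx5_flow_tbl_resource *tbl =
 *           flow_dv_tbl_resource_get(dev, 1, 0, 0, error);
 *   if (tbl)
 *           flow_dv_tbl_resource_release(tbl);
 */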
5178
5179 /**
5180  * Release a flow table.
5181  *
5182  * @param[in] tbl
5183  *   Table resource to be released.
5184  *
5185  * @return
5186  *   Returns 0 if the table was released, 1 otherwise.
5187  */
5188 static int
5189 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
5190 {
5191         if (!tbl)
5192                 return 0;
5193         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
5194                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
5195                 tbl->obj = NULL;
5196                 return 0;
5197         }
5198         return 1;
5199 }
5200
5201 /**
5202  * Register the flow matcher.
5203  *
5204  * @param[in, out] dev
5205  *   Pointer to rte_eth_dev structure.
5206  * @param[in, out] matcher
5207  *   Pointer to flow matcher.
5208  * @param[in, out] dev_flow
5209  *   Pointer to the dev_flow.
5210  * @param[out] error
5211  *   Pointer to error structure.
5212  *
5213  * @return
5214  *   0 on success, a negative errno value otherwise and rte_errno is set.
5215  */
5216 static int
5217 flow_dv_matcher_register(struct rte_eth_dev *dev,
5218                          struct mlx5_flow_dv_matcher *matcher,
5219                          struct mlx5_flow *dev_flow,
5220                          struct rte_flow_error *error)
5221 {
5222         struct mlx5_priv *priv = dev->data->dev_private;
5223         struct mlx5_ibv_shared *sh = priv->sh;
5224         struct mlx5_flow_dv_matcher *cache_matcher;
5225         struct mlx5dv_flow_matcher_attr dv_attr = {
5226                 .type = IBV_FLOW_ATTR_NORMAL,
5227                 .match_mask = (void *)&matcher->mask,
5228         };
5229         struct mlx5_flow_tbl_resource *tbl = NULL;
5230
5231         /* Lookup from cache. */
5232         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
5233                 if (matcher->crc == cache_matcher->crc &&
5234                     matcher->priority == cache_matcher->priority &&
5235                     matcher->egress == cache_matcher->egress &&
5236                     matcher->group == cache_matcher->group &&
5237                     matcher->transfer == cache_matcher->transfer &&
5238                     !memcmp((const void *)matcher->mask.buf,
5239                             (const void *)cache_matcher->mask.buf,
5240                             cache_matcher->mask.size)) {
5241                         DRV_LOG(DEBUG,
5242                                 "priority %hd use %s matcher %p: refcnt %d++",
5243                                 cache_matcher->priority,
5244                                 cache_matcher->egress ? "tx" : "rx",
5245                                 (void *)cache_matcher,
5246                                 rte_atomic32_read(&cache_matcher->refcnt));
5247                         rte_atomic32_inc(&cache_matcher->refcnt);
5248                         dev_flow->dv.matcher = cache_matcher;
5249                         return 0;
5250                 }
5251         }
5252         /* Register new matcher. */
5253         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
5254         if (!cache_matcher)
5255                 return rte_flow_error_set(error, ENOMEM,
5256                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5257                                           "cannot allocate matcher memory");
5258         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
5259                                        matcher->egress, matcher->transfer,
5260                                        error);
5261         if (!tbl) {
5262                 rte_free(cache_matcher);
5263                 return rte_flow_error_set(error, ENOMEM,
5264                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5265                                           NULL, "cannot create table");
5266         }
5267         *cache_matcher = *matcher;
5268         dv_attr.match_criteria_enable =
5269                 flow_dv_matcher_enable(cache_matcher->mask.buf);
5270         dv_attr.priority = matcher->priority;
5271         if (matcher->egress)
5272                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
5273         cache_matcher->matcher_object =
5274                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
5275         if (!cache_matcher->matcher_object) {
5276                 rte_free(cache_matcher);
5277 #ifdef HAVE_MLX5DV_DR
5278                 flow_dv_tbl_resource_release(tbl);
5279 #endif
5280                 return rte_flow_error_set(error, ENOMEM,
5281                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5282                                           NULL, "cannot create matcher");
5283         }
5284         rte_atomic32_inc(&cache_matcher->refcnt);
5285         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
5286         dev_flow->dv.matcher = cache_matcher;
5287         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
5288                 cache_matcher->priority,
5289                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
5290                 rte_atomic32_read(&cache_matcher->refcnt));
5291         rte_atomic32_inc(&tbl->refcnt);
5292         return 0;
5293 }
5294
5295 /**
5296  * Find existing tag resource or create and register a new one.
5297  *
5298  * @param[in, out] dev
5299  *   Pointer to rte_eth_dev structure.
5300  * @param[in, out] resource
5301  *   Pointer to tag resource.
5302  * @param[in, out] dev_flow
5303  *   Pointer to the dev_flow.
5304  * @param[out] error
5305  *   Pointer to error structure.
5306  *
5307  * @return
5308  *   0 on success, a negative errno value otherwise and rte_errno is set.
5309  */
5310 static int
5311 flow_dv_tag_resource_register
5312                         (struct rte_eth_dev *dev,
5313                          struct mlx5_flow_dv_tag_resource *resource,
5314                          struct mlx5_flow *dev_flow,
5315                          struct rte_flow_error *error)
5316 {
5317         struct mlx5_priv *priv = dev->data->dev_private;
5318         struct mlx5_ibv_shared *sh = priv->sh;
5319         struct mlx5_flow_dv_tag_resource *cache_resource;
5320
5321         /* Lookup a matching resource from cache. */
5322         LIST_FOREACH(cache_resource, &sh->tags, next) {
5323                 if (resource->tag == cache_resource->tag) {
5324                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5325                                 (void *)cache_resource,
5326                                 rte_atomic32_read(&cache_resource->refcnt));
5327                         rte_atomic32_inc(&cache_resource->refcnt);
5328                         dev_flow->flow->tag_resource = cache_resource;
5329                         return 0;
5330                 }
5331         }
5332         /* Register new resource. */
5333         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5334         if (!cache_resource)
5335                 return rte_flow_error_set(error, ENOMEM,
5336                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5337                                           "cannot allocate resource memory");
5338         *cache_resource = *resource;
5339         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5340                 (resource->tag);
5341         if (!cache_resource->action) {
5342                 rte_free(cache_resource);
5343                 return rte_flow_error_set(error, ENOMEM,
5344                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5345                                           NULL, "cannot create action");
5346         }
5347         rte_atomic32_init(&cache_resource->refcnt);
5348         rte_atomic32_inc(&cache_resource->refcnt);
5349         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5350         dev_flow->flow->tag_resource = cache_resource;
5351         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5352                 (void *)cache_resource,
5353                 rte_atomic32_read(&cache_resource->refcnt));
5354         return 0;
5355 }
5356
5357 /**
5358  * Release the tag.
5359  *
5360  * @param dev
5361  *   Pointer to Ethernet device.
5362  * @param tag
5363  *   Pointer to the tag resource to release.
5364  *
5365  * @return
5366  *   1 while a reference on it exists, 0 when freed.
5367  */
5368 static int
5369 flow_dv_tag_release(struct rte_eth_dev *dev,
5370                     struct mlx5_flow_dv_tag_resource *tag)
5371 {
5372         assert(tag);
5373         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5374                 dev->data->port_id, (void *)tag,
5375                 rte_atomic32_read(&tag->refcnt));
5376         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5377                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5378                 LIST_REMOVE(tag, next);
5379                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5380                         dev->data->port_id, (void *)tag);
5381                 rte_free(tag);
5382                 return 0;
5383         }
5384         return 1;
5385 }
5386
5387 /**
5388  * Translate port ID action to vport.
5389  *
5390  * @param[in] dev
5391  *   Pointer to rte_eth_dev structure.
5392  * @param[in] action
5393  *   Pointer to the port ID action.
5394  * @param[out] dst_port_id
5395  *   The target port ID.
5396  * @param[out] error
5397  *   Pointer to the error structure.
5398  *
5399  * @return
5400  *   0 on success, a negative errno value otherwise and rte_errno is set.
5401  */
5402 static int
5403 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5404                                  const struct rte_flow_action *action,
5405                                  uint32_t *dst_port_id,
5406                                  struct rte_flow_error *error)
5407 {
5408         uint32_t port;
5409         struct mlx5_priv *priv;
5410         const struct rte_flow_action_port_id *conf =
5411                         (const struct rte_flow_action_port_id *)action->conf;
5412
5413         port = conf->original ? dev->data->port_id : conf->id;
5414         priv = mlx5_port_to_eswitch_info(port);
5415         if (!priv)
5416                 return rte_flow_error_set(error, rte_errno,
5417                                           RTE_FLOW_ERROR_TYPE_ACTION,
5418                                           NULL,
5419                                           "No eswitch info was found for port");
5420         if (priv->vport_meta_mask)
5421                 *dst_port_id = priv->vport_meta_tag;
5422         else
5423                 *dst_port_id = priv->vport_id;
5424         return 0;
5425 }
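
/*
 * Illustrative application-side action consumed here (values are
 * examples only): redirect to the E-Switch port with DPDK port id 1.
 *
 *   static const struct rte_flow_action_port_id pid_conf = {
 *           .original = 0,
 *           .id = 1,
 *   };
 *   static const struct rte_flow_action pid_action = {
 *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *           .conf = &pid_conf,
 *   };
 */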
5426
5427 /**
5428  * Add Tx queue item to matcher and to the value.
5429  *
5430  * @param[in] dev
5431  *   Pointer to the Ethernet device structure.
5432  * @param[in, out] matcher
5433  *   Flow matcher.
5434  * @param[in, out] key
5435  *   Flow matcher value.
5436  * @param[in] item
5437  *   Flow pattern to translate.
5438  */
5441 static void
5442 flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
5443                                 void *matcher, void *key,
5444                                 const struct rte_flow_item *item)
5445 {
5446         const struct mlx5_rte_flow_item_tx_queue *queue_m;
5447         const struct mlx5_rte_flow_item_tx_queue *queue_v;
5448         void *misc_m =
5449                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
5450         void *misc_v =
5451                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
5452         struct mlx5_txq_ctrl *txq;
5453         uint32_t queue;
5454
5456         queue_m = (const void *)item->mask;
5457         if (!queue_m)
5458                 return;
5459         queue_v = (const void *)item->spec;
5460         if (!queue_v)
5461                 return;
5462         txq = mlx5_txq_get(dev, queue_v->queue);
5463         if (!txq)
5464                 return;
5465         queue = txq->obj->sq->id;
5466         MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
5467         MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
5468                  queue & queue_m->queue);
5469         mlx5_txq_release(dev, queue_v->queue);
5470 }
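
/*
 * Illustrative internal item consumed here (queue index is an example
 * only). MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE is a PMD-private item type,
 * so such a pattern is built by the driver itself, not by applications.
 *
 *   struct mlx5_rte_flow_item_tx_queue queue_spec = { .queue = 0 };
 *   struct mlx5_rte_flow_item_tx_queue queue_mask = {
 *           .queue = UINT32_MAX,
 *   };
 */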
5471
5472 /**
5473  * Fill the flow with DV spec.
5474  *
5475  * @param[in] dev
5476  *   Pointer to rte_eth_dev structure.
5477  * @param[in, out] dev_flow
5478  *   Pointer to the sub flow.
5479  * @param[in] attr
5480  *   Pointer to the flow attributes.
5481  * @param[in] items
5482  *   Pointer to the list of items.
5483  * @param[in] actions
5484  *   Pointer to the list of actions.
5485  * @param[out] error
5486  *   Pointer to the error structure.
5487  *
5488  * @return
5489  *   0 on success, a negative errno value otherwise and rte_errno is set.
5490  */
5491 static int
5492 flow_dv_translate(struct rte_eth_dev *dev,
5493                   struct mlx5_flow *dev_flow,
5494                   const struct rte_flow_attr *attr,
5495                   const struct rte_flow_item items[],
5496                   const struct rte_flow_action actions[],
5497                   struct rte_flow_error *error)
5498 {
5499         struct mlx5_priv *priv = dev->data->dev_private;
5500         struct rte_flow *flow = dev_flow->flow;
5501         uint64_t item_flags = 0;
5502         uint64_t last_item = 0;
5503         uint64_t action_flags = 0;
5504         uint64_t priority = attr->priority;
5505         struct mlx5_flow_dv_matcher matcher = {
5506                 .mask = {
5507                         .size = sizeof(matcher.mask.buf),
5508                 },
5509         };
5510         int actions_n = 0;
5511         bool actions_end = false;
5512         struct mlx5_flow_dv_modify_hdr_resource res = {
5513                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5514                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5515         };
5516         union flow_dv_attr flow_attr = { .attr = 0 };
5517         struct mlx5_flow_dv_tag_resource tag_resource;
5518         uint32_t modify_action_position = UINT32_MAX;
5519         void *match_mask = matcher.mask.buf;
5520         void *match_value = dev_flow->dv.value.buf;
5521         uint8_t next_protocol = 0xff;
5522         struct rte_vlan_hdr vlan = { 0 };
5523         uint32_t table;
5524         int ret = 0;
5525
5526         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5527                                        &table, error);
5528         if (ret)
5529                 return ret;
5530         flow->group = table;
5531         if (attr->transfer)
5532                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5533         if (priority == MLX5_FLOW_PRIO_RSVD)
5534                 priority = priv->config.flow_prio - 1;
5535         for (; !actions_end ; actions++) {
5536                 const struct rte_flow_action_queue *queue;
5537                 const struct rte_flow_action_rss *rss;
5538                 const struct rte_flow_action *action = actions;
5539                 const struct rte_flow_action_count *count = action->conf;
5540                 const uint8_t *rss_key;
5541                 const struct rte_flow_action_jump *jump_data;
5542                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5543                 struct mlx5_flow_tbl_resource *tbl;
5544                 uint32_t port_id = 0;
5545                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5546                 int action_type = actions->type;
5547                 const struct rte_flow_action *found_action = NULL;
5548
5549                 switch (action_type) {
5550                 case RTE_FLOW_ACTION_TYPE_VOID:
5551                         break;
5552                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5553                         if (flow_dv_translate_action_port_id(dev, action,
5554                                                              &port_id, error))
5555                                 return -rte_errno;
5556                         port_id_resource.port_id = port_id;
5557                         if (flow_dv_port_id_action_resource_register
5558                             (dev, &port_id_resource, dev_flow, error))
5559                                 return -rte_errno;
5560                         dev_flow->dv.actions[actions_n++] =
5561                                 dev_flow->dv.port_id_action->action;
5562                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5563                         break;
5564                 case RTE_FLOW_ACTION_TYPE_FLAG:
5565                         tag_resource.tag =
5566                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5567                         if (!flow->tag_resource)
5568                                 if (flow_dv_tag_resource_register
5569                                     (dev, &tag_resource, dev_flow, error))
5570                                         return -rte_errno;
5571                         dev_flow->dv.actions[actions_n++] =
5572                                 flow->tag_resource->action;
5573                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5574                         break;
5575                 case RTE_FLOW_ACTION_TYPE_MARK:
5576                         tag_resource.tag = mlx5_flow_mark_set
5577                               (((const struct rte_flow_action_mark *)
5578                                (actions->conf))->id);
5579                         if (!flow->tag_resource)
5580                                 if (flow_dv_tag_resource_register
5581                                     (dev, &tag_resource, dev_flow, error))
5582                                         return -rte_errno;
5583                         dev_flow->dv.actions[actions_n++] =
5584                                 flow->tag_resource->action;
5585                         action_flags |= MLX5_FLOW_ACTION_MARK;
5586                         break;
5587                 case RTE_FLOW_ACTION_TYPE_DROP:
5588                         action_flags |= MLX5_FLOW_ACTION_DROP;
5589                         break;
5590                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5591                         queue = actions->conf;
5592                         flow->rss.queue_num = 1;
5593                         (*flow->queue)[0] = queue->index;
5594                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5595                         break;
5596                 case RTE_FLOW_ACTION_TYPE_RSS:
5597                         rss = actions->conf;
5598                         if (flow->queue)
5599                                 memcpy((*flow->queue), rss->queue,
5600                                        rss->queue_num * sizeof(uint16_t));
5601                         flow->rss.queue_num = rss->queue_num;
5602                         /* NULL RSS key indicates default RSS key. */
5603                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5604                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5605                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5606                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5607                         flow->rss.level = rss->level;
5608                         action_flags |= MLX5_FLOW_ACTION_RSS;
5609                         break;
5610                 case RTE_FLOW_ACTION_TYPE_COUNT:
5611                         if (!priv->config.devx) {
5612                                 rte_errno = ENOTSUP;
5613                                 goto cnt_err;
5614                         }
5615                         flow->counter = flow_dv_counter_alloc(dev,
5616                                                               count->shared,
5617                                                               count->id,
5618                                                               flow->group);
5619                         if (flow->counter == NULL)
5620                                 goto cnt_err;
5621                         dev_flow->dv.actions[actions_n++] =
5622                                 flow->counter->action;
5623                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5624                         break;
5625 cnt_err:
5626                         if (rte_errno == ENOTSUP)
5627                                 return rte_flow_error_set
5628                                               (error, ENOTSUP,
5629                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5630                                                NULL,
5631                                                "count action not supported");
5632                         else
5633                                 return rte_flow_error_set
5634                                                 (error, rte_errno,
5635                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5636                                                  action,
5637                                                  "cannot create counter"
5638                                                   " object.");
5639                         break;
5640                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5641                         dev_flow->dv.actions[actions_n++] =
5642                                                 priv->sh->pop_vlan_action;
5643                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5644                         break;
5645                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5646                         flow_dev_get_vlan_info_from_items(items, &vlan);
5647                         vlan.eth_proto = rte_be_to_cpu_16
5648                              ((((const struct rte_flow_action_of_push_vlan *)
5649                                                    actions->conf)->ethertype));
5650                         found_action = mlx5_flow_find_action
5651                                         (actions + 1,
5652                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
5653                         if (found_action)
5654                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5655                         found_action = mlx5_flow_find_action
5656                                         (actions + 1,
5657                                          RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
5658                         if (found_action)
5659                                 mlx5_update_vlan_vid_pcp(found_action, &vlan);
5660                         if (flow_dv_create_action_push_vlan
5661                                             (dev, attr, &vlan, dev_flow, error))
5662                                 return -rte_errno;
5663                         dev_flow->dv.actions[actions_n++] =
5664                                            dev_flow->dv.push_vlan_res->action;
5665                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5666                         break;
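                /*
                 * Illustrative action sequence taking the branch above
                 * (values are examples only): the VID/PCP set actions are
                 * folded into the VLAN header being pushed instead of being
                 * translated to modify-header actions.
                 *
                 *   static const struct rte_flow_action_of_push_vlan push = {
                 *           .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
                 *   };
                 *   static const struct rte_flow_action_of_set_vlan_vid vid = {
                 *           .vlan_vid = RTE_BE16(10),
                 *   };
                 *   static const struct rte_flow_action_of_set_vlan_pcp pcp = {
                 *           .vlan_pcp = 3,
                 *   };
                 */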
5667                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5668                         /* Handled by the preceding of_push_vlan action. */
5669                         assert(action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN);
5670                         break;
5671                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5672                         if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
5673                                 break;
5674                         flow_dev_get_vlan_info_from_items(items, &vlan);
5675                         mlx5_update_vlan_vid_pcp(actions, &vlan);
5676                         /* With no VLAN push, this is a modify-header action. */
5677                         if (flow_dv_convert_action_modify_vlan_vid
5678                                                         (&res, actions, error))
5679                                 return -rte_errno;
5680                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5681                         break;
5682                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5683                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5684                         if (flow_dv_create_action_l2_encap(dev, actions,
5685                                                            dev_flow,
5686                                                            attr->transfer,
5687                                                            error))
5688                                 return -rte_errno;
5689                         dev_flow->dv.actions[actions_n++] =
5690                                 dev_flow->dv.encap_decap->verbs_action;
5691                         action_flags |= actions->type ==
5692                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5693                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5694                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5695                         break;
5696                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5697                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5698                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5699                                                            attr->transfer,
5700                                                            error))
5701                                 return -rte_errno;
5702                         dev_flow->dv.actions[actions_n++] =
5703                                 dev_flow->dv.encap_decap->verbs_action;
5704                         action_flags |= actions->type ==
5705                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5706                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5707                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5708                         break;
5709                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5710                         /* Handle encap with preceding decap. */
5711                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5712                                 if (flow_dv_create_action_raw_encap
5713                                         (dev, actions, dev_flow, attr, error))
5714                                         return -rte_errno;
5715                                 dev_flow->dv.actions[actions_n++] =
5716                                         dev_flow->dv.encap_decap->verbs_action;
5717                         } else {
5718                                 /* Handle encap without preceding decap. */
5719                                 if (flow_dv_create_action_l2_encap
5720                                     (dev, actions, dev_flow, attr->transfer,
5721                                      error))
5722                                         return -rte_errno;
5723                                 dev_flow->dv.actions[actions_n++] =
5724                                         dev_flow->dv.encap_decap->verbs_action;
5725                         }
5726                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5727                         break;
5728                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5729                         /* Check if this decap is followed by encap. */
5730                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5731                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5732                                action++) {
5733                         }
5734                         /* Handle decap only if it isn't followed by encap. */
5735                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5736                                 if (flow_dv_create_action_l2_decap
5737                                     (dev, dev_flow, attr->transfer, error))
5738                                         return -rte_errno;
5739                                 dev_flow->dv.actions[actions_n++] =
5740                                         dev_flow->dv.encap_decap->verbs_action;
5741                         }
5742                         /* If decap is followed by encap, handle it at encap. */
5743                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5744                         break;
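                /*
                 * Illustrative pair handled by the scan above (sizes and
                 * buffers are hypothetical): a RAW_DECAP followed by a
                 * RAW_ENCAP is applied as one header rewrite at the encap
                 * action; encap_buf/encap_len stand for a caller-provided
                 * replacement header.
                 *
                 *   struct rte_flow_action_raw_decap decap = {
                 *           .size = sizeof(struct rte_ether_hdr),
                 *   };
                 *   struct rte_flow_action_raw_encap encap = {
                 *           .data = encap_buf,
                 *           .size = encap_len,
                 *   };
                 */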
5745                 case RTE_FLOW_ACTION_TYPE_JUMP:
5746                         jump_data = action->conf;
5747                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5748                                                        jump_data->group, &table,
5749                                                        error);
5750                         if (ret)
5751                                 return ret;
5752                         tbl = flow_dv_tbl_resource_get(dev, table,
5753                                                        attr->egress,
5754                                                        attr->transfer, error);
5755                         if (!tbl)
5756                                 return rte_flow_error_set
5757                                                 (error, errno,
5758                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5759                                                  NULL,
5760                                                  "cannot create jump action.");
5761                         jump_tbl_resource.tbl = tbl;
5762                         if (flow_dv_jump_tbl_resource_register
5763                             (dev, &jump_tbl_resource, dev_flow, error)) {
5764                                 flow_dv_tbl_resource_release(tbl);
5765                                 return rte_flow_error_set
5766                                                 (error, errno,
5767                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5768                                                  NULL,
5769                                                  "cannot create jump action.");
5770                         }
5771                         dev_flow->dv.actions[actions_n++] =
5772                                 dev_flow->dv.jump->action;
5773                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5774                         break;
5775                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5776                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5777                         if (flow_dv_convert_action_modify_mac(&res, actions,
5778                                                               error))
5779                                 return -rte_errno;
5780                         action_flags |= actions->type ==
5781                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5782                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5783                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5784                         break;
5785                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5786                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5787                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5788                                                                error))
5789                                 return -rte_errno;
5790                         action_flags |= actions->type ==
5791                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5792                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5793                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5794                         break;
5795                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5796                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5797                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5798                                                                error))
5799                                 return -rte_errno;
5800                         action_flags |= actions->type ==
5801                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5802                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5803                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5804                         break;
5805                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5806                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5807                         if (flow_dv_convert_action_modify_tp(&res, actions,
5808                                                              items, &flow_attr,
5809                                                              error))
5810                                 return -rte_errno;
5811                         action_flags |= actions->type ==
5812                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5813                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5814                                         MLX5_FLOW_ACTION_SET_TP_DST;
5815                         break;
5816                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5817                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5818                                                                   &flow_attr,
5819                                                                   error))
5820                                 return -rte_errno;
5821                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5822                         break;
5823                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5824                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5825                                                              items, &flow_attr,
5826                                                              error))
5827                                 return -rte_errno;
5828                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5829                         break;
5830                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5831                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5832                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5833                                                                   error))
5834                                 return -rte_errno;
5835                         action_flags |= actions->type ==
5836                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5837                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
5838                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5839                         break;
5840
5841                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5842                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5843                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
5844                                                                   error))
5845                                 return -rte_errno;
5846                         action_flags |= actions->type ==
5847                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5848                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
5849                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
5850                         break;
5851                 case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
5852                         if (flow_dv_convert_action_set_reg(&res, actions,
5853                                                            error))
5854                                 return -rte_errno;
5855                         action_flags |= MLX5_FLOW_ACTION_SET_TAG;
5856                         break;
5857                 case RTE_FLOW_ACTION_TYPE_END:
5858                         actions_end = true;
5859                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
5860                                 /* create modify action if needed. */
5861                                 if (flow_dv_modify_hdr_resource_register
5862                                                                 (dev, &res,
5863                                                                  dev_flow,
5864                                                                  error))
5865                                         return -rte_errno;
5866                                 dev_flow->dv.actions[modify_action_position] =
5867                                         dev_flow->dv.modify_hdr->verbs_action;
5868                         }
5869                         break;
5870                 default:
5871                         break;
5872                 }
5873                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
5874                     modify_action_position == UINT32_MAX)
5875                         modify_action_position = actions_n++;
5876         }
5877         dev_flow->dv.actions_n = actions_n;
5878         dev_flow->actions = action_flags;
5879         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5880                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5881                 int item_type = items->type;
5882
5883                 switch (item_type) {
5884                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5885                         flow_dv_translate_item_port_id(dev, match_mask,
5886                                                        match_value, items);
5887                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5888                         break;
5889                 case RTE_FLOW_ITEM_TYPE_ETH:
5890                         flow_dv_translate_item_eth(match_mask, match_value,
5891                                                    items, tunnel);
5892                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5893                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5894                                              MLX5_FLOW_LAYER_OUTER_L2;
5895                         break;
5896                 case RTE_FLOW_ITEM_TYPE_VLAN:
5897                         flow_dv_translate_item_vlan(dev_flow,
5898                                                     match_mask, match_value,
5899                                                     items, tunnel);
5900                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5901                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5902                                               MLX5_FLOW_LAYER_INNER_VLAN) :
5903                                              (MLX5_FLOW_LAYER_OUTER_L2 |
5904                                               MLX5_FLOW_LAYER_OUTER_VLAN);
5905                         break;
5906                 case RTE_FLOW_ITEM_TYPE_IPV4:
5907                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5908                                                   &item_flags, &tunnel);
5909                         flow_dv_translate_item_ipv4(match_mask, match_value,
5910                                                     items, tunnel, flow->group);
5911                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5912                         dev_flow->dv.hash_fields |=
5913                                 mlx5_flow_hashfields_adjust
5914                                         (dev_flow, tunnel,
5915                                          MLX5_IPV4_LAYER_TYPES,
5916                                          MLX5_IPV4_IBV_RX_HASH);
5917                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5918                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5919                         if (items->mask != NULL &&
5920                             ((const struct rte_flow_item_ipv4 *)
5921                              items->mask)->hdr.next_proto_id) {
5922                                 next_protocol =
5923                                         ((const struct rte_flow_item_ipv4 *)
5924                                          (items->spec))->hdr.next_proto_id;
5925                                 next_protocol &=
5926                                         ((const struct rte_flow_item_ipv4 *)
5927                                          (items->mask))->hdr.next_proto_id;
5928                         } else {
5929                                 /* Reset for inner layer. */
5930                                 next_protocol = 0xff;
5931                         }
5932                         break;
5933                 case RTE_FLOW_ITEM_TYPE_IPV6:
5934                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5935                                                   &item_flags, &tunnel);
5936                         flow_dv_translate_item_ipv6(match_mask, match_value,
5937                                                     items, tunnel, flow->group);
5938                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5939                         dev_flow->dv.hash_fields |=
5940                                 mlx5_flow_hashfields_adjust
5941                                         (dev_flow, tunnel,
5942                                          MLX5_IPV6_LAYER_TYPES,
5943                                          MLX5_IPV6_IBV_RX_HASH);
5944                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5945                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5946                         if (items->mask != NULL &&
5947                             ((const struct rte_flow_item_ipv6 *)
5948                              items->mask)->hdr.proto) {
5949                                 next_protocol =
5950                                         ((const struct rte_flow_item_ipv6 *)
5951                                          items->spec)->hdr.proto;
5952                                 next_protocol &=
5953                                         ((const struct rte_flow_item_ipv6 *)
5954                                          items->mask)->hdr.proto;
5955                         } else {
5956                                 /* Reset for inner layer. */
5957                                 next_protocol = 0xff;
5958                         }
5959                         break;
5960                 case RTE_FLOW_ITEM_TYPE_TCP:
5961                         flow_dv_translate_item_tcp(match_mask, match_value,
5962                                                    items, tunnel);
5963                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5964                         dev_flow->dv.hash_fields |=
5965                                 mlx5_flow_hashfields_adjust
5966                                         (dev_flow, tunnel, ETH_RSS_TCP,
5967                                          IBV_RX_HASH_SRC_PORT_TCP |
5968                                          IBV_RX_HASH_DST_PORT_TCP);
5969                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5970                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5971                         break;
5972                 case RTE_FLOW_ITEM_TYPE_UDP:
5973                         flow_dv_translate_item_udp(match_mask, match_value,
5974                                                    items, tunnel);
5975                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5976                         dev_flow->dv.hash_fields |=
5977                                 mlx5_flow_hashfields_adjust
5978                                         (dev_flow, tunnel, ETH_RSS_UDP,
5979                                          IBV_RX_HASH_SRC_PORT_UDP |
5980                                          IBV_RX_HASH_DST_PORT_UDP);
5981                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5982                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5983                         break;
5984                 case RTE_FLOW_ITEM_TYPE_GRE:
5985                         flow_dv_translate_item_gre(match_mask, match_value,
5986                                                    items, tunnel);
5987                         last_item = MLX5_FLOW_LAYER_GRE;
5988                         break;
5989                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5990                         flow_dv_translate_item_gre_key(match_mask,
5991                                                        match_value, items);
5992                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5993                         break;
5994                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5995                         flow_dv_translate_item_nvgre(match_mask, match_value,
5996                                                      items, tunnel);
5997                         last_item = MLX5_FLOW_LAYER_GRE;
5998                         break;
5999                 case RTE_FLOW_ITEM_TYPE_VXLAN:
6000                         flow_dv_translate_item_vxlan(match_mask, match_value,
6001                                                      items, tunnel);
6002                         last_item = MLX5_FLOW_LAYER_VXLAN;
6003                         break;
6004                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6005                         flow_dv_translate_item_vxlan(match_mask, match_value,
6006                                                      items, tunnel);
6007                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
6008                         break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			flow_dv_translate_item_geneve(match_mask, match_value,
						      items, tunnel);
			last_item = MLX5_FLOW_LAYER_GENEVE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			flow_dv_translate_item_mpls(match_mask, match_value,
						    items, last_item, tunnel);
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			flow_dv_translate_item_meta(match_mask, match_value,
						    items);
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			flow_dv_translate_item_icmp(match_mask, match_value,
						    items, tunnel);
			last_item = MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			flow_dv_translate_item_icmp6(match_mask, match_value,
						     items, tunnel);
			last_item = MLX5_FLOW_LAYER_ICMP6;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
			flow_dv_translate_item_tag(match_mask, match_value,
						   items);
			last_item = MLX5_FLOW_ITEM_TAG;
			break;
		case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
			flow_dv_translate_item_tx_queue(dev, match_mask,
							match_value,
							items);
			last_item = MLX5_FLOW_ITEM_TX_QUEUE;
			break;
		default:
			break;
		}
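		/* Accumulate the layer just parsed; stored in dev_flow->layers below. */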
		item_flags |= last_item;
	}
	/*
	 * In case of ingress traffic when E-Switch mode is enabled,
	 * there are two cases where the source port must be set manually:
	 * a NIC steering rule, and an E-Switch rule in which no port_id
	 * item was found. In both cases the source port is set according
	 * to the port currently in use.
	 */
	if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
	    (priv->representor || priv->master)) {
		if (flow_dv_translate_item_port_id(dev, match_mask,
						   match_value, NULL))
			return -rte_errno;
	}
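	/*
	 * Debug-only sanity check: the match value must not set bits that
	 * are absent from the match mask.
	 */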
	assert(!flow_dv_check_valid_spec(matcher.mask.buf,
					 dev_flow->dv.value.buf));
	dev_flow->layers = item_flags;
	/* Register matcher. */
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
						     matcher.priority);
	matcher.egress = attr->egress;
	matcher.group = flow->group;
	matcher.transfer = attr->transfer;
	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
		return -rte_errno;
	return 0;
}

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	int n;
	int err;

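	/*
	 * Complete each device sub-flow's action array with its fate action
	 * (drop or queue/RSS) and create the hardware flow rule.
	 */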
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
			if (flow->transfer) {
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				dv->hrxq = mlx5_hrxq_drop_new(dev);
				if (!dv->hrxq) {
					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
					goto error;
				}
				dv->actions[n++] = dv->hrxq->action;
			}
		} else if (dev_flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq) {
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			}
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] = dv->hrxq->action;
		}
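		/* All actions assembled: create the hardware flow via the glue layer. */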
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
		if (priv->vmwa_context &&
		    dev_flow->dv.vf_vlan.tag &&
		    !dev_flow->dv.vf_vlan.created) {
			/*
			 * The rule contains the VLAN pattern. For a VF we
			 * create a VLAN interface so that the hypervisor
			 * sets the correct e-Switch vport context.
			 */
			mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;
		if (dv->hrxq) {
			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
		if (dev_flow->dv.vf_vlan.tag &&
		    dev_flow->dv.vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;

	assert(matcher->matcher_object);
	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
		dev->data->port_id, (void *)matcher,
		rte_atomic32_read(&matcher->refcnt));
	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			   (matcher->matcher_object));
		LIST_REMOVE(matcher, next);
		if (matcher->egress)
			tbl = &sh->tx_tbl[matcher->group];
		else
			tbl = &sh->rx_tbl[matcher->group];
		flow_dv_tbl_resource_release(tbl);
		rte_free(matcher);
		DRV_LOG(DEBUG, "port %u matcher %p: removed",
			dev->data->port_id, (void *)matcher);
		return 0;
	}
	return 1;
}
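
/*
 * The release helpers below share one pattern: decrement the resource's
 * atomic reference counter and, once it drops to zero, destroy the
 * underlying flow action, unlink the cache entry and free it.
 */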

/**
 * Release an encap/decap resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_encap_decap_resource *cache_resource =
						flow->dv.encap_decap;

	assert(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->verbs_action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "encap/decap resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}

/**
 * Release a jump-to-table action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
						flow->dv.jump;

	assert(cache_resource->action);
	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
		LIST_REMOVE(cache_resource, next);
		flow_dv_tbl_resource_release(cache_resource->tbl);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "jump table resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}

/**
 * Release a modify-header resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
						flow->dv.modify_hdr;

	assert(cache_resource->verbs_action);
	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->verbs_action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "modify-header resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}

/**
 * Release a port ID action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_port_id_action_resource *cache_resource =
		flow->dv.port_id_action;

	assert(cache_resource->action);
	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "port id action resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}

/**
 * Release a push VLAN action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
		flow->dv.push_vlan_res;

	assert(cache_resource->action);
	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
		LIST_REMOVE(cache_resource, next);
		rte_free(cache_resource);
		DRV_LOG(DEBUG, "push vlan action resource %p: removed",
			(void *)cache_resource);
		return 0;
	}
	return 1;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
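	/*
	 * Destroy the hardware flows and release the queues, but keep the
	 * cached resources so the flow can be re-applied later.
	 */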
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		if (dv->flow) {
			claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
			dv->flow = NULL;
		}
		if (dv->hrxq) {
			if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
		if (dev_flow->dv.vf_vlan.tag &&
		    dev_flow->dv.vf_vlan.created)
			mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
	}
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_release(dev, flow->counter);
		flow->counter = NULL;
	}
	if (flow->tag_resource) {
		flow_dv_tag_release(dev, flow->tag_resource);
		flow->tag_resource = NULL;
	}
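	/* Drain the device sub-flows, dropping each cached resource reference. */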
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		if (dev_flow->dv.matcher)
			flow_dv_matcher_release(dev, dev_flow);
		if (dev_flow->dv.encap_decap)
			flow_dv_encap_decap_resource_release(dev_flow);
		if (dev_flow->dv.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev_flow);
		if (dev_flow->dv.jump)
			flow_dv_jump_tbl_resource_release(dev_flow);
		if (dev_flow->dv.port_id_action)
			flow_dv_port_id_action_resource_release(dev_flow);
		if (dev_flow->dv.push_vlan_res)
			flow_dv_push_vlan_action_resource_release(dev_flow);
		rte_free(dev_flow);
	}
}

/**
 * Query a DV flow rule for its statistics via DevX.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
		    void *data, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;

	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (flow->counter) {
		uint64_t pkts, bytes;
		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
					       &bytes);

		if (err)
			return rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot read counters");
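		/*
		 * Hardware counters are absolute: report deltas against the
		 * values saved at the last reset.
		 */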
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = pkts - flow->counter->hits;
		qc->bytes = bytes - flow->counter->bytes;
		if (qc->reset) {
			flow->counter->hits = pkts;
			flow->counter->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow __rte_unused,
	      const struct rte_flow_action *actions __rte_unused,
	      void *data __rte_unused,
	      struct rte_flow_error *error __rte_unused)
{
	int ret = -EINVAL;

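	/* Only the COUNT action can be queried; anything else is rejected. */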
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

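/*
 * The flow_d_* wrappers below take the shared-context lock so that DV
 * objects shared between ports of the same device are not modified
 * concurrently.
 */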
/*
 * Mutex-protected thunk to flow_dv_translate().
 */
static int
flow_d_translate(struct rte_eth_dev *dev,
		 struct mlx5_flow *dev_flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
	flow_d_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to flow_dv_apply().
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to flow_dv_remove().
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to flow_dv_destroy().
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}

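/* Flow driver callbacks for the DV path, dispatched by the generic flow layer. */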
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */